/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ExecutableAllocator.h"

#if ENABLE(JIT)

#include "CodeProfiling.h"
#include "ExecutableAllocationFuzz.h"
#include "JSCInlines.h"
#include <wtf/FileSystem.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/ProcessID.h>
#include <wtf/SystemTracing.h>
#include <wtf/WorkQueue.h>

#if OS(DARWIN)
#include <mach/mach_time.h>
#include <sys/mman.h>
#endif

#if PLATFORM(IOS_FAMILY)
#include <wtf/cocoa/Entitlements.h>
#endif

#include "LinkBuffer.h"
#include "MacroAssembler.h"

#if PLATFORM(COCOA)
#define HAVE_REMAP_JIT 1
#endif

#if HAVE(REMAP_JIT)
#if CPU(ARM64) && PLATFORM(IOS_FAMILY)
#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
#endif
#endif

#if OS(DARWIN)
#include <mach/mach.h>
extern "C" {
    /* Routine mach_vm_remap */
#ifdef mig_external
    mig_external
#else
    extern
#endif /* mig_external */
    kern_return_t mach_vm_remap
    (
        vm_map_t target_task,
        mach_vm_address_t *target_address,
        mach_vm_size_t size,
        mach_vm_offset_t mask,
        int flags,
        vm_map_t src_task,
        mach_vm_address_t src_address,
        boolean_t copy,
        vm_prot_t *cur_protection,
        vm_prot_t *max_protection,
        vm_inherit_t inheritance
    );
}

#endif

namespace JSC {

using namespace WTF;

#if defined(FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB) && FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB > 0
static constexpr size_t fixedExecutableMemoryPoolSize = FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB * 1024 * 1024;
#elif CPU(ARM)
static constexpr size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
#elif CPU(ARM64)
static constexpr size_t fixedExecutableMemoryPoolSize = 128 * 1024 * 1024;
#elif CPU(X86_64)
static constexpr size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
#else
static constexpr size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#endif

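// A fraction of the pool is kept in reserve: allocations made with JITCompilationCanFail
// are refused once usage would exceed (1 - executablePoolReservationFraction) of the
// reservation. See ExecutableAllocator::allocate() and memoryPressureMultiplier() below.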
#if CPU(ARM)
static constexpr double executablePoolReservationFraction = 0.15;
#else
static constexpr double executablePoolReservationFraction = 0.25;
#endif

static bool isJITEnabled()
{
    bool jitEnabled = !g_jscConfig.jitDisabled;
#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
    return processHasEntitlement("dynamic-codesigning") && jitEnabled;
#else
    return jitEnabled;
#endif
}

void ExecutableAllocator::setJITEnabled(bool enabled)
{
    bool jitEnabled = !g_jscConfig.jitDisabled;
    ASSERT(!g_jscConfig.fixedVMPoolExecutableAllocator);
    if (jitEnabled == enabled)
        return;

    g_jscConfig.jitDisabled = !enabled;

#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
    if (!enabled) {
        // Because of an OS quirk, even after the JIT region has been unmapped,
        // the OS thinks that region is reserved, and as such, can cause Gigacage
        // allocation to fail. We work around this by initializing the Gigacage
        // first.
        // Note: setJITEnabled() is always called extra early in the process
        // bootstrap. Under normal operation (when setJITEnabled() isn't called
        // at all), we naturally initialize the Gigacage before we allocate the
        // JIT region. Hence, this workaround merely ensures the same allocation
        // ordering.
        Gigacage::ensureGigacage();

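        // Allocate and unmap a tiny MAP_JIT page, then verify that a subsequent MAP_JIT
        // allocation fails: after this, the process can no longer create executable memory.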
        constexpr size_t size = 1;
        constexpr int protection = PROT_READ | PROT_WRITE | PROT_EXEC;
        constexpr int flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
        constexpr int fd = OSAllocator::JSJITCodePages;
        void* allocation = mmap(nullptr, size, protection, flags, fd, 0);
        const void* executableMemoryAllocationFailure = reinterpret_cast<void*>(-1);
        RELEASE_ASSERT_WITH_MESSAGE(allocation && allocation != executableMemoryAllocationFailure, "We should not have allocated executable memory before disabling the JIT.");
        RELEASE_ASSERT_WITH_MESSAGE(!munmap(allocation, size), "Unmapping executable memory should succeed so we do not have any executable memory in the address space");
        RELEASE_ASSERT_WITH_MESSAGE(mmap(nullptr, size, protection, flags, fd, 0) == executableMemoryAllocationFailure, "Allocating executable memory should fail after setJITEnabled(false) is called.");
    }
#endif
}

class FixedVMPoolExecutableAllocator final : public MetaAllocator {
    WTF_MAKE_FAST_ALLOCATED;
public:
    FixedVMPoolExecutableAllocator()
        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
    {
        if (!isJITEnabled())
            return;

        size_t reservationSize;
        if (Options::jitMemoryReservationSize())
            reservationSize = Options::jitMemoryReservationSize();
        else
            reservationSize = fixedExecutableMemoryPoolSize;
        reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2);

        auto tryCreatePageReservation = [] (size_t reservationSize) {
#if OS(LINUX)
            // If we use an uncommitted reservation, the mmap operation is recorded with a
            // small page size in perf's output, which breaks the subsequent JIT code logging:
            // some of the JIT code is not recorded correctly. To avoid this problem, use a
            // committed reservation when perf JITDump logging is needed.
            if (Options::logJITCodeForPerf())
                return PageReservation::reserveAndCommitWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#endif
            return PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        };

        m_reservation = tryCreatePageReservation(reservationSize);
        if (m_reservation) {
            ASSERT(m_reservation.size() == reservationSize);
            void* reservationBase = m_reservation.base();

#if ENABLE(FAST_JIT_PERMISSIONS) && !ENABLE(SEPARATED_WX_HEAP)
            RELEASE_ASSERT(os_thread_self_restrict_rwx_is_supported());
            os_thread_self_restrict_rwx_to_rx();

#else // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
#if ENABLE(FAST_JIT_PERMISSIONS)
            if (os_thread_self_restrict_rwx_is_supported()) {
                g_jscConfig.useFastPermisionsJITCopy = true;
                os_thread_self_restrict_rwx_to_rx();
            } else
#endif
            if (Options::useSeparatedWXHeap()) {
                // First page of our JIT allocation is reserved.
                ASSERT(reservationSize >= pageSize() * 2);
                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
                reservationSize -= pageSize();
                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
            }
#endif // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)

            addFreshFreeSpace(reservationBase, reservationSize);

            ASSERT(bytesReserved() == reservationSize); // Since our executable memory is fixed-sized, bytesReserved is never changed after initialization.

            void* reservationEnd = reinterpret_cast<uint8_t*>(reservationBase) + reservationSize;

            g_jscConfig.startExecutableMemory = tagCodePtr<ExecutableMemoryPtrTag>(reservationBase);
            g_jscConfig.endExecutableMemory = tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd);
        }
    }

    virtual ~FixedVMPoolExecutableAllocator();

    void* memoryStart() { return untagCodePtr<ExecutableMemoryPtrTag>(g_jscConfig.startExecutableMemory); }
    void* memoryEnd() { return untagCodePtr<ExecutableMemoryPtrTag>(g_jscConfig.endExecutableMemory); }
    bool isJITPC(void* pc) { return memoryStart() <= pc && pc < memoryEnd(); }

protected:
    FreeSpacePtr allocateNewSpace(size_t&) override
    {
        // We're operating in a fixed pool, so new allocation is always prohibited.
        return nullptr;
    }

    void notifyNeedPage(void* page, size_t count) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        UNUSED_PARAM(page);
        UNUSED_PARAM(count);
#else
        m_reservation.commit(page, pageSize() * count);
#endif
    }

    void notifyPageIsFree(void* page, size_t count) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        for (;;) {
            int result = madvise(page, pageSize() * count, MADV_FREE);
            if (!result)
                return;
            ASSERT(result == -1);
            if (errno != EAGAIN) {
                RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
            }
        }
#else
        m_reservation.decommit(page, pageSize() * count);
#endif
    }

private:
#if OS(DARWIN) && HAVE(REMAP_JIT)
    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
    {
        mach_vm_address_t writableAddr = 0;

        // Create a second mapping of the JIT region at a random address.
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
#if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR;
#endif
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);

        bool remapSucceeded = (ret == KERN_SUCCESS);
        if (!remapSucceeded)
            return;

        // Assemble a thunk that will serve as the means for writing into the JIT region.
        MacroAssemblerCodeRef<JITThunkPtrTag> writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);

        int result = 0;

#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
        // Prevent reading the write thunk code.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(stubBase), stubSize, true, VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);
#endif

        // Prevent writing into the executable JIT mapping.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(jitBase), jitSize, true, VM_PROT_READ | VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);

        // Prevent execution in the writable JIT mapping.
        result = vm_protect(mach_task_self(), static_cast<vm_address_t>(writableAddr), jitSize, true, VM_PROT_READ | VM_PROT_WRITE);
        RELEASE_ASSERT(!result);

        // Zero out writableAddr to avoid leaking the address of the writable mapping.
        memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));

#if ENABLE(SEPARATED_WX_HEAP)
        g_jscConfig.jitWriteSeparateHeaps = reinterpret_cast<JITWriteSeparateHeapsFunction>(writeThunk.code().executableAddress());
#endif
    }

#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
    {
        using namespace ARM64Registers;
        using TrustedImm32 = MacroAssembler::TrustedImm32;

        MacroAssembler jit;

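        // The generated thunk is effectively a memcpy into the writable alias of the JIT
        // region. It matches genericWriteToJITRegion's signature: x0 = offset into the JIT
        // region, x1 = source data, x2 = byte count. x0 is rebased onto writableAddr below.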
        jit.tagReturnAddress();
        jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
        jit.addPtr(x7, x0);

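        // x3 is the destination cursor. Copies of fewer than 64 bytes skip the bulk loop
        // and take the small-copy path at the end of the thunk.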
        jit.move(x0, x3);
        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));

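        // Bulk-loop prologue: round the destination up to a 32-byte boundary, copy the
        // unaligned head via x12-x15, and prime x8-x11 with the next 32 source bytes.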
        jit.add64(TrustedImm32(32), x3);
        jit.and64(TrustedImm32(-32), x3);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.sub64(x3, x0, x5);
        jit.addPtr(x5, x1);

        jit.loadPair64(x1, x8, x9);
        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.sub64(x5, x2);
        jit.storePair64(x12, x13, x0);
        jit.storePair64(x14, x15, x0, TrustedImm32(16));
        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);

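        // Main loop: store 32 bytes per iteration with non-temporal pair accesses while
        // streaming in the next 32 source bytes.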
        MacroAssembler::Label copyLoop = jit.label();
        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
        jit.add64(TrustedImm32(32), x3);
        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);

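        // Epilogue: flush the 32 bytes still held in x8-x11, then write the final
        // (possibly overlapping) 32-byte tail loaded from the end of the source.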
        cleanup.link(&jit);
        jit.add64(x2, x1);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.storePair64(x8, x9, x3);
        jit.storePair64(x10, x11, x3, TrustedImm32(16));
        jit.addPtr(x2, x3);
        jit.storePair64(x12, x13, x3, TrustedImm32(32));
        jit.storePair64(x14, x15, x3, TrustedImm32(48));
        jit.ret();

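        // Small-copy path: copy 8 bytes at a time while at least 8 bytes remain, then
        // finish byte by byte.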
        MacroAssembler::Label local0 = jit.label();
        jit.load64(x1, PostIndex(8), x6);
        jit.store64(x6, x3, PostIndex(8));
        smallCopy.link(&jit);
        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
        MacroAssembler::Label local1 = jit.label();
        jit.load8(x1, PostIndex(1), x6);
        jit.store8(x6, x3, PostIndex(1));
        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
        local2.link(&jit);
        jit.ret();

        auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
        LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
        // We don't use FINALIZE_CODE() for two reasons.
        // The first is that we don't want the writable address, as disassembled instructions,
        // to appear in the console or anywhere in memory, via the PrintStream buffer.
        // The second is that we can't guarantee the code is readable when using the
        // asyncDisassembly option, as our caller will set our pages execute-only.
        return linkBuffer.finalizeCodeWithoutDisassembly<JITThunkPtrTag>();
    }
#else // not CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
    static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
    {
        memcpy((void*)(g_jscConfig.startOfFixedWritableMemoryPool + offset), data, dataSize);
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* address, void*, size_t)
    {
        g_jscConfig.startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
        void* function = reinterpret_cast<void*>(&genericWriteToJITRegion);
#if CPU(ARM_THUMB2)
        // Handle thumb offset
        uintptr_t functionAsInt = reinterpret_cast<uintptr_t>(function);
        functionAsInt -= 1;
        function = reinterpret_cast<void*>(functionAsInt);
#endif
        auto codePtr = MacroAssemblerCodePtr<JITThunkPtrTag>(tagCFunctionPtr<JITThunkPtrTag>(function));
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(codePtr);
    }
#endif // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)

#else // OS(DARWIN) && HAVE(REMAP_JIT)
    void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
    {
    }
#endif

private:
    PageReservation m_reservation;
};

FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
    m_reservation.deallocate();
}

// Keep this pointer in a mutable global variable to help Leaks find it.
// But we do not use this pointer.
static FixedVMPoolExecutableAllocator* globalFixedVMPoolExecutableAllocatorToWorkAroundLeaks = nullptr;
void ExecutableAllocator::initializeUnderlyingAllocator()
{
    RELEASE_ASSERT(!g_jscConfig.fixedVMPoolExecutableAllocator);
    g_jscConfig.fixedVMPoolExecutableAllocator = new FixedVMPoolExecutableAllocator();
    globalFixedVMPoolExecutableAllocatorToWorkAroundLeaks = g_jscConfig.fixedVMPoolExecutableAllocator;
    CodeProfiling::notifyAllocator(g_jscConfig.fixedVMPoolExecutableAllocator);
}

bool ExecutableAllocator::isValid() const
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::isValid();
    return !!allocator->bytesReserved();
}

bool ExecutableAllocator::underMemoryPressure()
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::underMemoryPressure();
    return allocator->bytesAllocated() > allocator->bytesReserved() / 2;
}

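// Returns a factor >= 1 that grows as the allocated bytes (plus addedMemoryUsage) approach
// the pool's non-reserve capacity, i.e. bytesReserved() * (1 - executablePoolReservationFraction).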
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::memoryPressureMultiplier(addedMemoryUsage);
    ASSERT(allocator->bytesAllocated() <= allocator->bytesReserved());
    size_t bytesAllocated = allocator->bytesAllocated() + addedMemoryUsage;
    size_t bytesAvailable = static_cast<size_t>(
        allocator->bytesReserved() * (1 - executablePoolReservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    double result = 1.0;
    size_t divisor = bytesAvailable - bytesAllocated;
    if (divisor)
        result = static_cast<double>(bytesAvailable) / divisor;
    if (result < 1.0)
        result = 1.0;
    return result;
}

RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::allocate(sizeInBytes, ownerUID, effort);
    if (Options::logExecutableAllocation()) {
        MetaAllocator::Statistics stats = allocator->currentStatistics();
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n");
    }

    if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
        WTFReportBacktrace();
    }

    if (effort == JITCompilationCanFail
        && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
        return nullptr;

    if (effort == JITCompilationCanFail) {
        // Don't allow allocations if we are down to reserve.
        size_t bytesAllocated = allocator->bytesAllocated() + sizeInBytes;
        size_t bytesAvailable = static_cast<size_t>(
            allocator->bytesReserved() * (1 - executablePoolReservationFraction));
        if (bytesAllocated > bytesAvailable) {
            if (Options::logExecutableAllocation())
                dataLog("Allocation failed because bytes allocated ", bytesAllocated, " > ", bytesAvailable, " bytes available.\n");
            return nullptr;
        }
    }

    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        if (effort != JITCompilationCanFail) {
            dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
            CRASH();
        }
        return nullptr;
    }

    void* start = allocator->memoryStart();
    void* end = allocator->memoryEnd();
    void* resultStart = result->start().untaggedPtr();
    void* resultEnd = result->end().untaggedPtr();
    RELEASE_ASSERT(start <= resultStart && resultStart < end);
    RELEASE_ASSERT(start < resultEnd && resultEnd <= end);
    return result;
}

bool ExecutableAllocator::isValidExecutableMemory(const AbstractLocker& locker, void* address)
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::isValidExecutableMemory(locker, address);
    return allocator->isInAllocatedMemory(locker, address);
}

Lock& ExecutableAllocator::getLock() const
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::getLock();
    return allocator->getLock();
}

size_t ExecutableAllocator::committedByteCount()
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return Base::committedByteCount();
    return allocator->bytesCommitted();
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return;
    allocator->dumpProfile();
}
#endif

void* startOfFixedExecutableMemoryPoolImpl()
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return nullptr;
    return allocator->memoryStart();
}

void* endOfFixedExecutableMemoryPoolImpl()
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    if (!allocator)
        return nullptr;
    return allocator->memoryEnd();
}

bool isJITPC(void* pc)
{
    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
    return allocator && allocator->isJITPC(pc);
}

void dumpJITMemory(const void* dst, const void* src, size_t size)
{
    RELEASE_ASSERT(Options::dumpJITMemoryPath());

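    // Each record appended to the dump file is: an 8-byte mach_absolute_time() timestamp,
    // the 8-byte destination address, the 8-byte size, and then the copied bytes. Writes
    // are buffered and flushed on a background serial queue.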
#if OS(DARWIN)
    static int fd = -1;
    static uint8_t* buffer;
    static constexpr size_t bufferSize = fixedExecutableMemoryPoolSize;
    static size_t offset = 0;
    static Lock dumpJITMemoryLock;
    static bool needsToFlush = false;
    static auto flush = [](const AbstractLocker&) {
        if (fd == -1) {
            String path = Options::dumpJITMemoryPath();
            path = path.replace("%pid", String::number(getCurrentProcessID()));
            fd = open(FileSystem::fileSystemRepresentation(path).data(), O_CREAT | O_TRUNC | O_APPEND | O_WRONLY | O_EXLOCK | O_NONBLOCK, 0666);
            RELEASE_ASSERT(fd != -1);
        }
        write(fd, buffer, offset);
        offset = 0;
        needsToFlush = false;
    };

    static std::once_flag once;
    static LazyNeverDestroyed<Ref<WorkQueue>> flushQueue;
    std::call_once(once, [] {
        buffer = bitwise_cast<uint8_t*>(malloc(bufferSize));
        flushQueue.construct(WorkQueue::create("jsc.dumpJITMemory.queue", WorkQueue::Type::Serial, WorkQueue::QOS::Background));
        std::atexit([] {
            LockHolder locker(dumpJITMemoryLock);
            flush(locker);
            close(fd);
            fd = -1;
        });
    });

    static auto enqueueFlush = [](const AbstractLocker&) {
        if (needsToFlush)
            return;

        needsToFlush = true;
        flushQueue.get()->dispatchAfter(Seconds(Options::dumpJITMemoryFlushInterval()), [] {
            LockHolder locker(dumpJITMemoryLock);
            if (!needsToFlush)
                return;
            flush(locker);
        });
    };

    static auto write = [](const AbstractLocker& locker, const void* src, size_t size) {
        if (UNLIKELY(offset + size > bufferSize))
            flush(locker);
        memcpy(buffer + offset, src, size);
        offset += size;
        enqueueFlush(locker);
    };

    LockHolder locker(dumpJITMemoryLock);
    uint64_t time = mach_absolute_time();
    uint64_t dst64 = bitwise_cast<uintptr_t>(dst);
    uint64_t size64 = size;
    TraceScope(DumpJITMemoryStart, DumpJITMemoryStop, time, dst64, size64);
    write(locker, &time, sizeof(time));
    write(locker, &dst64, sizeof(dst64));
    write(locker, &size64, sizeof(size64));
    write(locker, src, size);
#else
    UNUSED_PARAM(dst);
    UNUSED_PARAM(src);
    UNUSED_PARAM(size);
    RELEASE_ASSERT_NOT_REACHED();
#endif
}

} // namespace JSC

#endif // ENABLE(JIT)

namespace JSC {

// Keep this pointer in a mutable global variable to help Leaks find it.
// But we do not use this pointer.
static ExecutableAllocator* globalExecutableAllocatorToWorkAroundLeaks = nullptr;
void ExecutableAllocator::initialize()
{
    g_jscConfig.executableAllocator = new ExecutableAllocator;
    globalExecutableAllocatorToWorkAroundLeaks = g_jscConfig.executableAllocator;
}

ExecutableAllocator& ExecutableAllocator::singleton()
{
    ASSERT(g_jscConfig.executableAllocator);
    return *g_jscConfig.executableAllocator;
}

} // namespace JSC