/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ExecutableAllocator.h"

#if ENABLE(JIT)

#include "CodeProfiling.h"
#include "ExecutableAllocationFuzz.h"
#include "JSCInlines.h"
#include <wtf/FileSystem.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/ProcessID.h>
#include <wtf/SystemTracing.h>
#include <wtf/WorkQueue.h>

#if OS(DARWIN)
#include <mach/mach_time.h>
#include <sys/mman.h>
#endif

#if PLATFORM(IOS_FAMILY)
#include <wtf/cocoa/Entitlements.h>
#endif

#include "LinkBuffer.h"
#include "MacroAssembler.h"

#if PLATFORM(COCOA)
#define HAVE_REMAP_JIT 1
#endif

#if HAVE(REMAP_JIT)
#if CPU(ARM64) && PLATFORM(IOS_FAMILY)
#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
#endif
#endif
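
// When USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) is in effect, the page holding the JIT
// write thunk is later made execute-only (see initializeSeparatedWXHeaps()), so the
// secret writable-mapping address baked into the thunk cannot be read out of memory.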

#if OS(DARWIN)
#include <mach/mach.h>
extern "C" {
    /* Routine mach_vm_remap */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t mach_vm_remap
(
    vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance
);
}

#endif

namespace JSC {

using namespace WTF;

#if defined(FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB) && FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB > 0
static const size_t fixedExecutableMemoryPoolSize = FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB * 1024 * 1024;
#elif CPU(ARM)
static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
#elif CPU(ARM64)
static const size_t fixedExecutableMemoryPoolSize = 128 * 1024 * 1024;
#elif CPU(X86_64)
static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
#else
static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#endif

#if CPU(ARM)
static const double executablePoolReservationFraction = 0.15;
#else
static const double executablePoolReservationFraction = 0.25;
#endif
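
// A slice of the pool is held in reserve: allocations made with JITCompilationCanFail
// are refused once usage would exceed (1 - executablePoolReservationFraction) of the
// reservation. See ExecutableAllocator::allocate() below.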

#if ENABLE(SEPARATED_WX_HEAP)
JS_EXPORT_PRIVATE bool useFastPermisionsJITCopy { false };
JS_EXPORT_PRIVATE JITWriteSeparateHeapsFunction jitWriteSeparateHeapsFunction;
#endif

#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT)
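// Without an execute-only write thunk, genericWriteToJITRegion() below needs the base
// address of the writable alias of the JIT region; it is stashed here by
// jitWriteThunkGenerator().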
static uintptr_t startOfFixedWritableMemoryPool;
#endif

class FixedVMPoolExecutableAllocator;
static FixedVMPoolExecutableAllocator* allocator = nullptr;

static bool s_isJITEnabled = true;
static bool isJITEnabled()
{
#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
    return processHasEntitlement("dynamic-codesigning") && s_isJITEnabled;
#else
    return s_isJITEnabled;
#endif
}

void ExecutableAllocator::setJITEnabled(bool enabled)
{
    ASSERT(!allocator);
    if (s_isJITEnabled == enabled)
        return;

    s_isJITEnabled = enabled;

#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
    if (!enabled) {
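        // On iOS, once a MAP_JIT mapping has been created and destroyed, further MAP_JIT
        // mappings fail. We rely on that here to irreversibly disable executable-memory
        // allocation in this process; the asserts below verify each step.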
        constexpr size_t size = 1;
        constexpr int protection = PROT_READ | PROT_WRITE | PROT_EXEC;
        constexpr int flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
        constexpr int fd = OSAllocator::JSJITCodePages;
        void* allocation = mmap(nullptr, size, protection, flags, fd, 0);
        const void* executableMemoryAllocationFailure = reinterpret_cast<void*>(-1);
        RELEASE_ASSERT_WITH_MESSAGE(allocation && allocation != executableMemoryAllocationFailure, "We should not have allocated executable memory before disabling the JIT.");
        RELEASE_ASSERT_WITH_MESSAGE(!munmap(allocation, size), "Unmapping executable memory should succeed so we do not have any executable memory in the address space");
        RELEASE_ASSERT_WITH_MESSAGE(mmap(nullptr, size, protection, flags, fd, 0) == executableMemoryAllocationFailure, "Allocating executable memory should fail after setJITEnabled(false) is called.");
    }
#endif
}

class FixedVMPoolExecutableAllocator : public MetaAllocator {
    WTF_MAKE_FAST_ALLOCATED;
public:
    FixedVMPoolExecutableAllocator()
        : MetaAllocator(jitAllocationGranule) // Round up all allocations to 32 bytes.
    {
        if (!isJITEnabled())
            return;

        size_t reservationSize;
        if (Options::jitMemoryReservationSize())
            reservationSize = Options::jitMemoryReservationSize();
        else
            reservationSize = fixedExecutableMemoryPoolSize;
        reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2);
        auto tryCreatePageReservation = [] (size_t reservationSize) {
#if OS(LINUX)
            // If we use an uncommitted reservation, the mmap is recorded with a small page size in
            // perf's output, which breaks the subsequent JIT code logging: some JIT code is not
            // recorded correctly. To avoid this, use a committed reservation when perf JITDump
            // logging is needed.
            if (Options::logJITCodeForPerf())
                return PageReservation::reserveAndCommitWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#endif
            return PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        };

        m_reservation = tryCreatePageReservation(reservationSize);
        if (m_reservation) {
            ASSERT(m_reservation.size() == reservationSize);
            void* reservationBase = m_reservation.base();

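            // With fast JIT permissions, the region stays mapped RWX and each thread toggles
            // its own view of it between RX and RW via os_thread_self_restrict_rwx_*, so no
            // separate writable alias is needed.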
#if ENABLE(FAST_JIT_PERMISSIONS) && !ENABLE(SEPARATED_WX_HEAP)
            RELEASE_ASSERT(os_thread_self_restrict_rwx_is_supported());
            os_thread_self_restrict_rwx_to_rx();

#else // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
#if ENABLE(FAST_JIT_PERMISSIONS)
            if (os_thread_self_restrict_rwx_is_supported()) {
                useFastPermisionsJITCopy = true;
                os_thread_self_restrict_rwx_to_rx();
            } else
#endif
            if (Options::useSeparatedWXHeap()) {
                // The first page of our JIT allocation is reserved for the write thunk.
                ASSERT(reservationSize >= pageSize() * 2);
                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
                reservationSize -= pageSize();
                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
            }
#endif // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)

            addFreshFreeSpace(reservationBase, reservationSize);

            void* reservationEnd = reinterpret_cast<uint8_t*>(reservationBase) + reservationSize;

            m_memoryStart = MacroAssemblerCodePtr<ExecutableMemoryPtrTag>(tagCodePtr<ExecutableMemoryPtrTag>(reservationBase));
            m_memoryEnd = MacroAssemblerCodePtr<ExecutableMemoryPtrTag>(tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd));
        }
    }

    virtual ~FixedVMPoolExecutableAllocator();

    void* memoryStart() { return m_memoryStart.untaggedExecutableAddress(); }
    void* memoryEnd() { return m_memoryEnd.untaggedExecutableAddress(); }
    bool isJITPC(void* pc) { return memoryStart() <= pc && pc < memoryEnd(); }

protected:
    FreeSpacePtr allocateNewSpace(size_t&) override
    {
        // We're operating in a fixed pool, so new allocation is always prohibited.
        return nullptr;
    }

    void notifyNeedPage(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        UNUSED_PARAM(page);
#else
        m_reservation.commit(page, pageSize());
#endif
    }

    void notifyPageIsFree(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        for (;;) {
            int result = madvise(page, pageSize(), MADV_FREE);
            if (!result)
                return;
            ASSERT(result == -1);
            if (errno != EAGAIN) {
                RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
            }
        }
#else
        m_reservation.decommit(page, pageSize());
#endif
    }

private:
#if OS(DARWIN) && HAVE(REMAP_JIT)
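    // The separated W^X scheme maps the JIT region twice: the primary mapping stays
    // readable/executable, while a second mapping of the same pages is placed at a
    // randomized address and kept readable/writable. All writes into JIT memory are
    // funneled through a generated thunk that embeds the writable alias's address.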
    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
    {
        mach_vm_address_t writableAddr = 0;

        // Create a second mapping of the JIT region at a random address.
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
#if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR;
#endif
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);

        bool remapSucceeded = (ret == KERN_SUCCESS);
        if (!remapSucceeded)
            return;

        // Assemble a thunk that will serve as the means for writing into the JIT region.
        MacroAssemblerCodeRef<JITThunkPtrTag> writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);

        int result = 0;

#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
        // Prevent reading the write thunk code.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(stubBase), stubSize, true, VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);
#endif

        // Prevent writing into the executable JIT mapping.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(jitBase), jitSize, true, VM_PROT_READ | VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);

        // Prevent execution in the writable JIT mapping.
        result = vm_protect(mach_task_self(), static_cast<vm_address_t>(writableAddr), jitSize, true, VM_PROT_READ | VM_PROT_WRITE);
        RELEASE_ASSERT(!result);

        // Zero out writableAddr to avoid leaking the address of the writable mapping.
        memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));

#if ENABLE(SEPARATED_WX_HEAP)
        jitWriteSeparateHeapsFunction = reinterpret_cast<JITWriteSeparateHeapsFunction>(writeThunk.code().executableAddress());
#endif
    }

#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
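    // The generated thunk is effectively a memcpy into the writable alias:
    // x0 = destination offset within the JIT region, x1 = source, x2 = byte count.
    // It adds the writable base to x0, then copies in 32-byte chunks for large
    // sizes, finishing with 8-byte and single-byte copies for the tail.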
    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
    {
        using namespace ARM64Registers;
        using TrustedImm32 = MacroAssembler::TrustedImm32;

        MacroAssembler jit;

        jit.tagReturnAddress();
        jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
        jit.addPtr(x7, x0);

        jit.move(x0, x3);
        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));

        jit.add64(TrustedImm32(32), x3);
        jit.and64(TrustedImm32(-32), x3);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.sub64(x3, x0, x5);
        jit.addPtr(x5, x1);

        jit.loadPair64(x1, x8, x9);
        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.sub64(x5, x2);
        jit.storePair64(x12, x13, x0);
        jit.storePair64(x14, x15, x0, TrustedImm32(16));
        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);

        MacroAssembler::Label copyLoop = jit.label();
        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
        jit.add64(TrustedImm32(32), x3);
        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);

        cleanup.link(&jit);
        jit.add64(x2, x1);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.storePair64(x8, x9, x3);
        jit.storePair64(x10, x11, x3, TrustedImm32(16));
        jit.addPtr(x2, x3);
        jit.storePair64(x12, x13, x3, TrustedImm32(32));
        jit.storePair64(x14, x15, x3, TrustedImm32(48));
        jit.ret();

        MacroAssembler::Label local0 = jit.label();
        jit.load64(x1, PostIndex(8), x6);
        jit.store64(x6, x3, PostIndex(8));
        smallCopy.link(&jit);
        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
        MacroAssembler::Label local1 = jit.label();
        jit.load8(x1, PostIndex(1), x6);
        jit.store8(x6, x3, PostIndex(1));
        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
        local2.link(&jit);
        jit.ret();

        auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
        LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
        // We don't use FINALIZE_CODE() for two reasons.
        // The first is that we don't want the writable address, as disassembled instructions,
        // to appear in the console or anywhere in memory, via the PrintStream buffer.
        // The second is that we can't guarantee that the code is readable when using the
        // asyncDisassembly option, as our caller will set our pages execute-only.
        return linkBuffer.finalizeCodeWithoutDisassembly<JITThunkPtrTag>();
    }
#else // not CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
    static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
    {
        memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize);
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* address, void*, size_t)
    {
        startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
        void* function = reinterpret_cast<void*>(&genericWriteToJITRegion);
#if CPU(ARM_THUMB2)
        // Handle the Thumb offset: pointers to Thumb functions have the low bit set; clear it to get the actual code address.
        uintptr_t functionAsInt = reinterpret_cast<uintptr_t>(function);
        functionAsInt -= 1;
        function = reinterpret_cast<void*>(functionAsInt);
#endif
        auto codePtr = MacroAssemblerCodePtr<JITThunkPtrTag>(tagCFunctionPtr<JITThunkPtrTag>(function));
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(codePtr);
    }
#endif // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)

#else // OS(DARWIN) && HAVE(REMAP_JIT)
    void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
    {
    }
#endif

private:
    PageReservation m_reservation;
    MacroAssemblerCodePtr<ExecutableMemoryPtrTag> m_memoryStart;
    MacroAssemblerCodePtr<ExecutableMemoryPtrTag> m_memoryEnd;
};

FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
    m_reservation.deallocate();
}

void ExecutableAllocator::initializeUnderlyingAllocator()
{
    ASSERT(!allocator);
    allocator = new FixedVMPoolExecutableAllocator();
    CodeProfiling::notifyAllocator(allocator);
}

bool ExecutableAllocator::isValid() const
{
    if (!allocator)
        return Base::isValid();
    return !!allocator->bytesReserved();
}

bool ExecutableAllocator::underMemoryPressure()
{
    if (!allocator)
        return Base::underMemoryPressure();
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    return statistics.bytesAllocated > statistics.bytesReserved / 2;
}

double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    if (!allocator)
        return Base::memoryPressureMultiplier(addedMemoryUsage);
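    // The multiplier below is bytesAvailable / (bytesAvailable - bytesAllocated), which
    // grows without bound as the usable (non-reserve) part of the pool fills. For example,
    // with a 128 MB pool and a 25% reserve, bytesAvailable is 96 MB: at 48 MB allocated
    // the multiplier is 2, and at 72 MB it is 4.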
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
    size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
    size_t bytesAvailable = static_cast<size_t>(
        statistics.bytesReserved * (1 - executablePoolReservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    double result = 1.0;
    size_t divisor = bytesAvailable - bytesAllocated;
    if (divisor)
        result = static_cast<double>(bytesAvailable) / divisor;
    if (result < 1.0)
        result = 1.0;
    return result;
}

RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    if (!allocator)
        return Base::allocate(sizeInBytes, ownerUID, effort);
    if (Options::logExecutableAllocation()) {
        MetaAllocator::Statistics stats = allocator->currentStatistics();
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n");
    }

    if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
        WTFReportBacktrace();
    }

    if (effort == JITCompilationCanFail
        && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
        return nullptr;

    if (effort == JITCompilationCanFail) {
        // Don't allow allocations if we are down to our reserve.
        MetaAllocator::Statistics statistics = allocator->currentStatistics();
        size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
        size_t bytesAvailable = static_cast<size_t>(
            statistics.bytesReserved * (1 - executablePoolReservationFraction));
        if (bytesAllocated > bytesAvailable) {
            if (Options::logExecutableAllocation())
                dataLog("Allocation failed because bytes allocated ", bytesAllocated, " > ", bytesAvailable, " bytes available.\n");
            return nullptr;
        }
    }

    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        if (effort != JITCompilationCanFail) {
            dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
            CRASH();
        }
        return nullptr;
    }

#if CPU(ARM64E)
    void* start = allocator->memoryStart();
    void* end = allocator->memoryEnd();
    void* resultStart = result->start().untaggedPtr();
    void* resultEnd = result->end().untaggedPtr();
    RELEASE_ASSERT(start <= resultStart && resultStart < end);
    RELEASE_ASSERT(start < resultEnd && resultEnd <= end);
#endif
    return result;
}

bool ExecutableAllocator::isValidExecutableMemory(const AbstractLocker& locker, void* address)
{
    if (!allocator)
        return Base::isValidExecutableMemory(locker, address);
    return allocator->isInAllocatedMemory(locker, address);
}

Lock& ExecutableAllocator::getLock() const
{
    if (!allocator)
        return Base::getLock();
    return allocator->getLock();
}

size_t ExecutableAllocator::committedByteCount()
{
    if (!allocator)
        return Base::committedByteCount();
    return allocator->bytesCommitted();
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
    if (!allocator)
        return;
    allocator->dumpProfile();
}
#endif

void* startOfFixedExecutableMemoryPoolImpl()
{
    if (!allocator)
        return nullptr;
    return allocator->memoryStart();
}

void* endOfFixedExecutableMemoryPoolImpl()
{
    if (!allocator)
        return nullptr;
    return allocator->memoryEnd();
}

bool isJITPC(void* pc)
{
    return allocator && allocator->isJITPC(pc);
}

void dumpJITMemory(const void* dst, const void* src, size_t size)
{
    ASSERT(Options::dumpJITMemoryPath());

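    // Each record appended to the dump file is: an 8-byte mach_absolute_time() timestamp,
    // the 8-byte destination address, the 8-byte copy size, and then the copied bytes.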
#if OS(DARWIN)
    static int fd = -1;
    static uint8_t* buffer;
    static constexpr size_t bufferSize = fixedExecutableMemoryPoolSize;
    static size_t offset = 0;
    static Lock dumpJITMemoryLock;
    static bool needsToFlush = false;
    static auto flush = [](const AbstractLocker&) {
        if (fd == -1) {
            String path = Options::dumpJITMemoryPath();
            path = path.replace("%pid", String::number(getCurrentProcessID()));
            fd = open(FileSystem::fileSystemRepresentation(path).data(), O_CREAT | O_TRUNC | O_APPEND | O_WRONLY | O_EXLOCK | O_NONBLOCK, 0666);
            RELEASE_ASSERT(fd != -1);
        }
        write(fd, buffer, offset);
        offset = 0;
        needsToFlush = false;
    };

    static std::once_flag once;
    static LazyNeverDestroyed<Ref<WorkQueue>> flushQueue;
    std::call_once(once, [] {
        buffer = bitwise_cast<uint8_t*>(malloc(bufferSize));
        flushQueue.construct(WorkQueue::create("jsc.dumpJITMemory.queue", WorkQueue::Type::Serial, WorkQueue::QOS::Background));
        std::atexit([] {
            LockHolder locker(dumpJITMemoryLock);
            flush(locker);
            close(fd);
            fd = -1;
        });
    });

    static auto enqueueFlush = [](const AbstractLocker&) {
        if (needsToFlush)
            return;

        needsToFlush = true;
        flushQueue.get()->dispatchAfter(Seconds(Options::dumpJITMemoryFlushInterval()), [] {
            LockHolder locker(dumpJITMemoryLock);
            if (!needsToFlush)
                return;
            flush(locker);
        });
    };

    static auto write = [](const AbstractLocker& locker, const void* src, size_t size) {
        if (UNLIKELY(offset + size > bufferSize))
            flush(locker);
        memcpy(buffer + offset, src, size);
        offset += size;
        enqueueFlush(locker);
    };

    LockHolder locker(dumpJITMemoryLock);
    uint64_t time = mach_absolute_time();
    uint64_t dst64 = bitwise_cast<uintptr_t>(dst);
    uint64_t size64 = size;
    TraceScope scope(DumpJITMemoryStart, DumpJITMemoryStop, time, dst64, size64);
    write(locker, &time, sizeof(time));
    write(locker, &dst64, sizeof(dst64));
    write(locker, &size64, sizeof(size64));
    write(locker, src, size);
#else
    UNUSED_PARAM(dst);
    UNUSED_PARAM(src);
    UNUSED_PARAM(size);
    RELEASE_ASSERT_NOT_REACHED();
#endif
}

} // namespace JSC

#endif // ENABLE(JIT)

namespace JSC {

static ExecutableAllocator* executableAllocator;

void ExecutableAllocator::initialize()
{
    executableAllocator = new ExecutableAllocator;
}

ExecutableAllocator& ExecutableAllocator::singleton()
{
    ASSERT(executableAllocator);
    return *executableAllocator;
}

} // namespace JSC