1 | /* |
2 | * Copyright (C) 2016-2018 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "WasmMemory.h" |
28 | #include "WasmInstance.h" |
29 | |
30 | #if ENABLE(WEBASSEMBLY) |
31 | |
32 | #include "Options.h" |
33 | #include <wtf/DataLog.h> |
34 | #include <wtf/Gigacage.h> |
35 | #include <wtf/Lock.h> |
36 | #include <wtf/OSAllocator.h> |
37 | #include <wtf/PageBlock.h> |
38 | #include <wtf/Platform.h> |
39 | #include <wtf/PrintStream.h> |
40 | #include <wtf/RAMSize.h> |
41 | #include <wtf/Vector.h> |
42 | |
#include <cerrno>
#include <cstring>
#include <mutex>
#include <sys/mman.h>
45 | |
46 | namespace JSC { namespace Wasm { |
47 | |
48 | // FIXME: We could be smarter about memset / mmap / madvise. https://bugs.webkit.org/show_bug.cgi?id=170343 |
49 | // FIXME: Give up some of the cached fast memories if the GC determines it's easy to get them back, and they haven't been used in a while. https://bugs.webkit.org/show_bug.cgi?id=170773 |
50 | // FIXME: Limit slow memory size. https://bugs.webkit.org/show_bug.cgi?id=170825 |
51 | |
52 | namespace { |
53 | |
54 | constexpr bool verbose = false; |
55 | |
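// Kept out of line so that crashing because fast memory was unavailable (when
// Options::crashIfWebAssemblyCantFastMemory() is set) has its own, recognizable crash site.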
56 | NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntGetFastMemory() { CRASH(); } |
57 | |
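// An allocation result: the base pointer (null on failure) plus how the caller should react,
// e.g. notify the memory-pressure handler, or synchronously reclaim memory and retry.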
58 | struct MemoryResult { |
59 | enum Kind { |
60 | Success, |
61 | SuccessAndNotifyMemoryPressure, |
62 | SyncTryToReclaimMemory |
63 | }; |
64 | |
65 | static const char* toString(Kind kind) |
66 | { |
67 | switch (kind) { |
        case Success:
            return "Success";
        case SuccessAndNotifyMemoryPressure:
            return "SuccessAndNotifyMemoryPressure";
        case SyncTryToReclaimMemory:
            return "SyncTryToReclaimMemory";
74 | } |
75 | RELEASE_ASSERT_NOT_REACHED(); |
76 | return nullptr; |
77 | } |
78 | |
79 | MemoryResult() { } |
80 | |
81 | MemoryResult(void* basePtr, Kind kind) |
82 | : basePtr(basePtr) |
83 | , kind(kind) |
84 | { |
85 | } |
86 | |
87 | void dump(PrintStream& out) const |
88 | { |
89 | out.print("{basePtr = " , RawPointer(basePtr), ", kind = " , toString(kind), "}" ); |
90 | } |
91 | |
92 | void* basePtr; |
93 | Kind kind; |
94 | }; |
95 | |
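// Process-wide bookkeeping for wasm memories: the set of live fast-memory reservations and a
// soft cap on the physical bytes all wasm memories may commit (see memoryLimit()).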
96 | class MemoryManager { |
97 | public: |
98 | MemoryManager() |
99 | : m_maxFastMemoryCount(Options::maxNumWebAssemblyFastMemories()) |
100 | { |
101 | } |
102 | |
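    // Reserves one of the large "fast" mappings used by MemoryMode::Signaling. The count is
    // capped by Options::maxNumWebAssemblyFastMemories(); nearing the cap or failing to
    // allocate asks the caller to notify memory pressure or reclaim memory, respectively.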
103 | MemoryResult tryAllocateFastMemory() |
104 | { |
105 | MemoryResult result = [&] { |
106 | auto holder = holdLock(m_lock); |
107 | if (m_fastMemories.size() >= m_maxFastMemoryCount) |
108 | return MemoryResult(nullptr, MemoryResult::SyncTryToReclaimMemory); |
109 | |
110 | void* result = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, Memory::fastMappedBytes()); |
111 | if (!result) |
112 | return MemoryResult(nullptr, MemoryResult::SyncTryToReclaimMemory); |
113 | |
114 | m_fastMemories.append(result); |
115 | |
116 | return MemoryResult( |
117 | result, |
118 | m_fastMemories.size() >= m_maxFastMemoryCount / 2 ? MemoryResult::SuccessAndNotifyMemoryPressure : MemoryResult::Success); |
119 | }(); |
120 | |
121 | if (Options::logWebAssemblyMemory()) |
122 | dataLog("Allocated virtual: " , result, "; state: " , *this, "\n" ); |
123 | |
124 | return result; |
125 | } |
126 | |
127 | void freeFastMemory(void* basePtr) |
128 | { |
129 | { |
130 | auto holder = holdLock(m_lock); |
131 | Gigacage::freeVirtualPages(Gigacage::Primitive, basePtr, Memory::fastMappedBytes()); |
132 | m_fastMemories.removeFirst(basePtr); |
133 | } |
134 | |
135 | if (Options::logWebAssemblyMemory()) |
136 | dataLog("Freed virtual; state: " , *this, "\n" ); |
137 | } |
138 | |
139 | bool isAddressInFastMemory(void* address) |
140 | { |
141 | // NOTE: This can be called from a signal handler, but only after we proved that we're in JIT code. |
142 | auto holder = holdLock(m_lock); |
143 | for (void* memory : m_fastMemories) { |
144 | char* start = static_cast<char*>(memory); |
145 | if (start <= address && address <= start + Memory::fastMappedBytes()) |
146 | return true; |
147 | } |
148 | return false; |
149 | } |
150 | |
151 | // We allow people to "commit" more wasm memory than there is on the system since most of the time |
    // people don't actually write to most of that memory. There is some chance that this gets the
    // process jetsammed, but that's possible anyway.
154 | inline size_t memoryLimit() const { return ramSize() * 3; } |
155 | |
156 | // FIXME: Ideally, bmalloc would have this kind of mechanism. Then, we would just forward to that |
157 | // mechanism here. |
158 | MemoryResult::Kind tryAllocatePhysicalBytes(size_t bytes) |
159 | { |
160 | MemoryResult::Kind result = [&] { |
161 | auto holder = holdLock(m_lock); |
162 | if (m_physicalBytes + bytes > memoryLimit()) |
163 | return MemoryResult::SyncTryToReclaimMemory; |
164 | |
165 | m_physicalBytes += bytes; |
166 | |
167 | if (m_physicalBytes >= memoryLimit() / 2) |
168 | return MemoryResult::SuccessAndNotifyMemoryPressure; |
169 | |
170 | return MemoryResult::Success; |
171 | }(); |
172 | |
173 | if (Options::logWebAssemblyMemory()) |
174 | dataLog("Allocated physical: " , bytes, ", " , MemoryResult::toString(result), "; state: " , *this, "\n" ); |
175 | |
176 | return result; |
177 | } |
178 | |
179 | void freePhysicalBytes(size_t bytes) |
180 | { |
181 | { |
182 | auto holder = holdLock(m_lock); |
183 | m_physicalBytes -= bytes; |
184 | } |
185 | |
186 | if (Options::logWebAssemblyMemory()) |
187 | dataLog("Freed physical: " , bytes, "; state: " , *this, "\n" ); |
188 | } |
189 | |
190 | void dump(PrintStream& out) const |
191 | { |
192 | out.print("fast memories = " , m_fastMemories.size(), "/" , m_maxFastMemoryCount, ", bytes = " , m_physicalBytes, "/" , memoryLimit()); |
193 | } |
194 | |
195 | private: |
196 | Lock m_lock; |
197 | unsigned m_maxFastMemoryCount { 0 }; |
198 | Vector<void*> m_fastMemories; |
199 | size_t m_physicalBytes { 0 }; |
200 | }; |
201 | |
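// Lazily-created, never-destroyed singleton; it may be queried at any point in the process's
// lifetime, including from the signal-handler path via isAddressInFastMemory().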
202 | static MemoryManager& memoryManager() |
203 | { |
204 | static std::once_flag onceFlag; |
205 | static MemoryManager* manager; |
206 | std::call_once( |
207 | onceFlag, |
208 | [] { |
209 | manager = new MemoryManager(); |
210 | }); |
211 | return *manager; |
212 | } |
213 | |
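// Retries the allocation once: if the first attempt reports SyncTryToReclaimMemory, the
// reclaim callback runs synchronously and the allocation is attempted a second time.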
214 | template<typename Func> |
215 | bool tryAllocate(const Func& allocate, const WTF::Function<void(Memory::NotifyPressure)>& notifyMemoryPressure, const WTF::Function<void(Memory::SyncTryToReclaim)>& syncTryToReclaimMemory) |
216 | { |
217 | unsigned numTries = 2; |
218 | bool done = false; |
219 | for (unsigned i = 0; i < numTries && !done; ++i) { |
220 | switch (allocate()) { |
221 | case MemoryResult::Success: |
222 | done = true; |
223 | break; |
224 | case MemoryResult::SuccessAndNotifyMemoryPressure: |
225 | if (notifyMemoryPressure) |
226 | notifyMemoryPressure(Memory::NotifyPressureTag); |
227 | done = true; |
228 | break; |
229 | case MemoryResult::SyncTryToReclaimMemory: |
230 | if (i + 1 == numTries) |
231 | break; |
232 | if (syncTryToReclaimMemory) |
233 | syncTryToReclaimMemory(Memory::SyncTryToReclaimTag); |
234 | break; |
235 | } |
236 | } |
237 | return done; |
238 | } |
239 | |
240 | } // anonymous namespace |
241 | |
242 | Memory::Memory() |
243 | { |
244 | } |
245 | |
246 | Memory::Memory(PageCount initial, PageCount maximum, Function<void(NotifyPressure)>&& notifyMemoryPressure, Function<void(SyncTryToReclaim)>&& syncTryToReclaimMemory, WTF::Function<void(GrowSuccess, PageCount, PageCount)>&& growSuccessCallback) |
247 | : m_initial(initial) |
248 | , m_maximum(maximum) |
249 | , m_notifyMemoryPressure(WTFMove(notifyMemoryPressure)) |
250 | , m_syncTryToReclaimMemory(WTFMove(syncTryToReclaimMemory)) |
251 | , m_growSuccessCallback(WTFMove(growSuccessCallback)) |
252 | { |
253 | ASSERT(!initial.bytes()); |
254 | ASSERT(m_mode == MemoryMode::BoundsChecking); |
255 | dataLogLnIf(verbose, "Memory::Memory allocating " , *this); |
256 | ASSERT(!memory()); |
257 | } |
258 | |
259 | Memory::Memory(void* memory, PageCount initial, PageCount maximum, size_t mappedCapacity, MemoryMode mode, Function<void(NotifyPressure)>&& notifyMemoryPressure, Function<void(SyncTryToReclaim)>&& syncTryToReclaimMemory, WTF::Function<void(GrowSuccess, PageCount, PageCount)>&& growSuccessCallback) |
260 | : m_memory(memory, initial.bytes()) |
261 | , m_size(initial.bytes()) |
262 | , m_initial(initial) |
263 | , m_maximum(maximum) |
264 | , m_mappedCapacity(mappedCapacity) |
265 | , m_mode(mode) |
266 | , m_notifyMemoryPressure(WTFMove(notifyMemoryPressure)) |
267 | , m_syncTryToReclaimMemory(WTFMove(syncTryToReclaimMemory)) |
268 | , m_growSuccessCallback(WTFMove(growSuccessCallback)) |
269 | { |
270 | dataLogLnIf(verbose, "Memory::Memory allocating " , *this); |
271 | } |
272 | |
273 | Ref<Memory> Memory::create() |
274 | { |
275 | return adoptRef(*new Memory()); |
276 | } |
277 | |
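// Allocation strategy: prefer a fast (Signaling-mode) memory when useWebAssemblyFastMemory()
// is enabled, then fall back to a plain Gigacage allocation with explicit bounds checking.
// Returns nullptr when the client should throw an out-of-memory error.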
278 | RefPtr<Memory> Memory::tryCreate(PageCount initial, PageCount maximum, WTF::Function<void(NotifyPressure)>&& notifyMemoryPressure, WTF::Function<void(SyncTryToReclaim)>&& syncTryToReclaimMemory, WTF::Function<void(GrowSuccess, PageCount, PageCount)>&& growSuccessCallback) |
279 | { |
280 | ASSERT(initial); |
281 | RELEASE_ASSERT(!maximum || maximum >= initial); // This should be guaranteed by our caller. |
282 | |
283 | const size_t initialBytes = initial.bytes(); |
284 | const size_t maximumBytes = maximum ? maximum.bytes() : 0; |
285 | |
286 | if (initialBytes > MAX_ARRAY_BUFFER_SIZE) |
287 | return nullptr; // Client will throw OOMError. |
288 | |
289 | if (maximum && !maximumBytes) { |
        // The user specified a zero maximum; the initial size must then also be zero.
291 | RELEASE_ASSERT(!initialBytes); |
292 | return adoptRef(new Memory(initial, maximum, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); |
293 | } |
294 | |
295 | bool done = tryAllocate( |
296 | [&] () -> MemoryResult::Kind { |
297 | return memoryManager().tryAllocatePhysicalBytes(initialBytes); |
298 | }, notifyMemoryPressure, syncTryToReclaimMemory); |
299 | if (!done) |
300 | return nullptr; |
301 | |
302 | char* fastMemory = nullptr; |
303 | if (Options::useWebAssemblyFastMemory()) { |
304 | tryAllocate( |
305 | [&] () -> MemoryResult::Kind { |
306 | auto result = memoryManager().tryAllocateFastMemory(); |
307 | fastMemory = bitwise_cast<char*>(result.basePtr); |
308 | return result.kind; |
309 | }, notifyMemoryPressure, syncTryToReclaimMemory); |
310 | } |
311 | |
312 | if (fastMemory) { |
313 | |
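        // Everything past the initial size is made inaccessible so that out-of-bounds
        // accesses fault; grow() later flips pages back to read+write.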
314 | if (mprotect(fastMemory + initialBytes, Memory::fastMappedBytes() - initialBytes, PROT_NONE)) { |
315 | dataLog("mprotect failed: " , strerror(errno), "\n" ); |
316 | RELEASE_ASSERT_NOT_REACHED(); |
317 | } |
318 | |
319 | return adoptRef(new Memory(fastMemory, initial, maximum, Memory::fastMappedBytes(), MemoryMode::Signaling, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); |
320 | } |
321 | |
322 | if (UNLIKELY(Options::crashIfWebAssemblyCantFastMemory())) |
323 | webAssemblyCouldntGetFastMemory(); |
324 | |
325 | if (!initialBytes) |
326 | return adoptRef(new Memory(initial, maximum, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); |
327 | |
328 | void* slowMemory = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, initialBytes); |
329 | if (!slowMemory) { |
330 | memoryManager().freePhysicalBytes(initialBytes); |
331 | return nullptr; |
332 | } |
333 | return adoptRef(new Memory(slowMemory, initial, maximum, initialBytes, MemoryMode::BoundsChecking, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); |
334 | } |
335 | |
336 | Memory::~Memory() |
337 | { |
338 | if (m_memory) { |
339 | memoryManager().freePhysicalBytes(m_size); |
340 | switch (m_mode) { |
341 | case MemoryMode::Signaling: |
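            // Restore the entire reservation to read+write before freeing it, since part of
            // it was protected with PROT_NONE.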
342 | if (mprotect(memory(), Memory::fastMappedBytes(), PROT_READ | PROT_WRITE)) { |
343 | dataLog("mprotect failed: " , strerror(errno), "\n" ); |
344 | RELEASE_ASSERT_NOT_REACHED(); |
345 | } |
346 | memoryManager().freeFastMemory(memory()); |
347 | break; |
348 | case MemoryMode::BoundsChecking: |
349 | Gigacage::freeVirtualPages(Gigacage::Primitive, memory(), m_size); |
350 | break; |
351 | } |
352 | } |
353 | } |
354 | |
355 | size_t Memory::fastMappedRedzoneBytes() |
356 | { |
357 | return static_cast<size_t>(PageCount::pageSize) * Options::webAssemblyFastMemoryRedzonePages(); |
358 | } |
359 | |
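// A fast mapping covers the entire 32-bit index space plus a redzone, so any wasm access
// (a 32-bit index plus a constant offset small enough to land in the redzone) either hits
// accessible memory or faults inside the reservation.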
360 | size_t Memory::fastMappedBytes() |
361 | { |
    static_assert(sizeof(uint64_t) == sizeof(size_t), "We rely on allowing the maximum size of Memory we map to be 2^32 + redzone which is larger than fits in a 32-bit integer that we'd pass to mprotect if this didn't hold.");
363 | return static_cast<size_t>(std::numeric_limits<uint32_t>::max()) + fastMappedRedzoneBytes(); |
364 | } |
365 | |
366 | bool Memory::addressIsInActiveFastMemory(void* address) |
367 | { |
368 | return memoryManager().isAddressInFastMemory(address); |
369 | } |
370 | |
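// Grows the memory by `delta` pages. On success, returns the previous size in pages (the
// value wasm's memory.grow reports); on failure, returns a GrowFailReason.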
371 | Expected<PageCount, Memory::GrowFailReason> Memory::grow(PageCount delta) |
372 | { |
373 | const Wasm::PageCount oldPageCount = sizeInPages(); |
374 | |
375 | if (!delta.isValid()) |
376 | return makeUnexpected(GrowFailReason::InvalidDelta); |
377 | |
378 | const Wasm::PageCount newPageCount = oldPageCount + delta; |
379 | if (!newPageCount || !newPageCount.isValid()) |
380 | return makeUnexpected(GrowFailReason::InvalidGrowSize); |
381 | if (newPageCount.bytes() > MAX_ARRAY_BUFFER_SIZE) |
382 | return makeUnexpected(GrowFailReason::OutOfMemory); |
383 | |
384 | auto success = [&] () { |
385 | m_growSuccessCallback(GrowSuccessTag, oldPageCount, newPageCount); |
        // Update the cached memory base and size in each live instance.
387 | for (auto& instance : m_instances) { |
388 | if (instance.get() != nullptr) |
389 | instance.get()->updateCachedMemory(); |
390 | } |
391 | return oldPageCount; |
392 | }; |
393 | |
394 | if (delta.pageCount() == 0) |
395 | return success(); |
396 | |
397 | dataLogLnIf(verbose, "Memory::grow(" , delta, ") to " , newPageCount, " from " , *this); |
398 | RELEASE_ASSERT(newPageCount > PageCount::fromBytes(m_size)); |
399 | |
400 | if (maximum() && newPageCount > maximum()) |
401 | return makeUnexpected(GrowFailReason::WouldExceedMaximum); |
402 | |
403 | size_t desiredSize = newPageCount.bytes(); |
404 | RELEASE_ASSERT(desiredSize <= MAX_ARRAY_BUFFER_SIZE); |
405 | RELEASE_ASSERT(desiredSize > m_size); |
    size_t extraBytes = desiredSize - m_size;
407 | RELEASE_ASSERT(extraBytes); |
408 | bool allocationSuccess = tryAllocate( |
409 | [&] () -> MemoryResult::Kind { |
410 | return memoryManager().tryAllocatePhysicalBytes(extraBytes); |
411 | }, m_notifyMemoryPressure, m_syncTryToReclaimMemory); |
412 | if (!allocationSuccess) |
413 | return makeUnexpected(GrowFailReason::OutOfMemory); |
414 | |
415 | switch (mode()) { |
416 | case MemoryMode::BoundsChecking: { |
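        // A bounds-checked memory only maps what it currently uses, so growing allocates a
        // larger region, copies the old contents into it, and frees the old region.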
417 | RELEASE_ASSERT(maximum().bytes() != 0); |
418 | |
419 | void* newMemory = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, desiredSize); |
420 | if (!newMemory) |
421 | return makeUnexpected(GrowFailReason::OutOfMemory); |
422 | |
423 | memcpy(newMemory, memory(), m_size); |
424 | if (m_memory) |
425 | Gigacage::freeVirtualPages(Gigacage::Primitive, memory(), m_size); |
426 | m_memory = CagedMemory(newMemory, desiredSize); |
427 | m_mappedCapacity = desiredSize; |
428 | m_size = desiredSize; |
429 | ASSERT(memory() == newMemory); |
430 | return success(); |
431 | } |
432 | case MemoryMode::Signaling: { |
433 | RELEASE_ASSERT(memory()); |
434 | // Signaling memory must have been pre-allocated virtually. |
435 | uint8_t* startAddress = static_cast<uint8_t*>(memory()) + m_size; |
436 | |
437 | dataLogLnIf(verbose, "Marking WebAssembly memory's " , RawPointer(memory()), " as read+write in range [" , RawPointer(startAddress), ", " , RawPointer(startAddress + extraBytes), ")" ); |
438 | if (mprotect(startAddress, extraBytes, PROT_READ | PROT_WRITE)) { |
439 | dataLog("mprotect failed: " , strerror(errno), "\n" ); |
440 | RELEASE_ASSERT_NOT_REACHED(); |
441 | } |
442 | m_memory.recage(m_size, desiredSize); |
443 | m_size = desiredSize; |
444 | return success(); |
445 | } |
446 | } |
447 | |
448 | RELEASE_ASSERT_NOT_REACHED(); |
449 | return oldPageCount; |
450 | } |
451 | |
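// Instances are tracked via WeakPtr; reuse a slot whose instance has already died before
// appending, so the vector does not grow without bound.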
452 | void Memory::registerInstance(Instance* instance) |
453 | { |
454 | size_t count = m_instances.size(); |
455 | for (size_t index = 0; index < count; index++) { |
456 | if (m_instances.at(index).get() == nullptr) { |
457 | m_instances.at(index) = makeWeakPtr(*instance); |
458 | return; |
459 | } |
460 | } |
461 | m_instances.append(makeWeakPtr(*instance)); |
462 | } |
463 | |
464 | void Memory::dump(PrintStream& out) const |
465 | { |
466 | out.print("Memory at " , RawPointer(memory()), ", size " , m_size, "B capacity " , m_mappedCapacity, "B, initial " , m_initial, " maximum " , m_maximum, " mode " , makeString(m_mode)); |
467 | } |
468 | |
} // namespace Wasm
} // namespace JSC
472 | |
473 | #endif // ENABLE(WEBASSEMBLY) |
474 | |