1 | /* |
2 | * Copyright (C) 2003-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2007 Eric Seidel <[email protected]> |
4 | * |
5 | * This library is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU Lesser General Public |
7 | * License as published by the Free Software Foundation; either |
8 | * version 2 of the License, or (at your option) any later version. |
9 | * |
10 | * This library is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * Lesser General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU Lesser General Public |
16 | * License along with this library; if not, write to the Free Software |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
18 | * |
19 | */ |
20 | |
21 | #include "config.h" |
22 | #include "Heap.h" |
23 | |
24 | #include "BlockDirectoryInlines.h" |
25 | #include "BuiltinExecutables.h" |
26 | #include "CodeBlock.h" |
27 | #include "CodeBlockSetInlines.h" |
28 | #include "CollectingScope.h" |
29 | #include "ConservativeRoots.h" |
30 | #include "DFGWorklistInlines.h" |
31 | #include "EdenGCActivityCallback.h" |
32 | #include "Exception.h" |
33 | #include "FullGCActivityCallback.h" |
34 | #include "FunctionExecutableInlines.h" |
35 | #include "GCActivityCallback.h" |
36 | #include "GCIncomingRefCountedSetInlines.h" |
37 | #include "GCSegmentedArrayInlines.h" |
38 | #include "GCTypeMap.h" |
39 | #include "HasOwnPropertyCache.h" |
40 | #include "HeapHelperPool.h" |
41 | #include "HeapIterationScope.h" |
42 | #include "HeapProfiler.h" |
43 | #include "HeapSnapshot.h" |
44 | #include "HeapVerifier.h" |
45 | #include "IncrementalSweeper.h" |
46 | #include "InferredValueInlines.h" |
47 | #include "Interpreter.h" |
48 | #include "IsoCellSetInlines.h" |
49 | #include "JITStubRoutineSet.h" |
50 | #include "JITWorklist.h" |
51 | #include "JSCInlines.h" |
52 | #include "JSGlobalObject.h" |
53 | #include "JSLock.h" |
54 | #include "JSVirtualMachineInternal.h" |
55 | #include "JSWeakMap.h" |
56 | #include "JSWeakObjectRef.h" |
57 | #include "JSWeakSet.h" |
58 | #include "JSWebAssemblyCodeBlock.h" |
59 | #include "MachineStackMarker.h" |
60 | #include "MarkStackMergingConstraint.h" |
61 | #include "MarkedSpaceInlines.h" |
62 | #include "MarkingConstraintSet.h" |
63 | #include "PreventCollectionScope.h" |
64 | #include "SamplingProfiler.h" |
65 | #include "ShadowChicken.h" |
66 | #include "SpaceTimeMutatorScheduler.h" |
67 | #include "StochasticSpaceTimeMutatorScheduler.h" |
68 | #include "StopIfNecessaryTimer.h" |
69 | #include "SubspaceInlines.h" |
70 | #include "SuperSampler.h" |
71 | #include "SweepingScope.h" |
72 | #include "SymbolTableInlines.h" |
73 | #include "SynchronousStopTheWorldMutatorScheduler.h" |
74 | #include "TypeProfiler.h" |
75 | #include "TypeProfilerLog.h" |
76 | #include "UnlinkedCodeBlock.h" |
77 | #include "VM.h" |
78 | #include "VisitCounter.h" |
79 | #include "WasmMemory.h" |
80 | #include "WeakMapImplInlines.h" |
81 | #include "WeakSetInlines.h" |
82 | #include <algorithm> |
83 | #include <wtf/CryptographicallyRandomNumber.h> |
84 | #include <wtf/ListDump.h> |
85 | #include <wtf/MainThread.h> |
86 | #include <wtf/ParallelVectorIterator.h> |
87 | #include <wtf/ProcessID.h> |
88 | #include <wtf/RAMSize.h> |
89 | #include <wtf/SimpleStats.h> |
90 | #include <wtf/Threading.h> |
91 | |
92 | #if PLATFORM(IOS_FAMILY) |
93 | #include <bmalloc/bmalloc.h> |
94 | #endif |
95 | |
96 | #if USE(FOUNDATION) |
97 | #include <wtf/spi/cocoa/objcSPI.h> |
98 | #endif |
99 | |
100 | #ifdef JSC_GLIB_API_ENABLED |
101 | #include "JSCGLibWrapperObject.h" |
102 | #endif |
103 | |
104 | namespace JSC { |
105 | |
106 | namespace { |
107 | |
108 | bool verboseStop = false; |
109 | |
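// Tracks the worst GC pause observed so far in this process; consulted only
// for Options::logGC() output (see runFixpointPhase and runEndPhase below).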
110 | double maxPauseMS(double thisPauseMS) |
111 | { |
112 | static double maxPauseMS; |
113 | maxPauseMS = std::max(thisPauseMS, maxPauseMS); |
114 | return maxPauseMS; |
115 | } |
116 | |
117 | size_t minHeapSize(HeapType heapType, size_t ramSize) |
118 | { |
119 | if (heapType == LargeHeap) { |
120 | double result = std::min( |
121 | static_cast<double>(Options::largeHeapSize()), |
122 | ramSize * Options::smallHeapRAMFraction()); |
123 | return static_cast<size_t>(result); |
124 | } |
125 | return Options::smallHeapSize(); |
126 | } |
127 | |
128 | size_t proportionalHeapSize(size_t heapSize, size_t ramSize) |
129 | { |
130 | if (VM::isInMiniMode()) |
131 | return Options::miniVMHeapGrowthFactor() * heapSize; |
132 | |
133 | #if PLATFORM(IOS_FAMILY) |
134 | size_t memoryFootprint = bmalloc::api::memoryFootprint(); |
135 | if (memoryFootprint < ramSize * Options::smallHeapRAMFraction()) |
136 | return Options::smallHeapGrowthFactor() * heapSize; |
137 | if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction()) |
138 | return Options::mediumHeapGrowthFactor() * heapSize; |
139 | #else |
140 | if (heapSize < ramSize * Options::smallHeapRAMFraction()) |
141 | return Options::smallHeapGrowthFactor() * heapSize; |
142 | if (heapSize < ramSize * Options::mediumHeapRAMFraction()) |
143 | return Options::mediumHeapGrowthFactor() * heapSize; |
144 | #endif |
145 | return Options::largeHeapGrowthFactor() * heapSize; |
146 | } |
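// Worked example of the tiers above (values are illustrative, not the Options
// defaults): with ramSize = 8GB, smallHeapRAMFraction = 0.25, and
// mediumHeapRAMFraction = 0.5, a heap (or, on IOS_FAMILY, a process footprint)
// under 2GB grows by smallHeapGrowthFactor, under 4GB by
// mediumHeapGrowthFactor, and anything larger by largeHeapGrowthFactor.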
147 | |
148 | bool isValidSharedInstanceThreadState(VM& vm) |
149 | { |
150 | return vm.currentThreadIsHoldingAPILock(); |
151 | } |
152 | |
153 | bool isValidThreadState(VM& vm) |
154 | { |
155 | if (vm.atomStringTable() != Thread::current().atomStringTable()) |
156 | return false; |
157 | |
158 | if (vm.isSharedInstance() && !isValidSharedInstanceThreadState(vm)) |
159 | return false; |
160 | |
161 | return true; |
162 | } |
163 | |
164 | void recordType(VM& vm, TypeCountSet& set, JSCell* cell) |
165 | { |
const char* typeName = "[unknown]";
167 | const ClassInfo* info = cell->classInfo(vm); |
168 | if (info && info->className) |
169 | typeName = info->className; |
170 | set.add(typeName); |
171 | } |
172 | |
173 | bool measurePhaseTiming() |
174 | { |
175 | return false; |
176 | } |
177 | |
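// The stats map below is allocated once and deliberately never destroyed, so
// TimingScope destructors can still log safely very late in shutdown.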
178 | HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats() |
179 | { |
180 | static HashMap<const char*, GCTypeMap<SimpleStats>>* result; |
181 | static std::once_flag once; |
182 | std::call_once( |
183 | once, |
184 | [] { |
185 | result = new HashMap<const char*, GCTypeMap<SimpleStats>>(); |
186 | }); |
187 | return *result; |
188 | } |
189 | |
190 | SimpleStats& timingStats(const char* name, CollectionScope scope) |
191 | { |
192 | return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope]; |
193 | } |
194 | |
195 | class TimingScope { |
196 | public: |
197 | TimingScope(Optional<CollectionScope> scope, const char* name) |
198 | : m_scope(scope) |
199 | , m_name(name) |
200 | { |
201 | if (measurePhaseTiming()) |
202 | m_before = MonotonicTime::now(); |
203 | } |
204 | |
205 | TimingScope(Heap& heap, const char* name) |
206 | : TimingScope(heap.collectionScope(), name) |
207 | { |
208 | } |
209 | |
210 | void setScope(Optional<CollectionScope> scope) |
211 | { |
212 | m_scope = scope; |
213 | } |
214 | |
215 | void setScope(Heap& heap) |
216 | { |
217 | setScope(heap.collectionScope()); |
218 | } |
219 | |
220 | ~TimingScope() |
221 | { |
222 | if (measurePhaseTiming()) { |
223 | MonotonicTime after = MonotonicTime::now(); |
224 | Seconds timing = after - m_before; |
225 | SimpleStats& stats = timingStats(m_name, *m_scope); |
226 | stats.add(timing.milliseconds()); |
227 | dataLog("[GC:" , *m_scope, "] " , m_name, " took: " , timing.milliseconds(), "ms (average " , stats.mean(), "ms).\n" ); |
228 | } |
229 | } |
230 | private: |
231 | Optional<CollectionScope> m_scope; |
232 | MonotonicTime m_before; |
233 | const char* m_name; |
234 | }; |
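// Usage sketch: a stack-allocated TimingScope brackets a GC phase, e.g.
//     TimingScope timingScope(*this, "Heap::beginMarking");
// When measurePhaseTiming() is true, the destructor records the elapsed time
// and logs it along with the running mean for the current collection scope.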
235 | |
236 | } // anonymous namespace |
237 | |
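// The dedicated collector thread, built on WTF::AutomaticThread: poll() runs
// under m_threadLock and picks Stop, Work, or Wait; each Work result drives
// one collectInCollectorThread() pass. The mutator hands over work by
// granting a ticket under m_threadLock and notifying m_threadCondition (see
// requestCollection and the ticket checks in lastChanceToFinalize).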
238 | class Heap::HeapThread : public AutomaticThread { |
239 | public: |
240 | HeapThread(const AbstractLocker& locker, Heap& heap) |
241 | : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition.copyRef()) |
242 | , m_heap(heap) |
243 | { |
244 | } |
245 | |
246 | const char* name() const override |
247 | { |
248 | return "JSC Heap Collector Thread" ; |
249 | } |
250 | |
251 | protected: |
252 | PollResult poll(const AbstractLocker& locker) override |
253 | { |
254 | if (m_heap.m_threadShouldStop) { |
255 | m_heap.notifyThreadStopping(locker); |
256 | return PollResult::Stop; |
257 | } |
258 | if (m_heap.shouldCollectInCollectorThread(locker)) { |
259 | m_heap.m_collectorThreadIsRunning = true; |
260 | return PollResult::Work; |
261 | } |
262 | m_heap.m_collectorThreadIsRunning = false; |
263 | return PollResult::Wait; |
264 | } |
265 | |
266 | WorkResult work() override |
267 | { |
268 | m_heap.collectInCollectorThread(); |
269 | return WorkResult::Continue; |
270 | } |
271 | |
272 | void threadDidStart() override |
273 | { |
274 | Thread::registerGCThread(GCThreadType::Main); |
275 | } |
276 | |
277 | void threadIsStopping(const AbstractLocker&) override |
278 | { |
279 | m_heap.m_collectorThreadIsRunning = false; |
280 | } |
281 | |
282 | private: |
283 | Heap& m_heap; |
284 | }; |
285 | |
286 | Heap::Heap(VM& vm, HeapType heapType) |
287 | : m_heapType(heapType) |
288 | , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize()) |
289 | , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize)) |
290 | , m_maxEdenSize(m_minBytesPerCycle) |
291 | , m_maxHeapSize(m_minBytesPerCycle) |
292 | , m_objectSpace(this) |
293 | , m_machineThreads(makeUnique<MachineThreads>()) |
, m_collectorSlotVisitor(makeUnique<SlotVisitor>(*this, "C"))
, m_mutatorSlotVisitor(makeUnique<SlotVisitor>(*this, "M"))
296 | , m_mutatorMarkStack(makeUnique<MarkStackArray>()) |
297 | , m_raceMarkStack(makeUnique<MarkStackArray>()) |
298 | , m_constraintSet(makeUnique<MarkingConstraintSet>(*this)) |
299 | , m_handleSet(vm) |
300 | , m_codeBlocks(makeUnique<CodeBlockSet>()) |
301 | , m_jitStubRoutines(makeUnique<JITStubRoutineSet>()) |
302 | , m_vm(vm) |
303 | // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously |
304 | // schedule the timer if we've never done a collection. |
305 | , m_fullActivityCallback(GCActivityCallback::tryCreateFullTimer(this)) |
306 | , m_edenActivityCallback(GCActivityCallback::tryCreateEdenTimer(this)) |
307 | , m_sweeper(adoptRef(*new IncrementalSweeper(this))) |
308 | , m_stopIfNecessaryTimer(adoptRef(*new StopIfNecessaryTimer(vm))) |
309 | , m_sharedCollectorMarkStack(makeUnique<MarkStackArray>()) |
310 | , m_sharedMutatorMarkStack(makeUnique<MarkStackArray>()) |
311 | , m_helperClient(&heapHelperPool()) |
312 | , m_threadLock(Box<Lock>::create()) |
313 | , m_threadCondition(AutomaticThreadCondition::create()) |
314 | { |
315 | m_worldState.store(0); |
316 | |
317 | for (unsigned i = 0, numberOfParallelThreads = heapHelperPool().numberOfThreads(); i < numberOfParallelThreads; ++i) { |
std::unique_ptr<SlotVisitor> visitor = makeUnique<SlotVisitor>(*this, toCString("P", i + 1));
319 | if (Options::optimizeParallelSlotVisitorsForStoppedMutator()) |
320 | visitor->optimizeForStoppedMutator(); |
321 | m_availableParallelSlotVisitors.append(visitor.get()); |
322 | m_parallelSlotVisitors.append(WTFMove(visitor)); |
323 | } |
324 | |
325 | if (Options::useConcurrentGC()) { |
326 | if (Options::useStochasticMutatorScheduler()) |
327 | m_scheduler = makeUnique<StochasticSpaceTimeMutatorScheduler>(*this); |
328 | else |
329 | m_scheduler = makeUnique<SpaceTimeMutatorScheduler>(*this); |
330 | } else { |
331 | // We simulate turning off concurrent GC by making the scheduler say that the world |
332 | // should always be stopped when the collector is running. |
333 | m_scheduler = makeUnique<SynchronousStopTheWorldMutatorScheduler>(); |
334 | } |
335 | |
336 | if (Options::verifyHeap()) |
337 | m_verifier = makeUnique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification()); |
338 | |
339 | m_collectorSlotVisitor->optimizeForStoppedMutator(); |
340 | |
341 | // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting. |
342 | size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold())); |
343 | m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4; |
344 | |
345 | LockHolder locker(*m_threadLock); |
346 | m_thread = adoptRef(new HeapThread(locker, *this)); |
347 | } |
348 | |
349 | Heap::~Heap() |
350 | { |
351 | // Scribble m_worldState to make it clear that the heap has already been destroyed if we crash in checkConn |
352 | m_worldState.store(0xbadbeeffu); |
353 | |
354 | forEachSlotVisitor( |
355 | [&] (SlotVisitor& visitor) { |
356 | visitor.clearMarkStacks(); |
357 | }); |
358 | m_mutatorMarkStack->clear(); |
359 | m_raceMarkStack->clear(); |
360 | |
361 | for (WeakBlock* block : m_logicallyEmptyWeakBlocks) |
362 | WeakBlock::destroy(*this, block); |
363 | } |
364 | |
365 | bool Heap::isPagedOut(MonotonicTime deadline) |
366 | { |
367 | return m_objectSpace.isPagedOut(deadline); |
368 | } |
369 | |
370 | void Heap::dumpHeapStatisticsAtVMDestruction() |
371 | { |
372 | unsigned counter = 0; |
373 | m_objectSpace.forEachBlock([&] (MarkedBlock::Handle* block) { |
374 | unsigned live = 0; |
375 | block->forEachCell([&] (size_t, HeapCell* cell, HeapCell::Kind) { |
376 | if (cell->isLive()) |
377 | live++; |
378 | return IterationStatus::Continue; |
379 | }); |
380 | dataLogLn("[" , counter++, "] " , block->cellSize(), ", " , live, " / " , block->cellsPerBlock(), " " , static_cast<double>(live) / block->cellsPerBlock() * 100, "% " , block->attributes(), " " , block->subspace()->name()); |
381 | block->forEachCell([&] (size_t, HeapCell* heapCell, HeapCell::Kind kind) { |
382 | if (heapCell->isLive() && kind == HeapCell::Kind::JSCell) { |
383 | auto* cell = static_cast<JSCell*>(heapCell); |
384 | if (cell->isObject()) |
385 | dataLogLn(" " , JSValue((JSObject*)cell)); |
386 | else |
387 | dataLogLn(" " , *cell); |
388 | } |
389 | return IterationStatus::Continue; |
390 | }); |
391 | }); |
392 | } |
393 | |
394 | // The VM is being destroyed and the collector will never run again. |
395 | // Run all pending finalizers now because we won't get another chance. |
396 | void Heap::lastChanceToFinalize() |
397 | { |
398 | MonotonicTime before; |
399 | if (Options::logGC()) { |
400 | before = MonotonicTime::now(); |
401 | dataLog("[GC<" , RawPointer(this), ">: shutdown " ); |
402 | } |
403 | |
404 | m_isShuttingDown = true; |
405 | |
406 | RELEASE_ASSERT(!m_vm.entryScope); |
407 | RELEASE_ASSERT(m_mutatorState == MutatorState::Running); |
408 | |
409 | if (m_collectContinuouslyThread) { |
410 | { |
411 | LockHolder locker(m_collectContinuouslyLock); |
412 | m_shouldStopCollectingContinuously = true; |
413 | m_collectContinuouslyCondition.notifyOne(); |
414 | } |
415 | m_collectContinuouslyThread->waitForCompletion(); |
416 | } |
417 | |
418 | if (Options::logGC()) |
419 | dataLog("1" ); |
420 | |
421 | // Prevent new collections from being started. This is probably not even necessary, since we're not |
422 | // going to call into anything that starts collections. Still, this makes the algorithm more |
423 | // obviously sound. |
424 | m_isSafeToCollect = false; |
425 | |
426 | if (Options::logGC()) |
427 | dataLog("2" ); |
428 | |
429 | bool isCollecting; |
430 | { |
431 | auto locker = holdLock(*m_threadLock); |
432 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
433 | isCollecting = m_lastServedTicket < m_lastGrantedTicket; |
434 | } |
435 | if (isCollecting) { |
436 | if (Options::logGC()) |
437 | dataLog("...]\n" ); |
438 | |
439 | // Wait for the current collection to finish. |
440 | waitForCollector( |
441 | [&] (const AbstractLocker&) -> bool { |
442 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
443 | return m_lastServedTicket == m_lastGrantedTicket; |
444 | }); |
445 | |
446 | if (Options::logGC()) |
447 | dataLog("[GC<" , RawPointer(this), ">: shutdown " ); |
448 | } |
449 | if (Options::logGC()) |
450 | dataLog("3" ); |
451 | |
452 | RELEASE_ASSERT(m_requests.isEmpty()); |
453 | RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket); |
454 | |
455 | // Carefully bring the thread down. |
456 | bool stopped = false; |
457 | { |
458 | LockHolder locker(*m_threadLock); |
459 | stopped = m_thread->tryStop(locker); |
460 | m_threadShouldStop = true; |
461 | if (!stopped) |
462 | m_threadCondition->notifyOne(locker); |
463 | } |
464 | |
465 | if (Options::logGC()) |
466 | dataLog("4" ); |
467 | |
468 | if (!stopped) |
469 | m_thread->join(); |
470 | |
471 | if (Options::logGC()) |
472 | dataLog("5 " ); |
473 | |
474 | if (UNLIKELY(Options::dumpHeapStatisticsAtVMDestruction())) |
475 | dumpHeapStatisticsAtVMDestruction(); |
476 | |
477 | m_arrayBuffers.lastChanceToFinalize(); |
478 | m_objectSpace.stopAllocatingForGood(); |
479 | m_objectSpace.lastChanceToFinalize(); |
480 | releaseDelayedReleasedObjects(); |
481 | |
482 | sweepAllLogicallyEmptyWeakBlocks(); |
483 | |
484 | m_objectSpace.freeMemory(); |
485 | |
486 | if (Options::logGC()) |
dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
488 | } |
489 | |
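// Drains m_delayedReleaseObjects, which Heap::releaseSoon() fills with
// Objective-C (and, under JSC_GLIB_API_ENABLED, GLib) wrapper objects whose
// release must happen at a safe point with all JS locks dropped; see the
// DropAllLocks scope below.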
490 | void Heap::releaseDelayedReleasedObjects() |
491 | { |
492 | #if USE(FOUNDATION) || defined(JSC_GLIB_API_ENABLED) |
493 | // We need to guard against the case that releasing an object can create more objects due to the |
494 | // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up |
495 | // back here and could try to recursively release objects. We guard that with a recursive entry |
// count. Only the initial call will release objects; recursive calls simply return and let
// the initial call to the function take care of any objects created during release time.
498 | // This also means that we need to loop until there are no objects in m_delayedReleaseObjects |
499 | // and use a temp Vector for the actual releasing. |
500 | if (!m_delayedReleaseRecursionCount++) { |
501 | while (!m_delayedReleaseObjects.isEmpty()) { |
502 | ASSERT(m_vm.currentThreadIsHoldingAPILock()); |
503 | |
504 | auto objectsToRelease = WTFMove(m_delayedReleaseObjects); |
505 | |
506 | { |
507 | // We need to drop locks before calling out to arbitrary code. |
508 | JSLock::DropAllLocks dropAllLocks(m_vm); |
509 | |
510 | #if USE(FOUNDATION) |
511 | void* context = objc_autoreleasePoolPush(); |
512 | #endif |
513 | objectsToRelease.clear(); |
514 | #if USE(FOUNDATION) |
515 | objc_autoreleasePoolPop(context); |
516 | #endif |
517 | } |
518 | } |
519 | } |
520 | m_delayedReleaseRecursionCount--; |
521 | #endif |
522 | } |
523 | |
void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
525 | { |
526 | didAllocate(size); |
527 | collectIfNecessaryOrDefer(); |
528 | } |
529 | |
void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
531 | { |
532 | // FIXME: Change this to use SaturatedArithmetic when available. |
533 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
534 | Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize; |
535 | checkedNewSize += size; |
536 | m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); |
537 | reportExtraMemoryAllocatedSlowCase(size); |
538 | } |
539 | |
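// Reading the footprint via bmalloc is not free, so the default (Cached) call
// type only refreshes the answer every 100th query;
// MemoryThresholdCallType::Direct forces an immediate re-read of
// percentAvailableMemoryInUse().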
540 | bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType) |
541 | { |
542 | #if PLATFORM(IOS_FAMILY) |
543 | if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) { |
544 | m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold(); |
545 | m_precentAvailableMemoryCachedCallCount = 0; |
546 | } |
547 | |
548 | return m_overCriticalMemoryThreshold; |
549 | #else |
550 | UNUSED_PARAM(memoryThresholdCallType); |
551 | return false; |
552 | #endif |
553 | } |
554 | |
555 | void Heap::reportAbandonedObjectGraph() |
556 | { |
557 | // Our clients don't know exactly how much memory they |
558 | // are abandoning so we just guess for them. |
559 | size_t abandonedBytes = static_cast<size_t>(0.1 * capacity()); |
560 | |
561 | // We want to accelerate the next collection. Because memory has just |
562 | // been abandoned, the next collection has the potential to |
563 | // be more profitable. Since allocation is the trigger for collection, |
564 | // we hasten the next collection by pretending that we've allocated more memory. |
565 | if (m_fullActivityCallback) { |
566 | m_fullActivityCallback->didAllocate(*this, |
567 | m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); |
568 | } |
569 | m_bytesAbandonedSinceLastFullCollect += abandonedBytes; |
570 | } |
571 | |
572 | void Heap::protect(JSValue k) |
573 | { |
574 | ASSERT(k); |
575 | ASSERT(m_vm.currentThreadIsHoldingAPILock()); |
576 | |
577 | if (!k.isCell()) |
578 | return; |
579 | |
580 | m_protectedValues.add(k.asCell()); |
581 | } |
582 | |
583 | bool Heap::unprotect(JSValue k) |
584 | { |
585 | ASSERT(k); |
586 | ASSERT(m_vm.currentThreadIsHoldingAPILock()); |
587 | |
588 | if (!k.isCell()) |
589 | return false; |
590 | |
591 | return m_protectedValues.remove(k.asCell()); |
592 | } |
593 | |
594 | void Heap::addReference(JSCell* cell, ArrayBuffer* buffer) |
595 | { |
596 | if (m_arrayBuffers.addReference(cell, buffer)) { |
597 | collectIfNecessaryOrDefer(); |
598 | didAllocate(buffer->gcSizeEstimateInBytes()); |
599 | } |
600 | } |
601 | |
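// Runs the finalizeUnconditionally() hook on every marked (that is,
// surviving) cell in the given set. Cell types use this to prune state that
// may reference cells which died this cycle, e.g. JSWeakMap and JSWeakSet
// dropping entries for dead keys.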
602 | template<typename CellType, typename CellSet> |
603 | void Heap::finalizeMarkedUnconditionalFinalizers(CellSet& cellSet) |
604 | { |
605 | cellSet.forEachMarkedCell( |
606 | [&] (HeapCell* cell, HeapCell::Kind) { |
607 | static_cast<CellType*>(cell)->finalizeUnconditionally(vm()); |
608 | }); |
609 | } |
610 | |
611 | void Heap::finalizeUnconditionalFinalizers() |
612 | { |
613 | vm().builtinExecutables()->finalizeUnconditionally(); |
614 | finalizeMarkedUnconditionalFinalizers<FunctionExecutable>(vm().functionExecutableSpace.space); |
615 | finalizeMarkedUnconditionalFinalizers<SymbolTable>(vm().symbolTableSpace); |
616 | vm().forEachCodeBlockSpace( |
617 | [&] (auto& space) { |
618 | this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.set); |
619 | }); |
620 | finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm().executableToCodeBlockEdgesWithFinalizers); |
621 | finalizeMarkedUnconditionalFinalizers<StructureRareData>(vm().structureRareDataSpace); |
622 | finalizeMarkedUnconditionalFinalizers<UnlinkedFunctionExecutable>(vm().unlinkedFunctionExecutableSpace.set); |
623 | if (vm().m_weakSetSpace) |
624 | finalizeMarkedUnconditionalFinalizers<JSWeakSet>(*vm().m_weakSetSpace); |
625 | if (vm().m_weakMapSpace) |
626 | finalizeMarkedUnconditionalFinalizers<JSWeakMap>(*vm().m_weakMapSpace); |
627 | if (vm().m_weakObjectRefSpace) |
628 | finalizeMarkedUnconditionalFinalizers<JSWeakObjectRef>(*vm().m_weakObjectRefSpace); |
629 | if (vm().m_errorInstanceSpace) |
630 | finalizeMarkedUnconditionalFinalizers<ErrorInstance>(*vm().m_errorInstanceSpace); |
631 | |
632 | #if ENABLE(WEBASSEMBLY) |
633 | if (vm().m_webAssemblyCodeBlockSpace) |
634 | finalizeMarkedUnconditionalFinalizers<JSWebAssemblyCodeBlock>(*vm().m_webAssemblyCodeBlockSpace); |
635 | #endif |
636 | } |
637 | |
638 | void Heap::willStartIterating() |
639 | { |
640 | m_objectSpace.willStartIterating(); |
641 | } |
642 | |
643 | void Heap::didFinishIterating() |
644 | { |
645 | m_objectSpace.didFinishIterating(); |
646 | } |
647 | |
648 | void Heap::completeAllJITPlans() |
649 | { |
650 | if (!VM::canUseJIT()) |
651 | return; |
652 | #if ENABLE(JIT) |
653 | JITWorklist::ensureGlobalWorklist().completeAllForVM(m_vm); |
654 | #endif // ENABLE(JIT) |
655 | DFG::completeAllPlansForVM(m_vm); |
656 | } |
657 | |
658 | template<typename Func> |
659 | void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func) |
660 | { |
661 | m_codeBlocks->iterateCurrentlyExecuting(func); |
662 | if (VM::canUseJIT()) |
663 | DFG::iterateCodeBlocksForGC(m_vm, func); |
664 | } |
665 | |
666 | template<typename Func> |
667 | void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func) |
668 | { |
669 | Vector<CodeBlock*, 256> codeBlocks; |
670 | iterateExecutingAndCompilingCodeBlocks( |
671 | [&] (CodeBlock* codeBlock) { |
672 | codeBlocks.append(codeBlock); |
673 | }); |
674 | for (CodeBlock* codeBlock : codeBlocks) |
675 | func(codeBlock); |
676 | } |
677 | |
678 | void Heap::assertMarkStacksEmpty() |
679 | { |
680 | bool ok = true; |
681 | |
682 | if (!m_sharedCollectorMarkStack->isEmpty()) { |
683 | dataLog("FATAL: Shared collector mark stack not empty! It has " , m_sharedCollectorMarkStack->size(), " elements.\n" ); |
684 | ok = false; |
685 | } |
686 | |
687 | if (!m_sharedMutatorMarkStack->isEmpty()) { |
688 | dataLog("FATAL: Shared mutator mark stack not empty! It has " , m_sharedMutatorMarkStack->size(), " elements.\n" ); |
689 | ok = false; |
690 | } |
691 | |
692 | forEachSlotVisitor( |
693 | [&] (SlotVisitor& visitor) { |
694 | if (visitor.isEmpty()) |
695 | return; |
696 | |
697 | dataLog("FATAL: Visitor " , RawPointer(&visitor), " is not empty!\n" ); |
698 | ok = false; |
699 | }); |
700 | |
701 | RELEASE_ASSERT(ok); |
702 | } |
703 | |
704 | void Heap::gatherStackRoots(ConservativeRoots& roots) |
705 | { |
706 | m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread); |
707 | } |
708 | |
709 | void Heap::gatherJSStackRoots(ConservativeRoots& roots) |
710 | { |
711 | #if ENABLE(C_LOOP) |
712 | m_vm.interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks); |
713 | #else |
714 | UNUSED_PARAM(roots); |
715 | #endif |
716 | } |
717 | |
718 | void Heap::gatherScratchBufferRoots(ConservativeRoots& roots) |
719 | { |
720 | #if ENABLE(DFG_JIT) |
721 | if (!VM::canUseJIT()) |
722 | return; |
723 | m_vm.gatherScratchBufferRoots(roots); |
724 | #else |
725 | UNUSED_PARAM(roots); |
726 | #endif |
727 | } |
728 | |
729 | void Heap::beginMarking() |
730 | { |
TimingScope timingScope(*this, "Heap::beginMarking");
732 | m_jitStubRoutines->clearMarks(); |
733 | m_objectSpace.beginMarking(); |
734 | setMutatorShouldBeFenced(true); |
735 | } |
736 | |
737 | void Heap::removeDeadCompilerWorklistEntries() |
738 | { |
739 | #if ENABLE(DFG_JIT) |
740 | if (!VM::canUseJIT()) |
741 | return; |
742 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
743 | DFG::existingWorklistForIndex(i).removeDeadPlans(m_vm); |
744 | #endif |
745 | } |
746 | |
747 | bool Heap::isAnalyzingHeap() const |
748 | { |
749 | HeapProfiler* heapProfiler = m_vm.heapProfiler(); |
750 | if (UNLIKELY(heapProfiler)) |
751 | return heapProfiler->activeHeapAnalyzer(); |
752 | return false; |
753 | } |
754 | |
struct GatherExtraHeapData : MarkedBlock::CountFunctor {
GatherExtraHeapData(VM& vm, HeapAnalyzer& analyzer)
757 | : m_vm(vm) |
758 | , m_analyzer(analyzer) |
759 | { |
760 | } |
761 | |
IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
763 | { |
764 | if (isJSCellKind(kind)) { |
765 | JSCell* cell = static_cast<JSCell*>(heapCell); |
766 | cell->methodTable(m_vm)->analyzeHeap(cell, m_analyzer); |
767 | } |
768 | return IterationStatus::Continue; |
769 | } |
770 | |
VM& m_vm;
HeapAnalyzer& m_analyzer;
773 | }; |
774 | |
void Heap::gatherExtraHeapData(HeapProfiler& heapProfiler)
776 | { |
777 | if (auto* analyzer = heapProfiler.activeHeapAnalyzer()) { |
778 | HeapIterationScope heapIterationScope(*this); |
779 | GatherExtraHeapData functor(m_vm, *analyzer); |
780 | m_objectSpace.forEachLiveCell(heapIterationScope, functor); |
781 | } |
782 | } |
783 | |
784 | struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor { |
785 | RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot) |
786 | : m_snapshot(snapshot) |
787 | { |
788 | } |
789 | |
790 | IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const |
791 | { |
792 | if (isJSCellKind(kind)) |
793 | m_snapshot.sweepCell(static_cast<JSCell*>(cell)); |
794 | return IterationStatus::Continue; |
795 | } |
796 | |
797 | HeapSnapshot& m_snapshot; |
798 | }; |
799 | |
800 | void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler) |
801 | { |
802 | if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) { |
803 | HeapIterationScope heapIterationScope(*this); |
804 | RemoveDeadHeapSnapshotNodes functor(*snapshot); |
805 | m_objectSpace.forEachDeadCell(heapIterationScope, functor); |
806 | snapshot->shrinkToFit(); |
807 | } |
808 | } |
809 | |
810 | void Heap::updateObjectCounts() |
811 | { |
812 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) |
813 | m_totalBytesVisited = 0; |
814 | |
815 | m_totalBytesVisitedThisCycle = bytesVisited(); |
816 | |
817 | m_totalBytesVisited += m_totalBytesVisitedThisCycle; |
818 | } |
819 | |
820 | void Heap::endMarking() |
821 | { |
822 | forEachSlotVisitor( |
823 | [&] (SlotVisitor& visitor) { |
824 | visitor.reset(); |
825 | }); |
826 | |
827 | assertMarkStacksEmpty(); |
828 | |
829 | RELEASE_ASSERT(m_raceMarkStack->isEmpty()); |
830 | |
831 | m_objectSpace.endMarking(); |
832 | setMutatorShouldBeFenced(Options::forceFencedBarrier()); |
833 | } |
834 | |
835 | size_t Heap::objectCount() |
836 | { |
837 | return m_objectSpace.objectCount(); |
838 | } |
839 | |
size_t Heap::extraMemorySize()
841 | { |
842 | // FIXME: Change this to use SaturatedArithmetic when available. |
843 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
844 | Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize; |
845 | checkedTotal += m_deprecatedExtraMemorySize; |
846 | checkedTotal += m_arrayBuffers.size(); |
847 | size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet(); |
848 | |
849 | ASSERT(m_objectSpace.capacity() >= m_objectSpace.size()); |
850 | return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity()); |
851 | } |
852 | |
853 | size_t Heap::size() |
854 | { |
855 | return m_objectSpace.size() + extraMemorySize(); |
856 | } |
857 | |
858 | size_t Heap::capacity() |
859 | { |
860 | return m_objectSpace.capacity() + extraMemorySize(); |
861 | } |
862 | |
863 | size_t Heap::protectedGlobalObjectCount() |
864 | { |
865 | size_t result = 0; |
866 | forEachProtectedCell( |
867 | [&] (JSCell* cell) { |
868 | if (cell->isObject() && asObject(cell)->isGlobalObject()) |
869 | result++; |
870 | }); |
871 | return result; |
872 | } |
873 | |
874 | size_t Heap::globalObjectCount() |
875 | { |
876 | HeapIterationScope iterationScope(*this); |
877 | size_t result = 0; |
878 | m_objectSpace.forEachLiveCell( |
879 | iterationScope, |
880 | [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus { |
881 | if (!isJSCellKind(kind)) |
882 | return IterationStatus::Continue; |
883 | JSCell* cell = static_cast<JSCell*>(heapCell); |
884 | if (cell->isObject() && asObject(cell)->isGlobalObject()) |
885 | result++; |
886 | return IterationStatus::Continue; |
887 | }); |
888 | return result; |
889 | } |
890 | |
891 | size_t Heap::protectedObjectCount() |
892 | { |
893 | size_t result = 0; |
894 | forEachProtectedCell( |
895 | [&] (JSCell*) { |
896 | result++; |
897 | }); |
898 | return result; |
899 | } |
900 | |
901 | std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts() |
902 | { |
903 | std::unique_ptr<TypeCountSet> result = makeUnique<TypeCountSet>(); |
904 | forEachProtectedCell( |
905 | [&] (JSCell* cell) { |
906 | recordType(vm(), *result, cell); |
907 | }); |
908 | return result; |
909 | } |
910 | |
911 | std::unique_ptr<TypeCountSet> Heap::objectTypeCounts() |
912 | { |
913 | std::unique_ptr<TypeCountSet> result = makeUnique<TypeCountSet>(); |
914 | HeapIterationScope iterationScope(*this); |
915 | m_objectSpace.forEachLiveCell( |
916 | iterationScope, |
917 | [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus { |
918 | if (isJSCellKind(kind)) |
919 | recordType(vm(), *result, static_cast<JSCell*>(cell)); |
920 | return IterationStatus::Continue; |
921 | }); |
922 | return result; |
923 | } |
924 | |
925 | void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort) |
926 | { |
927 | if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting) |
928 | return; |
929 | |
930 | VM& vm = m_vm; |
931 | PreventCollectionScope preventCollectionScope(*this); |
932 | |
933 | // If JavaScript is running, it's not safe to delete all JavaScript code, since |
934 | // we'll end up returning to deleted code. |
935 | RELEASE_ASSERT(!vm.entryScope); |
936 | RELEASE_ASSERT(!m_collectionScope); |
937 | |
938 | completeAllJITPlans(); |
939 | |
940 | vm.forEachScriptExecutableSpace( |
941 | [&] (auto& spaceAndSet) { |
942 | HeapIterationScope heapIterationScope(*this); |
943 | auto& set = spaceAndSet.set; |
944 | set.forEachLiveCell( |
945 | [&] (HeapCell* cell, HeapCell::Kind) { |
946 | ScriptExecutable* executable = static_cast<ScriptExecutable*>(cell); |
947 | executable->clearCode(set); |
948 | }); |
949 | }); |
950 | |
951 | #if ENABLE(WEBASSEMBLY) |
952 | { |
953 | // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will |
954 | // have no idea that we cleared the code from all of the Executables in the |
955 | // VM. This could leave Wasm in an inconsistent state where it has an IC that |
956 | // points into a CodeBlock that could be dead. The IC will still succeed because |
957 | // it uses a callee check, but then it will call into dead code. |
958 | HeapIterationScope heapIterationScope(*this); |
959 | if (vm.m_webAssemblyCodeBlockSpace) { |
960 | vm.m_webAssemblyCodeBlockSpace->forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) { |
961 | ASSERT_UNUSED(kind, kind == HeapCell::JSCell); |
962 | JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell); |
963 | codeBlock->clearJSCallICs(vm); |
964 | }); |
965 | } |
966 | } |
967 | #endif |
968 | } |
969 | |
970 | void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort) |
971 | { |
972 | if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting) |
973 | return; |
974 | |
975 | VM& vm = m_vm; |
976 | PreventCollectionScope preventCollectionScope(*this); |
977 | |
978 | RELEASE_ASSERT(!m_collectionScope); |
979 | |
980 | HeapIterationScope heapIterationScope(*this); |
981 | vm.unlinkedFunctionExecutableSpace.set.forEachLiveCell( |
982 | [&] (HeapCell* cell, HeapCell::Kind) { |
983 | UnlinkedFunctionExecutable* executable = static_cast<UnlinkedFunctionExecutable*>(cell); |
984 | executable->clearCode(vm); |
985 | }); |
986 | } |
987 | |
988 | void Heap::deleteUnmarkedCompiledCode() |
989 | { |
990 | vm().forEachScriptExecutableSpace([] (auto& space) { space.space.sweep(); }); |
991 | vm().forEachCodeBlockSpace([] (auto& space) { space.space.sweep(); }); // Sweeping must occur before deleting stubs, otherwise the stubs might still think they're alive as they get deleted. |
992 | m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines(); |
993 | } |
994 | |
995 | void Heap::addToRememberedSet(const JSCell* constCell) |
996 | { |
997 | JSCell* cell = const_cast<JSCell*>(constCell); |
998 | ASSERT(cell); |
999 | ASSERT(!Options::useConcurrentJIT() || !isCompilationThread()); |
1000 | m_barriersExecuted++; |
1001 | if (m_mutatorShouldBeFenced) { |
1002 | WTF::loadLoadFence(); |
1003 | if (!isMarked(cell)) { |
// During a full collection a store into an unmarked object that had survived past
1005 | // collections will manifest as a store to an unmarked PossiblyBlack object. If the |
1006 | // object gets marked at some time after this then it will go down the normal marking |
1007 | // path. So, we don't have to remember this object. We could return here. But we go |
1008 | // further and attempt to re-white the object. |
1009 | |
1010 | RELEASE_ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Full); |
1011 | |
1012 | if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) { |
1013 | // Now we protect against this race: |
1014 | // |
1015 | // 1) Object starts out black + unmarked. |
1016 | // --> We do isMarked here. |
1017 | // 2) Object is marked and greyed. |
1018 | // 3) Object is scanned and blacked. |
1019 | // --> We do atomicCompareExchangeCellStateStrong here. |
1020 | // |
1021 | // In this case we would have made the object white again, even though it should |
1022 | // be black. This check lets us correct our mistake. This relies on the fact that |
1023 | // isMarked converges monotonically to true. |
1024 | if (isMarked(cell)) { |
1025 | // It's difficult to work out whether the object should be grey or black at |
1026 | // this point. We say black conservatively. |
1027 | cell->setCellState(CellState::PossiblyBlack); |
1028 | } |
1029 | |
1030 | // Either way, we can return. Most likely, the object was not marked, and so the |
1031 | // object is now labeled white. This means that future barrier executions will not |
1032 | // fire. In the unlikely event that the object had become marked, we can still |
1033 | // return anyway, since we proved that the object was not marked at the time that |
1034 | // we executed this slow path. |
1035 | } |
1036 | |
1037 | return; |
1038 | } |
1039 | } else |
1040 | ASSERT(isMarked(cell)); |
1041 | // It could be that the object was *just* marked. This means that the collector may set the |
1042 | // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to |
1043 | // race with the collector here. If we win then this is accurate because the object _will_ |
1044 | // get scanned again. If we lose then someone else will barrier the object again. That would |
1045 | // be unfortunate but not the end of the world. |
1046 | cell->setCellState(CellState::PossiblyGrey); |
1047 | m_mutatorMarkStack->append(cell); |
1048 | } |
1049 | |
1050 | void Heap::sweepSynchronously() |
1051 | { |
1052 | MonotonicTime before { }; |
1053 | if (Options::logGC()) { |
1054 | dataLog("Full sweep: " , capacity() / 1024, "kb " ); |
1055 | before = MonotonicTime::now(); |
1056 | } |
1057 | m_objectSpace.sweepBlocks(); |
1058 | m_objectSpace.shrink(); |
1059 | if (Options::logGC()) { |
1060 | MonotonicTime after = MonotonicTime::now(); |
1061 | dataLog("=> " , capacity() / 1024, "kb, " , (after - before).milliseconds(), "ms" ); |
1062 | } |
1063 | } |
1064 | |
1065 | void Heap::collect(Synchronousness synchronousness, GCRequest request) |
1066 | { |
1067 | switch (synchronousness) { |
1068 | case Async: |
1069 | collectAsync(request); |
1070 | return; |
1071 | case Sync: |
1072 | collectSync(request); |
1073 | return; |
1074 | } |
1075 | RELEASE_ASSERT_NOT_REACHED(); |
1076 | } |
1077 | |
1078 | void Heap::collectNow(Synchronousness synchronousness, GCRequest request) |
1079 | { |
1080 | if (validateDFGDoesGC) |
1081 | RELEASE_ASSERT(expectDoesGC()); |
1082 | |
1083 | switch (synchronousness) { |
1084 | case Async: { |
1085 | collectAsync(request); |
1086 | stopIfNecessary(); |
1087 | return; |
1088 | } |
1089 | |
1090 | case Sync: { |
1091 | collectSync(request); |
1092 | |
1093 | DeferGCForAWhile deferGC(*this); |
1094 | if (UNLIKELY(Options::useImmortalObjects())) |
1095 | sweeper().stopSweeping(); |
1096 | |
1097 | bool alreadySweptInCollectSync = shouldSweepSynchronously(); |
1098 | if (!alreadySweptInCollectSync) { |
1099 | if (Options::logGC()) |
1100 | dataLog("[GC<" , RawPointer(this), ">: " ); |
1101 | sweepSynchronously(); |
1102 | if (Options::logGC()) |
1103 | dataLog("]\n" ); |
1104 | } |
1105 | m_objectSpace.assertNoUnswept(); |
1106 | |
1107 | sweepAllLogicallyEmptyWeakBlocks(); |
1108 | return; |
1109 | } } |
1110 | RELEASE_ASSERT_NOT_REACHED(); |
1111 | } |
1112 | |
1113 | void Heap::collectAsync(GCRequest request) |
1114 | { |
1115 | if (validateDFGDoesGC) |
1116 | RELEASE_ASSERT(expectDoesGC()); |
1117 | |
1118 | if (!m_isSafeToCollect) |
1119 | return; |
1120 | |
1121 | bool alreadyRequested = false; |
1122 | { |
1123 | LockHolder locker(*m_threadLock); |
1124 | for (const GCRequest& previousRequest : m_requests) { |
1125 | if (request.subsumedBy(previousRequest)) { |
1126 | alreadyRequested = true; |
1127 | break; |
1128 | } |
1129 | } |
1130 | } |
1131 | if (alreadyRequested) |
1132 | return; |
1133 | |
1134 | requestCollection(request); |
1135 | } |
1136 | |
1137 | void Heap::collectSync(GCRequest request) |
1138 | { |
1139 | if (validateDFGDoesGC) |
1140 | RELEASE_ASSERT(expectDoesGC()); |
1141 | |
1142 | if (!m_isSafeToCollect) |
1143 | return; |
1144 | |
1145 | waitForCollection(requestCollection(request)); |
1146 | } |
1147 | |
1148 | bool Heap::shouldCollectInCollectorThread(const AbstractLocker&) |
1149 | { |
1150 | RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket)); |
1151 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
1152 | |
1153 | if (false) |
1154 | dataLog("Mutator has the conn = " , !!(m_worldState.load() & mutatorHasConnBit), "\n" ); |
1155 | |
1156 | return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit); |
1157 | } |
1158 | |
1159 | void Heap::collectInCollectorThread() |
1160 | { |
1161 | for (;;) { |
1162 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr); |
1163 | switch (result) { |
1164 | case RunCurrentPhaseResult::Finished: |
1165 | return; |
1166 | case RunCurrentPhaseResult::Continue: |
1167 | break; |
1168 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1169 | RELEASE_ASSERT_NOT_REACHED(); |
1170 | break; |
1171 | } |
1172 | } |
1173 | } |
1174 | |
1175 | ALWAYS_INLINE int asInt(CollectorPhase phase) |
1176 | { |
1177 | return static_cast<int>(phase); |
1178 | } |
1179 | |
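// "The conn" (as in having the conn of a ship) names which agent, mutator or
// collector, is currently driving the collector's phase machine. Exactly one
// side holds it at a time, tracked by mutatorHasConnBit in m_worldState.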
1180 | void Heap::checkConn(GCConductor conn) |
1181 | { |
1182 | unsigned worldState = m_worldState.load(); |
1183 | switch (conn) { |
1184 | case GCConductor::Mutator: |
1185 | RELEASE_ASSERT(worldState & mutatorHasConnBit, worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm().id(), VM::numberOfIDs(), vm().isEntered()); |
1186 | return; |
1187 | case GCConductor::Collector: |
1188 | RELEASE_ASSERT(!(worldState & mutatorHasConnBit), worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm().id(), VM::numberOfIDs(), vm().isEntered()); |
1189 | return; |
1190 | } |
1191 | RELEASE_ASSERT_NOT_REACHED(); |
1192 | } |
1193 | |
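// Phase graph driven below: NotRunning -> Begin -> Fixpoint, with Fixpoint
// alternating against Concurrent (via Reloop) until marking converges, then
// End -> NotRunning. Begin, Fixpoint, Reloop, and End run with the world
// stopped; Concurrent runs with the mutator resumed (see
// worldShouldBeSuspended and finishChangingPhase).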
1194 | auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult |
1195 | { |
1196 | checkConn(conn); |
1197 | m_currentThreadState = currentThreadState; |
1198 | m_currentThread = &Thread::current(); |
1199 | |
1200 | if (conn == GCConductor::Mutator) |
1201 | sanitizeStackForVM(vm()); |
1202 | |
1203 | // If the collector transfers the conn to the mutator, it leaves us in between phases. |
1204 | if (!finishChangingPhase(conn)) { |
// A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1206 | // this, but it's probably not the end of the world if it did happen. |
1207 | if (false) |
1208 | dataLog("Conn bounce-back.\n" ); |
1209 | return RunCurrentPhaseResult::Finished; |
1210 | } |
1211 | |
1212 | bool result = false; |
1213 | switch (m_currentPhase) { |
1214 | case CollectorPhase::NotRunning: |
1215 | result = runNotRunningPhase(conn); |
1216 | break; |
1217 | |
1218 | case CollectorPhase::Begin: |
1219 | result = runBeginPhase(conn); |
1220 | break; |
1221 | |
1222 | case CollectorPhase::Fixpoint: |
1223 | if (!currentThreadState && conn == GCConductor::Mutator) |
1224 | return RunCurrentPhaseResult::NeedCurrentThreadState; |
1225 | |
1226 | result = runFixpointPhase(conn); |
1227 | break; |
1228 | |
1229 | case CollectorPhase::Concurrent: |
1230 | result = runConcurrentPhase(conn); |
1231 | break; |
1232 | |
1233 | case CollectorPhase::Reloop: |
1234 | result = runReloopPhase(conn); |
1235 | break; |
1236 | |
1237 | case CollectorPhase::End: |
1238 | result = runEndPhase(conn); |
1239 | break; |
1240 | } |
1241 | |
1242 | return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished; |
1243 | } |
1244 | |
1245 | NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn) |
1246 | { |
1247 | // Check m_requests since the mutator calls this to poll what's going on. |
1248 | { |
1249 | auto locker = holdLock(*m_threadLock); |
1250 | if (m_requests.isEmpty()) |
1251 | return false; |
1252 | } |
1253 | |
1254 | return changePhase(conn, CollectorPhase::Begin); |
1255 | } |
1256 | |
1257 | NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn) |
1258 | { |
1259 | m_currentGCStartTime = MonotonicTime::now(); |
1260 | |
1261 | { |
1262 | LockHolder locker(*m_threadLock); |
1263 | RELEASE_ASSERT(!m_requests.isEmpty()); |
1264 | m_currentRequest = m_requests.first(); |
1265 | } |
1266 | |
1267 | if (Options::logGC()) |
1268 | dataLog("[GC<" , RawPointer(this), ">: START " , gcConductorShortName(conn), " " , capacity() / 1024, "kb " ); |
1269 | |
1270 | m_beforeGC = MonotonicTime::now(); |
1271 | |
1272 | if (!Options::seedOfVMRandomForFuzzer()) |
1273 | vm().random().setSeed(cryptographicallyRandomNumber()); |
1274 | |
1275 | if (m_collectionScope) { |
1276 | dataLog("Collection scope already set during GC: " , *m_collectionScope, "\n" ); |
1277 | RELEASE_ASSERT_NOT_REACHED(); |
1278 | } |
1279 | |
1280 | willStartCollection(); |
1281 | |
1282 | if (UNLIKELY(m_verifier)) { |
1283 | // Verify that live objects from the last GC cycle haven't been corrupted by |
1284 | // mutators before we begin this new GC cycle. |
1285 | m_verifier->verify(HeapVerifier::Phase::BeforeGC); |
1286 | |
1287 | m_verifier->startGC(); |
1288 | m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking); |
1289 | } |
1290 | |
1291 | prepareForMarking(); |
1292 | |
1293 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
1294 | m_opaqueRoots.clear(); |
1295 | m_collectorSlotVisitor->clearMarkStacks(); |
1296 | m_mutatorMarkStack->clear(); |
1297 | } |
1298 | |
1299 | RELEASE_ASSERT(m_raceMarkStack->isEmpty()); |
1300 | |
1301 | beginMarking(); |
1302 | |
1303 | forEachSlotVisitor( |
1304 | [&] (SlotVisitor& visitor) { |
1305 | visitor.didStartMarking(); |
1306 | }); |
1307 | |
1308 | m_parallelMarkersShouldExit = false; |
1309 | |
1310 | m_helperClient.setFunction( |
1311 | [this] () { |
1312 | SlotVisitor* slotVisitor; |
1313 | { |
1314 | LockHolder locker(m_parallelSlotVisitorLock); |
RELEASE_ASSERT_WITH_MESSAGE(!m_availableParallelSlotVisitors.isEmpty(), "Parallel SlotVisitors are allocated a priori");
1316 | slotVisitor = m_availableParallelSlotVisitors.takeLast(); |
1317 | } |
1318 | |
1319 | Thread::registerGCThread(GCThreadType::Helper); |
1320 | |
1321 | { |
1322 | ParallelModeEnabler parallelModeEnabler(*slotVisitor); |
1323 | slotVisitor->drainFromShared(SlotVisitor::SlaveDrain); |
1324 | } |
1325 | |
1326 | { |
1327 | LockHolder locker(m_parallelSlotVisitorLock); |
1328 | m_availableParallelSlotVisitors.append(slotVisitor); |
1329 | } |
1330 | }); |
1331 | |
1332 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1333 | |
1334 | m_constraintSet->didStartMarking(); |
1335 | |
1336 | m_scheduler->beginCollection(); |
1337 | if (Options::logGC()) |
1338 | m_scheduler->log(); |
1339 | |
1340 | // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()" |
1341 | // checks because bootstrap would have put things into the visitor. So, we should fall |
1342 | // through to draining. |
1343 | |
1344 | if (!slotVisitor.didReachTermination()) { |
1345 | dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n" ); |
1346 | dataLog("slotVisitor.isEmpty(): " , slotVisitor.isEmpty(), "\n" ); |
1347 | dataLog("slotVisitor.collectorMarkStack().isEmpty(): " , slotVisitor.collectorMarkStack().isEmpty(), "\n" ); |
1348 | dataLog("slotVisitor.mutatorMarkStack().isEmpty(): " , slotVisitor.mutatorMarkStack().isEmpty(), "\n" ); |
1349 | dataLog("m_numberOfActiveParallelMarkers: " , m_numberOfActiveParallelMarkers, "\n" ); |
1350 | dataLog("m_sharedCollectorMarkStack->isEmpty(): " , m_sharedCollectorMarkStack->isEmpty(), "\n" ); |
1351 | dataLog("m_sharedMutatorMarkStack->isEmpty(): " , m_sharedMutatorMarkStack->isEmpty(), "\n" ); |
1352 | dataLog("slotVisitor.didReachTermination(): " , slotVisitor.didReachTermination(), "\n" ); |
1353 | RELEASE_ASSERT_NOT_REACHED(); |
1354 | } |
1355 | |
1356 | return changePhase(conn, CollectorPhase::Fixpoint); |
1357 | } |
1358 | |
1359 | NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn) |
1360 | { |
1361 | RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState); |
1362 | |
1363 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1364 | |
1365 | if (Options::logGC()) { |
1366 | HashMap<const char*, size_t> visitMap; |
1367 | forEachSlotVisitor( |
1368 | [&] (SlotVisitor& slotVisitor) { |
1369 | visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024); |
1370 | }); |
1371 | |
1372 | auto perVisitorDump = sortedMapDump( |
1373 | visitMap, |
1374 | [] (const char* a, const char* b) -> bool { |
1375 | return strcmp(a, b) < 0; |
1376 | }, |
1377 | ":" , " " ); |
1378 | |
1379 | dataLog("v=" , bytesVisited() / 1024, "kb (" , perVisitorDump, ") o=" , m_opaqueRoots.size(), " b=" , m_barriersExecuted, " " ); |
1380 | } |
1381 | |
1382 | if (slotVisitor.didReachTermination()) { |
1383 | m_opaqueRoots.deleteOldTables(); |
1384 | |
1385 | m_scheduler->didReachTermination(); |
1386 | |
1387 | assertMarkStacksEmpty(); |
1388 | |
1389 | // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely, |
1390 | // we don't have to execute root constraints again unless the mutator did run. At a |
1391 | // minimum, we could use this for work estimates - but it's probably more than just an |
1392 | // estimate. |
1393 | // https://bugs.webkit.org/show_bug.cgi?id=166828 |
1394 | |
1395 | // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also |
1396 | // add their own using Heap::addMarkingConstraint(). |
1397 | bool converged = m_constraintSet->executeConvergence(slotVisitor); |
1398 | |
1399 | // FIXME: The slotVisitor.isEmpty() check is most likely not needed. |
1400 | // https://bugs.webkit.org/show_bug.cgi?id=180310 |
1401 | if (converged && slotVisitor.isEmpty()) { |
1402 | assertMarkStacksEmpty(); |
1403 | return changePhase(conn, CollectorPhase::End); |
1404 | } |
1405 | |
1406 | m_scheduler->didExecuteConstraints(); |
1407 | } |
1408 | |
1409 | if (Options::logGC()) |
dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1411 | |
1412 | { |
1413 | ParallelModeEnabler enabler(slotVisitor); |
1414 | slotVisitor.drainInParallel(m_scheduler->timeToResume()); |
1415 | } |
1416 | |
1417 | m_scheduler->synchronousDrainingDidStall(); |
1418 | |
1419 | // This is kinda tricky. The termination check looks at: |
1420 | // |
1421 | // - Whether the marking threads are active. If they are not, this means that the marking threads' |
1422 | // SlotVisitors are empty. |
1423 | // - Whether the collector's slot visitor is empty. |
1424 | // - Whether the shared mark stacks are empty. |
1425 | // |
1426 | // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC |
1427 | // work increment, so it must be empty now. |
1428 | if (slotVisitor.didReachTermination()) |
return true; // This is like relooping to the top of runFixpointPhase().
1430 | |
1431 | if (!m_scheduler->shouldResume()) |
1432 | return true; |
1433 | |
1434 | m_scheduler->willResume(); |
1435 | |
1436 | if (Options::logGC()) { |
1437 | double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds(); |
1438 | dataLog("p=" , thisPauseMS, "ms (max " , maxPauseMS(thisPauseMS), ")...]\n" ); |
1439 | } |
1440 | |
1441 | // Forgive the mutator for its past failures to keep up. |
1442 | // FIXME: Figure out if moving this to different places results in perf changes. |
1443 | m_incrementBalance = 0; |
1444 | |
1445 | return changePhase(conn, CollectorPhase::Concurrent); |
1446 | } |
1447 | |
1448 | NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn) |
1449 | { |
1450 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1451 | |
1452 | switch (conn) { |
1453 | case GCConductor::Mutator: { |
1454 | // When the mutator has the conn, we poll runConcurrentPhase() on every time someone says |
1455 | // stopIfNecessary(), so on every allocation slow path. When that happens we poll if it's time |
1456 | // to stop and do some work. |
1457 | if (slotVisitor.didReachTermination() |
1458 | || m_scheduler->shouldStop()) |
1459 | return changePhase(conn, CollectorPhase::Reloop); |
1460 | |
1461 | // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate |
1462 | // everything. This is super cheap if the SlotVisitor is already empty. |
1463 | slotVisitor.donateAll(); |
1464 | return false; |
1465 | } |
1466 | case GCConductor::Collector: { |
1467 | { |
1468 | ParallelModeEnabler enabler(slotVisitor); |
1469 | slotVisitor.drainInParallelPassively(m_scheduler->timeToStop()); |
1470 | } |
1471 | return changePhase(conn, CollectorPhase::Reloop); |
1472 | } } |
1473 | |
1474 | RELEASE_ASSERT_NOT_REACHED(); |
1475 | return false; |
1476 | } |
1477 | |
1478 | NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn) |
1479 | { |
1480 | if (Options::logGC()) |
1481 | dataLog("[GC<" , RawPointer(this), ">: " , gcConductorShortName(conn), " " ); |
1482 | |
1483 | m_scheduler->didStop(); |
1484 | |
1485 | if (Options::logGC()) |
1486 | m_scheduler->log(); |
1487 | |
1488 | return changePhase(conn, CollectorPhase::Fixpoint); |
1489 | } |
1490 | |
1491 | NEVER_INLINE bool Heap::runEndPhase(GCConductor conn) |
1492 | { |
1493 | m_scheduler->endCollection(); |
1494 | |
1495 | { |
1496 | auto locker = holdLock(m_markingMutex); |
1497 | m_parallelMarkersShouldExit = true; |
1498 | m_markingConditionVariable.notifyAll(); |
1499 | } |
1500 | m_helperClient.finish(); |
1501 | |
1502 | iterateExecutingAndCompilingCodeBlocks( |
1503 | [&] (CodeBlock* codeBlock) { |
1504 | writeBarrier(codeBlock); |
1505 | }); |
1506 | |
1507 | updateObjectCounts(); |
1508 | endMarking(); |
1509 | |
1510 | if (UNLIKELY(m_verifier)) { |
1511 | m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking); |
1512 | m_verifier->verify(HeapVerifier::Phase::AfterMarking); |
1513 | } |
1514 | |
1515 | if (vm().typeProfiler()) |
1516 | vm().typeProfiler()->invalidateTypeSetCache(vm()); |
1517 | |
1518 | reapWeakHandles(); |
1519 | pruneStaleEntriesFromWeakGCMaps(); |
1520 | sweepArrayBuffers(); |
1521 | snapshotUnswept(); |
1522 | finalizeUnconditionalFinalizers(); |
1523 | removeDeadCompilerWorklistEntries(); |
1524 | notifyIncrementalSweeper(); |
1525 | |
1526 | m_codeBlocks->iterateCurrentlyExecuting( |
1527 | [&] (CodeBlock* codeBlock) { |
1528 | writeBarrier(codeBlock); |
1529 | }); |
1530 | m_codeBlocks->clearCurrentlyExecuting(); |
1531 | |
1532 | m_objectSpace.prepareForAllocation(); |
1533 | updateAllocationLimits(); |
1534 | |
1535 | if (UNLIKELY(m_verifier)) { |
1536 | m_verifier->trimDeadCells(); |
1537 | m_verifier->verify(HeapVerifier::Phase::AfterGC); |
1538 | } |
1539 | |
1540 | didFinishCollection(); |
1541 | |
1542 | if (m_currentRequest.didFinishEndPhase) |
1543 | m_currentRequest.didFinishEndPhase->run(); |
1544 | |
1545 | if (false) { |
1546 | dataLog("Heap state after GC:\n" ); |
1547 | m_objectSpace.dumpBits(); |
1548 | } |
1549 | |
1550 | if (Options::logGC()) { |
1551 | double thisPauseMS = (m_afterGC - m_stopTime).milliseconds(); |
1552 | dataLog("p=" , thisPauseMS, "ms (max " , maxPauseMS(thisPauseMS), "), cycle " , (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n" ); |
1553 | } |
1554 | |
1555 | { |
1556 | auto locker = holdLock(*m_threadLock); |
1557 | m_requests.removeFirst(); |
1558 | m_lastServedTicket++; |
1559 | clearMutatorWaiting(); |
1560 | } |
1561 | ParkingLot::unparkAll(&m_worldState); |
1562 | |
1563 | if (false) |
1564 | dataLog("GC END!\n" ); |
1565 | |
1566 | setNeedFinalize(); |
1567 | |
1568 | m_lastGCStartTime = m_currentGCStartTime; |
1569 | m_lastGCEndTime = MonotonicTime::now(); |
1570 | m_totalGCTime += m_lastGCEndTime - m_lastGCStartTime; |
1571 | |
1572 | return changePhase(conn, CollectorPhase::NotRunning); |
1573 | } |
1574 | |
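// Phase changes are two-step: changePhase() records the requested next phase, and
// finishChangingPhase() performs the actual suspend/resume handshake. The handshake can fail
// (returning false) when the conn is handed off to the mutator; m_nextPhase stays set, so the
// change is completed later by whichever side ends up driving the collection.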
1575 | bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase) |
1576 | { |
1577 | checkConn(conn); |
1578 | |
1579 | m_lastPhase = m_currentPhase; |
1580 | m_nextPhase = nextPhase; |
1581 | |
1582 | return finishChangingPhase(conn); |
1583 | } |
1584 | |
1585 | NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn) |
1586 | { |
1587 | checkConn(conn); |
1588 | |
1589 | if (m_nextPhase == m_currentPhase) |
1590 | return true; |
1591 | |
1592 | if (false) |
        dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1594 | |
1595 | m_phaseVersion++; |
1596 | |
1597 | bool suspendedBefore = worldShouldBeSuspended(m_currentPhase); |
1598 | bool suspendedAfter = worldShouldBeSuspended(m_nextPhase); |
1599 | |
1600 | if (suspendedBefore != suspendedAfter) { |
1601 | if (suspendedBefore) { |
1602 | RELEASE_ASSERT(!suspendedAfter); |
1603 | |
1604 | resumeThePeriphery(); |
1605 | if (conn == GCConductor::Collector) |
1606 | resumeTheMutator(); |
1607 | else |
1608 | handleNeedFinalize(); |
1609 | } else { |
1610 | RELEASE_ASSERT(!suspendedBefore); |
1611 | RELEASE_ASSERT(suspendedAfter); |
1612 | |
1613 | if (conn == GCConductor::Collector) { |
1614 | waitWhileNeedFinalize(); |
1615 | if (!stopTheMutator()) { |
1616 | if (false) |
1617 | dataLog("Returning false.\n" ); |
1618 | return false; |
1619 | } |
1620 | } else { |
1621 | sanitizeStackForVM(m_vm); |
1622 | handleNeedFinalize(); |
1623 | } |
1624 | stopThePeriphery(conn); |
1625 | } |
1626 | } |
1627 | |
1628 | m_currentPhase = m_nextPhase; |
1629 | return true; |
1630 | } |
1631 | |
1632 | void Heap::stopThePeriphery(GCConductor conn) |
1633 | { |
1634 | if (m_worldIsStopped) { |
1635 | dataLog("FATAL: world already stopped.\n" ); |
1636 | RELEASE_ASSERT_NOT_REACHED(); |
1637 | } |
1638 | |
1639 | if (m_mutatorDidRun) |
1640 | m_mutatorExecutionVersion++; |
1641 | |
1642 | m_mutatorDidRun = false; |
1643 | |
1644 | suspendCompilerThreads(); |
1645 | m_worldIsStopped = true; |
1646 | |
1647 | forEachSlotVisitor( |
1648 | [&] (SlotVisitor& slotVisitor) { |
1649 | slotVisitor.updateMutatorIsStopped(NoLockingNecessary); |
1650 | }); |
1651 | |
1652 | #if ENABLE(JIT) |
1653 | if (VM::canUseJIT()) { |
1654 | DeferGCForAWhile awhile(*this); |
1655 | if (JITWorklist::ensureGlobalWorklist().completeAllForVM(m_vm) |
1656 | && conn == GCConductor::Collector) |
1657 | setGCDidJIT(); |
1658 | } |
1659 | #endif // ENABLE(JIT) |
1660 | UNUSED_PARAM(conn); |
1661 | |
1662 | if (auto* shadowChicken = vm().shadowChicken()) |
1663 | shadowChicken->update(vm(), vm().topCallFrame); |
1664 | |
1665 | m_structureIDTable.flushOldTables(); |
1666 | m_objectSpace.stopAllocating(); |
1667 | |
1668 | m_stopTime = MonotonicTime::now(); |
1669 | } |
1670 | |
1671 | NEVER_INLINE void Heap::resumeThePeriphery() |
1672 | { |
1673 | // Calling resumeAllocating does the Right Thing depending on whether this is the end of a |
1674 | // collection cycle or this is just a concurrent phase within a collection cycle: |
1675 | // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the |
1676 | // last active block. |
1677 | // - During collection cycle: it reinstates the last active block. |
1678 | m_objectSpace.resumeAllocating(); |
1679 | |
1680 | m_barriersExecuted = 0; |
1681 | |
1682 | if (!m_worldIsStopped) { |
1683 | dataLog("Fatal: collector does not believe that the world is stopped.\n" ); |
1684 | RELEASE_ASSERT_NOT_REACHED(); |
1685 | } |
1686 | m_worldIsStopped = false; |
1687 | |
1688 | // FIXME: This could be vastly improved: we want to grab the locks in the order in which they |
1689 | // become available. We basically want a lockAny() method that will lock whatever lock is available |
1690 | // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple |
1691 | // queues at once, which is totally achievable - it would just require memory allocation, which is |
1692 | // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock |
1693 | // with a DLG-style handshake mechanism, but that seems not as general. |
1694 | Vector<SlotVisitor*, 8> slotVisitorsToUpdate; |
1695 | |
1696 | forEachSlotVisitor( |
1697 | [&] (SlotVisitor& slotVisitor) { |
1698 | slotVisitorsToUpdate.append(&slotVisitor); |
1699 | }); |
1700 | |
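    // Bounded spin: poll each visitor, dropping the ones that have already acknowledged the
    // resumption or whose rightToRun lock we can grab without blocking. Whatever is left after
    // 40 rounds is updated below via the overload that takes the lock unconditionally.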
1701 | for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) { |
1702 | for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) { |
1703 | SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index]; |
1704 | bool remove = false; |
1705 | if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed()) |
1706 | remove = true; |
1707 | else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) { |
1708 | slotVisitor.updateMutatorIsStopped(locker); |
1709 | remove = true; |
1710 | } |
1711 | if (remove) { |
1712 | slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last(); |
1713 | slotVisitorsToUpdate.takeLast(); |
1714 | } |
1715 | } |
1716 | Thread::yield(); |
1717 | } |
1718 | |
1719 | for (SlotVisitor* slotVisitor : slotVisitorsToUpdate) |
1720 | slotVisitor->updateMutatorIsStopped(); |
1721 | |
1722 | resumeCompilerThreads(); |
1723 | } |
1724 | |
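// The mutator/collector handshake is encoded in m_worldState bits: stoppedBit means the world
// is stopped, hasAccessBit means the mutator currently holds heap access, and mutatorHasConnBit
// means the mutator is driving the collection. stopTheMutator() returns true once the world is
// stopped and false if it handed the conn to the mutator instead.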
1725 | bool Heap::stopTheMutator() |
1726 | { |
1727 | for (;;) { |
1728 | unsigned oldState = m_worldState.load(); |
1729 | if (oldState & stoppedBit) { |
1730 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1731 | RELEASE_ASSERT(!(oldState & mutatorWaitingBit)); |
1732 | RELEASE_ASSERT(!(oldState & mutatorHasConnBit)); |
1733 | return true; |
1734 | } |
1735 | |
1736 | if (oldState & mutatorHasConnBit) { |
1737 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1738 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1739 | return false; |
1740 | } |
1741 | |
1742 | if (!(oldState & hasAccessBit)) { |
1743 | RELEASE_ASSERT(!(oldState & mutatorHasConnBit)); |
1744 | RELEASE_ASSERT(!(oldState & mutatorWaitingBit)); |
1745 | // We can stop the world instantly. |
1746 | if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit)) |
1747 | return true; |
1748 | continue; |
1749 | } |
1750 | |
1751 | // Transfer the conn to the mutator and bail. |
1752 | RELEASE_ASSERT(oldState & hasAccessBit); |
1753 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1754 | unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit; |
1755 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1756 | if (false) |
1757 | dataLog("Handed off the conn.\n" ); |
1758 | m_stopIfNecessaryTimer->scheduleSoon(); |
1759 | ParkingLot::unparkAll(&m_worldState); |
1760 | return false; |
1761 | } |
1762 | } |
1763 | } |
1764 | |
1765 | NEVER_INLINE void Heap::resumeTheMutator() |
1766 | { |
    if (false)
        dataLog("Resuming the mutator.\n");
    for (;;) {
        unsigned oldState = m_worldState.load();
        if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
            dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        if (oldState & mutatorHasConnBit) {
            dataLog("Fatal: mutator has the conn.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (!(oldState & stoppedBit)) {
            if (false)
                dataLog("Returning because not stopped.\n");
            return;
        }

        if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
            if (false)
                dataLog("CASing and returning.\n");
1789 | ParkingLot::unparkAll(&m_worldState); |
1790 | return; |
1791 | } |
1792 | } |
1793 | } |
1794 | |
1795 | void Heap::stopIfNecessarySlow() |
1796 | { |
1797 | if (validateDFGDoesGC) |
1798 | RELEASE_ASSERT(expectDoesGC()); |
1799 | |
1800 | while (stopIfNecessarySlow(m_worldState.load())) { } |
1801 | |
1802 | RELEASE_ASSERT(m_worldState.load() & hasAccessBit); |
1803 | RELEASE_ASSERT(!(m_worldState.load() & stoppedBit)); |
1804 | |
1805 | handleGCDidJIT(); |
1806 | handleNeedFinalize(); |
1807 | m_mutatorDidRun = true; |
1808 | } |
1809 | |
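// Returns true when the caller should reload m_worldState and call this again (for example
// because a pending finalization was just handled), and false once the mutator may keep running.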
1810 | bool Heap::stopIfNecessarySlow(unsigned oldState) |
1811 | { |
1812 | if (validateDFGDoesGC) |
1813 | RELEASE_ASSERT(expectDoesGC()); |
1814 | |
1815 | RELEASE_ASSERT(oldState & hasAccessBit); |
1816 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1817 | |
1818 | // It's possible for us to wake up with finalization already requested but the world not yet |
1819 | // resumed. If that happens, we can't run finalization yet. |
1820 | if (handleNeedFinalize(oldState)) |
1821 | return true; |
1822 | |
1823 | // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then |
1824 | // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would |
1825 | // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit |
1826 | // and there would be some other bit indicating whether we were in some GC phase other than the |
1827 | // NotRunning or Concurrent ones. |
1828 | if (oldState & mutatorHasConnBit) |
1829 | collectInMutatorThread(); |
1830 | |
1831 | return false; |
1832 | } |
1833 | |
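// Runs collector phases on the mutator thread while the mutator has the conn. Most phases run
// directly; a phase that reports NeedCurrentThreadState must conservatively scan this thread, so
// we sanitize the stack and re-enter the loop under callWithCurrentThreadState.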
1834 | NEVER_INLINE void Heap::collectInMutatorThread() |
1835 | { |
1836 | CollectingScope collectingScope(*this); |
1837 | for (;;) { |
1838 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr); |
1839 | switch (result) { |
1840 | case RunCurrentPhaseResult::Finished: |
1841 | return; |
1842 | case RunCurrentPhaseResult::Continue: |
1843 | break; |
1844 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1845 | sanitizeStackForVM(m_vm); |
1846 | auto lambda = [&] (CurrentThreadState& state) { |
1847 | for (;;) { |
1848 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state); |
1849 | switch (result) { |
1850 | case RunCurrentPhaseResult::Finished: |
1851 | return; |
1852 | case RunCurrentPhaseResult::Continue: |
1853 | break; |
1854 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1855 | RELEASE_ASSERT_NOT_REACHED(); |
1856 | break; |
1857 | } |
1858 | } |
1859 | }; |
1860 | callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda))); |
1861 | return; |
1862 | } |
1863 | } |
1864 | } |
1865 | |
1866 | template<typename Func> |
1867 | void Heap::waitForCollector(const Func& func) |
1868 | { |
1869 | for (;;) { |
1870 | bool done; |
1871 | { |
1872 | LockHolder locker(*m_threadLock); |
1873 | done = func(locker); |
1874 | if (!done) { |
1875 | setMutatorWaiting(); |
1876 | |
                // At this point, the collector knows that we intend to wait, and it will clear the
                // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
                // prevents us from parking unless the world is also stopped. Unparking after
                // clearing means that if the clearing happens after we park, then we will unpark.
1881 | } |
1882 | } |
1883 | |
1884 | // If we're in a stop-the-world scenario, we need to wait for that even if done is true. |
1885 | unsigned oldState = m_worldState.load(); |
1886 | if (stopIfNecessarySlow(oldState)) |
1887 | continue; |
1888 | |
1889 | // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just |
1890 | // do the collection. |
1891 | relinquishConn(); |
1892 | |
1893 | if (done) { |
1894 | clearMutatorWaiting(); // Clean up just in case. |
1895 | return; |
1896 | } |
1897 | |
1898 | // If mutatorWaitingBit is still set then we want to wait. |
1899 | ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit); |
1900 | } |
1901 | } |
1902 | |
1903 | void Heap::acquireAccessSlow() |
1904 | { |
1905 | for (;;) { |
1906 | unsigned oldState = m_worldState.load(); |
1907 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1908 | |
1909 | if (oldState & stoppedBit) { |
1910 | if (verboseStop) { |
1911 | dataLog("Stopping in acquireAccess!\n" ); |
1912 | WTFReportBacktrace(); |
1913 | } |
1914 | // Wait until we're not stopped anymore. |
1915 | ParkingLot::compareAndPark(&m_worldState, oldState); |
1916 | continue; |
1917 | } |
1918 | |
1919 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1920 | unsigned newState = oldState | hasAccessBit; |
1921 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1922 | handleGCDidJIT(); |
1923 | handleNeedFinalize(); |
1924 | m_mutatorDidRun = true; |
1925 | stopIfNecessary(); |
1926 | return; |
1927 | } |
1928 | } |
1929 | } |
1930 | |
1931 | void Heap::releaseAccessSlow() |
1932 | { |
1933 | for (;;) { |
1934 | unsigned oldState = m_worldState.load(); |
1935 | if (!(oldState & hasAccessBit)) { |
1936 | dataLog("FATAL: Attempting to release access but the mutator does not have access.\n" ); |
1937 | RELEASE_ASSERT_NOT_REACHED(); |
1938 | } |
1939 | if (oldState & stoppedBit) { |
1940 | dataLog("FATAL: Attempting to release access but the mutator is stopped.\n" ); |
1941 | RELEASE_ASSERT_NOT_REACHED(); |
1942 | } |
1943 | |
1944 | if (handleNeedFinalize(oldState)) |
1945 | continue; |
1946 | |
1947 | unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit); |
1948 | |
1949 | if ((oldState & mutatorHasConnBit) |
1950 | && m_nextPhase != m_currentPhase) { |
1951 | // This means that the collector thread had given us the conn so that we would do something |
1952 | // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In |
1953 | // the meantime, since we're handing the conn over, the collector will be awoken and it is |
1954 | // sure to have work to do. |
1955 | newState |= stoppedBit; |
1956 | } |
1957 | |
1958 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1959 | if (oldState & mutatorHasConnBit) |
1960 | finishRelinquishingConn(); |
1961 | return; |
1962 | } |
1963 | } |
1964 | } |
1965 | |
1966 | bool Heap::relinquishConn(unsigned oldState) |
1967 | { |
1968 | RELEASE_ASSERT(oldState & hasAccessBit); |
1969 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1970 | |
1971 | if (!(oldState & mutatorHasConnBit)) |
1972 | return false; // Done. |
1973 | |
1974 | if (m_threadShouldStop) |
1975 | return false; |
1976 | |
1977 | if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit)) |
1978 | return true; // Loop around. |
1979 | |
1980 | finishRelinquishingConn(); |
1981 | return true; |
1982 | } |
1983 | |
1984 | void Heap::finishRelinquishingConn() |
1985 | { |
1986 | if (false) |
1987 | dataLog("Relinquished the conn.\n" ); |
1988 | |
1989 | sanitizeStackForVM(m_vm); |
1990 | |
1991 | auto locker = holdLock(*m_threadLock); |
1992 | if (!m_requests.isEmpty()) |
1993 | m_threadCondition->notifyOne(locker); |
1994 | ParkingLot::unparkAll(&m_worldState); |
1995 | } |
1996 | |
1997 | void Heap::relinquishConn() |
1998 | { |
1999 | while (relinquishConn(m_worldState.load())) { } |
2000 | } |
2001 | |
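// Note the return protocol shared by the handle*(unsigned) helpers below: false means the bit
// was clear, so there is nothing to do; true means "reload m_worldState and call me again",
// which covers both the case where work was done and the case where the weak CAS failed
// spuriously. Callers drive them with the idiom:
//
//     while (handleGCDidJIT(m_worldState.load())) { }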
2002 | bool Heap::handleGCDidJIT(unsigned oldState) |
2003 | { |
2004 | RELEASE_ASSERT(oldState & hasAccessBit); |
2005 | if (!(oldState & gcDidJITBit)) |
2006 | return false; |
2007 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) { |
2008 | WTF::crossModifyingCodeFence(); |
2009 | return true; |
2010 | } |
2011 | return true; |
2012 | } |
2013 | |
2014 | NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState) |
2015 | { |
2016 | RELEASE_ASSERT(oldState & hasAccessBit); |
2017 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
2018 | |
2019 | if (!(oldState & needFinalizeBit)) |
2020 | return false; |
2021 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) { |
2022 | finalize(); |
2023 | // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in |
2024 | // which case they would be waiting for us to release heap access. |
2025 | ParkingLot::unparkAll(&m_worldState); |
2026 | return true; |
2027 | } |
2028 | return true; |
2029 | } |
2030 | |
2031 | void Heap::handleGCDidJIT() |
2032 | { |
2033 | while (handleGCDidJIT(m_worldState.load())) { } |
2034 | } |
2035 | |
2036 | void Heap::handleNeedFinalize() |
2037 | { |
2038 | while (handleNeedFinalize(m_worldState.load())) { } |
2039 | } |
2040 | |
2041 | void Heap::setGCDidJIT() |
2042 | { |
2043 | m_worldState.transaction( |
2044 | [&] (unsigned& state) -> bool { |
2045 | RELEASE_ASSERT(state & stoppedBit); |
2046 | state |= gcDidJITBit; |
2047 | return true; |
2048 | }); |
2049 | } |
2050 | |
2051 | void Heap::setNeedFinalize() |
2052 | { |
2053 | m_worldState.exchangeOr(needFinalizeBit); |
2054 | ParkingLot::unparkAll(&m_worldState); |
2055 | m_stopIfNecessaryTimer->scheduleSoon(); |
2056 | } |
2057 | |
2058 | void Heap::waitWhileNeedFinalize() |
2059 | { |
2060 | for (;;) { |
2061 | unsigned oldState = m_worldState.load(); |
2062 | if (!(oldState & needFinalizeBit)) { |
2063 | // This means that either there was no finalize request or the main thread will finalize |
2064 | // with heap access, so a subsequent call to stopTheWorld() will return only when |
2065 | // finalize finishes. |
2066 | return; |
2067 | } |
2068 | ParkingLot::compareAndPark(&m_worldState, oldState); |
2069 | } |
2070 | } |
2071 | |
2072 | void Heap::setMutatorWaiting() |
2073 | { |
2074 | m_worldState.exchangeOr(mutatorWaitingBit); |
2075 | } |
2076 | |
2077 | void Heap::clearMutatorWaiting() |
2078 | { |
2079 | m_worldState.exchangeAnd(~mutatorWaitingBit); |
2080 | } |
2081 | |
2082 | void Heap::notifyThreadStopping(const AbstractLocker&) |
2083 | { |
2084 | m_threadIsStopping = true; |
2085 | clearMutatorWaiting(); |
2086 | ParkingLot::unparkAll(&m_worldState); |
2087 | } |
2088 | |
2089 | void Heap::finalize() |
2090 | { |
2091 | MonotonicTime before; |
2092 | if (Options::logGC()) { |
2093 | before = MonotonicTime::now(); |
2094 | dataLog("[GC<" , RawPointer(this), ">: finalize " ); |
2095 | } |
2096 | |
2097 | { |
2098 | SweepingScope sweepingScope(*this); |
2099 | deleteUnmarkedCompiledCode(); |
2100 | deleteSourceProviderCaches(); |
2101 | sweepInFinalize(); |
2102 | } |
2103 | |
2104 | if (HasOwnPropertyCache* cache = vm().hasOwnPropertyCache()) |
2105 | cache->clear(); |
2106 | |
2107 | immutableButterflyToStringCache.clear(); |
2108 | |
2109 | for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks) |
2110 | callback.run(vm()); |
2111 | |
2112 | if (shouldSweepSynchronously()) |
2113 | sweepSynchronously(); |
2114 | |
2115 | if (Options::logGC()) { |
2116 | MonotonicTime after = MonotonicTime::now(); |
        dataLog((after - before).milliseconds(), "ms]\n");
2118 | } |
2119 | } |
2120 | |
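// Collection requests are ticketed: each request bumps m_lastGrantedTicket, and the collector
// bumps m_lastServedTicket as it retires requests, so waiting for a collection is just waiting
// for m_lastServedTicket to catch up with the caller's ticket.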
2121 | Heap::Ticket Heap::requestCollection(GCRequest request) |
2122 | { |
2123 | stopIfNecessary(); |
2124 | |
2125 | ASSERT(vm().currentThreadIsHoldingAPILock()); |
2126 | RELEASE_ASSERT(vm().atomStringTable() == Thread::current().atomStringTable()); |
2127 | |
2128 | LockHolder locker(*m_threadLock); |
2129 | // We may be able to steal the conn. That only works if the collector is definitely not running |
2130 | // right now. This is an optimization that prevents the collector thread from ever starting in most |
2131 | // cases. |
2132 | ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
2133 | if ((m_lastServedTicket == m_lastGrantedTicket) && !m_collectorThreadIsRunning) { |
2134 | if (false) |
2135 | dataLog("Taking the conn.\n" ); |
2136 | m_worldState.exchangeOr(mutatorHasConnBit); |
2137 | } |
2138 | |
2139 | m_requests.append(request); |
2140 | m_lastGrantedTicket++; |
2141 | if (!(m_worldState.load() & mutatorHasConnBit)) |
2142 | m_threadCondition->notifyOne(locker); |
2143 | return m_lastGrantedTicket; |
2144 | } |
2145 | |
2146 | void Heap::waitForCollection(Ticket ticket) |
2147 | { |
2148 | waitForCollector( |
2149 | [&] (const AbstractLocker&) -> bool { |
2150 | return m_lastServedTicket >= ticket; |
2151 | }); |
2152 | } |
2153 | |
2154 | void Heap::sweepInFinalize() |
2155 | { |
2156 | m_objectSpace.sweepPreciseAllocations(); |
2157 | #if ENABLE(WEBASSEMBLY) |
    // Wasm memories hold onto a lot of memory, so it makes sense to sweep them eagerly.
2159 | if (vm().m_webAssemblyMemorySpace) |
2160 | vm().m_webAssemblyMemorySpace->sweep(); |
2161 | #endif |
2162 | } |
2163 | |
2164 | void Heap::suspendCompilerThreads() |
2165 | { |
2166 | #if ENABLE(DFG_JIT) |
    // We ensure the worklists so that it's not possible for the mutator to start a new worklist
    // after we have suspended the ones that it had started before. That's not very expensive since
    // the worklists use AutomaticThreads anyway.
2170 | if (!VM::canUseJIT()) |
2171 | return; |
2172 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2173 | DFG::ensureWorklistForIndex(i).suspendAllThreads(); |
2174 | #endif |
2175 | } |
2176 | |
2177 | void Heap::willStartCollection() |
2178 | { |
    if (Options::logGC())
        dataLog("=> ");

    if (shouldDoFullCollection()) {
        m_collectionScope = CollectionScope::Full;
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
        if (false)
            dataLog("Full collection!\n");
    } else {
        m_collectionScope = CollectionScope::Eden;
        if (Options::logGC())
            dataLog("EdenCollection, ");
        if (false)
            dataLog("Eden collection!\n");
2195 | } |
2196 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2197 | m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; |
2198 | m_extraMemorySize = 0; |
2199 | m_deprecatedExtraMemorySize = 0; |
2200 | #if ENABLE(RESOURCE_USAGE) |
2201 | m_externalMemorySize = 0; |
2202 | #endif |
2203 | |
2204 | if (m_fullActivityCallback) |
2205 | m_fullActivityCallback->willCollect(); |
2206 | } else { |
2207 | ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Eden); |
2208 | m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; |
2209 | } |
2210 | |
2211 | if (m_edenActivityCallback) |
2212 | m_edenActivityCallback->willCollect(); |
2213 | |
2214 | for (auto* observer : m_observers) |
2215 | observer->willGarbageCollect(); |
2216 | } |
2217 | |
2218 | void Heap::prepareForMarking() |
2219 | { |
2220 | m_objectSpace.prepareForMarking(); |
2221 | } |
2222 | |
2223 | void Heap::reapWeakHandles() |
2224 | { |
2225 | m_objectSpace.reapWeakSets(); |
2226 | } |
2227 | |
2228 | void Heap::pruneStaleEntriesFromWeakGCMaps() |
2229 | { |
2230 | if (!m_collectionScope || m_collectionScope.value() != CollectionScope::Full) |
2231 | return; |
2232 | for (WeakGCMapBase* weakGCMap : m_weakGCMaps) |
2233 | weakGCMap->pruneStaleEntries(); |
2234 | } |
2235 | |
2236 | void Heap::sweepArrayBuffers() |
2237 | { |
2238 | m_arrayBuffers.sweep(vm()); |
2239 | } |
2240 | |
2241 | void Heap::snapshotUnswept() |
2242 | { |
    TimingScope timingScope(*this, "Heap::snapshotUnswept");
2244 | m_objectSpace.snapshotUnswept(); |
2245 | } |
2246 | |
2247 | void Heap::deleteSourceProviderCaches() |
2248 | { |
2249 | if (m_lastCollectionScope && m_lastCollectionScope.value() == CollectionScope::Full) |
2250 | m_vm.clearSourceProviderCaches(); |
2251 | } |
2252 | |
2253 | void Heap::notifyIncrementalSweeper() |
2254 | { |
2255 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2256 | if (!m_logicallyEmptyWeakBlocks.isEmpty()) |
2257 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; |
2258 | } |
2259 | |
2260 | m_sweeper->startSweeping(*this); |
2261 | } |
2262 | |
2263 | void Heap::updateAllocationLimits() |
2264 | { |
2265 | static constexpr bool verbose = false; |
2266 | |
2267 | if (verbose) { |
2268 | dataLog("\n" ); |
2269 | dataLog("bytesAllocatedThisCycle = " , m_bytesAllocatedThisCycle, "\n" ); |
2270 | } |
2271 | |
    // Calculate our current heap size threshold for the purpose of figuring out when we should
    // run another collection. This isn't the same as either size() or capacity(), though it should
    // be somewhere between the two. The key is to match the size calculations involved in calls to
    // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
    // fragmentation, we may have size() much smaller than capacity().
2277 | size_t currentHeapSize = 0; |
2278 | |
2279 | // For marked space, we use the total number of bytes visited. This matches the logic for |
2280 | // BlockDirectory's calls to didAllocate(), which effectively accounts for the total size of |
2281 | // objects allocated rather than blocks used. This will underestimate capacity(), and in case |
2282 | // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because |
2283 | // cells usually have a narrow range of sizes. So, the underestimation is probably OK. |
2284 | currentHeapSize += m_totalBytesVisited; |
2285 | if (verbose) |
2286 | dataLog("totalBytesVisited = " , m_totalBytesVisited, ", currentHeapSize = " , currentHeapSize, "\n" ); |
2287 | |
2288 | // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time |
2289 | // extra memory reporting. |
2290 | currentHeapSize += extraMemorySize(); |
2291 | if (!ASSERT_DISABLED) { |
2292 | Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited; |
2293 | checkedCurrentHeapSize += extraMemorySize(); |
2294 | ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize); |
2295 | } |
2296 | |
2297 | if (verbose) |
2298 | dataLog("extraMemorySize() = " , extraMemorySize(), ", currentHeapSize = " , currentHeapSize, "\n" ); |
2299 | |
2300 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2301 | // To avoid pathological GC churn in very small and very large heaps, we set |
2302 | // the new allocation limit based on the current size of the heap, with a |
2303 | // fixed minimum. |
        m_maxHeapSize = std::max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        if (verbose)
            dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (verbose)
            dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
        m_sizeAfterLastFullCollect = currentHeapSize;
        if (verbose)
            dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
        m_bytesAbandonedSinceLastFullCollect = 0;
        if (verbose)
            dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2316 | } else { |
2317 | ASSERT(currentHeapSize >= m_sizeAfterLastCollect); |
2318 | // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have. |
2319 | // But we are sloppy, so we have to defend against the overflow. |
2320 | m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize; |
        if (verbose)
            dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
        m_sizeAfterLastEdenCollect = currentHeapSize;
        if (verbose)
            dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        if (verbose)
            dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (verbose)
            dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2337 | if (m_fullActivityCallback) { |
2338 | ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect); |
2339 | m_fullActivityCallback->didAllocate(*this, currentHeapSize - m_sizeAfterLastFullCollect); |
2340 | } |
2341 | } |
2342 | |
2343 | #if PLATFORM(IOS_FAMILY) |
2344 | // Get critical memory threshold for next cycle. |
2345 | overCriticalMemoryThreshold(MemoryThresholdCallType::Direct); |
2346 | #endif |
2347 | |
2348 | m_sizeAfterLastCollect = currentHeapSize; |
2349 | if (verbose) |
2350 | dataLog("sizeAfterLastCollect = " , m_sizeAfterLastCollect, "\n" ); |
2351 | m_bytesAllocatedThisCycle = 0; |
2352 | |
2353 | if (Options::logGC()) |
2354 | dataLog("=> " , currentHeapSize / 1024, "kb, " ); |
2355 | } |
2356 | |
2357 | void Heap::didFinishCollection() |
2358 | { |
2359 | m_afterGC = MonotonicTime::now(); |
2360 | CollectionScope scope = *m_collectionScope; |
2361 | if (scope == CollectionScope::Full) |
2362 | m_lastFullGCLength = m_afterGC - m_beforeGC; |
2363 | else |
2364 | m_lastEdenGCLength = m_afterGC - m_beforeGC; |
2365 | |
2366 | #if ENABLE(RESOURCE_USAGE) |
2367 | ASSERT(externalMemorySize() <= extraMemorySize()); |
2368 | #endif |
2369 | |
2370 | if (HeapProfiler* heapProfiler = m_vm.heapProfiler()) { |
2371 | gatherExtraHeapData(*heapProfiler); |
2372 | removeDeadHeapSnapshotNodes(*heapProfiler); |
2373 | } |
2374 | |
2375 | if (UNLIKELY(m_verifier)) |
2376 | m_verifier->endGC(); |
2377 | |
2378 | RELEASE_ASSERT(m_collectionScope); |
2379 | m_lastCollectionScope = m_collectionScope; |
2380 | m_collectionScope = WTF::nullopt; |
2381 | |
2382 | for (auto* observer : m_observers) |
2383 | observer->didGarbageCollect(scope); |
2384 | } |
2385 | |
2386 | void Heap::resumeCompilerThreads() |
2387 | { |
2388 | #if ENABLE(DFG_JIT) |
2389 | if (!VM::canUseJIT()) |
2390 | return; |
2391 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2392 | DFG::existingWorklistForIndex(i).resumeAllThreads(); |
2393 | #endif |
2394 | } |
2395 | |
2396 | GCActivityCallback* Heap::fullActivityCallback() |
2397 | { |
2398 | return m_fullActivityCallback.get(); |
2399 | } |
2400 | |
2401 | GCActivityCallback* Heap::edenActivityCallback() |
2402 | { |
2403 | return m_edenActivityCallback.get(); |
2404 | } |
2405 | |
2406 | IncrementalSweeper& Heap::sweeper() |
2407 | { |
2408 | return m_sweeper.get(); |
2409 | } |
2410 | |
2411 | void Heap::setGarbageCollectionTimerEnabled(bool enable) |
2412 | { |
2413 | if (m_fullActivityCallback) |
2414 | m_fullActivityCallback->setEnabled(enable); |
2415 | if (m_edenActivityCallback) |
2416 | m_edenActivityCallback->setEnabled(enable); |
2417 | } |
2418 | |
2419 | void Heap::didAllocate(size_t bytes) |
2420 | { |
2421 | if (m_edenActivityCallback) |
2422 | m_edenActivityCallback->didAllocate(*this, m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); |
2423 | m_bytesAllocatedThisCycle += bytes; |
2424 | performIncrement(bytes); |
2425 | } |
2426 | |
2427 | bool Heap::isValidAllocation(size_t) |
2428 | { |
2429 | if (!isValidThreadState(m_vm)) |
2430 | return false; |
2431 | |
2432 | if (isCurrentThreadBusy()) |
2433 | return false; |
2434 | |
2435 | return true; |
2436 | } |
2437 | |
2438 | void Heap::addFinalizer(JSCell* cell, Finalizer finalizer) |
2439 | { |
2440 | WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize(). |
2441 | } |
2442 | |
2443 | void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context) |
2444 | { |
2445 | HandleSlot slot = handle.slot(); |
2446 | Finalizer finalizer = reinterpret_cast<Finalizer>(context); |
2447 | finalizer(slot->asCell()); |
2448 | WeakSet::deallocate(WeakImpl::asWeakImpl(slot)); |
2449 | } |
2450 | |
2451 | void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness) |
2452 | { |
2453 | if (!m_fullActivityCallback) { |
2454 | collectNow(synchronousness, CollectionScope::Full); |
2455 | return; |
2456 | } |
2457 | |
2458 | if (m_fullActivityCallback->didGCRecently()) { |
        // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2460 | reportAbandonedObjectGraph(); |
2461 | return; |
2462 | } |
2463 | |
2464 | m_fullActivityCallback->setDidGCRecently(); |
2465 | collectNow(synchronousness, CollectionScope::Full); |
2466 | } |
2467 | |
2468 | bool Heap::useGenerationalGC() |
2469 | { |
2470 | return Options::useGenerationalGC() && !VM::isInMiniMode(); |
2471 | } |
2472 | |
2473 | bool Heap::shouldSweepSynchronously() |
2474 | { |
2475 | return Options::sweepSynchronously() || VM::isInMiniMode(); |
2476 | } |
2477 | |
2478 | bool Heap::shouldDoFullCollection() |
2479 | { |
2480 | if (!useGenerationalGC()) |
2481 | return true; |
2482 | |
2483 | if (!m_currentRequest.scope) |
2484 | return m_shouldDoFullCollection || overCriticalMemoryThreshold(); |
2485 | return *m_currentRequest.scope == CollectionScope::Full; |
2486 | } |
2487 | |
2488 | void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block) |
2489 | { |
2490 | m_logicallyEmptyWeakBlocks.append(block); |
2491 | } |
2492 | |
2493 | void Heap::sweepAllLogicallyEmptyWeakBlocks() |
2494 | { |
2495 | if (m_logicallyEmptyWeakBlocks.isEmpty()) |
2496 | return; |
2497 | |
2498 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; |
2499 | while (sweepNextLogicallyEmptyWeakBlock()) { } |
2500 | } |
2501 | |
2502 | bool Heap::sweepNextLogicallyEmptyWeakBlock() |
2503 | { |
2504 | if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound) |
2505 | return false; |
2506 | |
2507 | WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep]; |
2508 | |
2509 | block->sweep(); |
2510 | if (block->isEmpty()) { |
2511 | std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last()); |
2512 | m_logicallyEmptyWeakBlocks.removeLast(); |
2513 | WeakBlock::destroy(*this, block); |
2514 | } else |
2515 | m_indexOfNextLogicallyEmptyWeakBlockToSweep++; |
2516 | |
2517 | if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) { |
2518 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound; |
2519 | return false; |
2520 | } |
2521 | |
2522 | return true; |
2523 | } |
2524 | |
2525 | size_t Heap::visitCount() |
2526 | { |
2527 | size_t result = 0; |
2528 | forEachSlotVisitor( |
2529 | [&] (SlotVisitor& visitor) { |
2530 | result += visitor.visitCount(); |
2531 | }); |
2532 | return result; |
2533 | } |
2534 | |
2535 | size_t Heap::bytesVisited() |
2536 | { |
2537 | size_t result = 0; |
2538 | forEachSlotVisitor( |
2539 | [&] (SlotVisitor& visitor) { |
2540 | result += visitor.bytesVisited(); |
2541 | }); |
2542 | return result; |
2543 | } |
2544 | |
2545 | void Heap::forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>& func) |
2546 | { |
2547 | // We don't know the full set of CodeBlocks until compilation has terminated. |
2548 | completeAllJITPlans(); |
2549 | |
2550 | return m_codeBlocks->iterate(func); |
2551 | } |
2552 | |
2553 | void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<void(CodeBlock*)>& func) |
2554 | { |
2555 | return m_codeBlocks->iterate(locker, func); |
2556 | } |
2557 | |
2558 | void Heap::writeBarrierSlowPath(const JSCell* from) |
2559 | { |
2560 | if (UNLIKELY(mutatorShouldBeFenced())) { |
        // In this case, the barrierThreshold is the tautological threshold, so from may not
        // actually be black yet. But we can't know for sure until we fire off a fence.
2563 | WTF::storeLoadFence(); |
2564 | if (from->cellState() != CellState::PossiblyBlack) |
2565 | return; |
2566 | } |
2567 | |
2568 | addToRememberedSet(from); |
2569 | } |
2570 | |
2571 | bool Heap::isCurrentThreadBusy() |
2572 | { |
2573 | return Thread::mayBeGCThread() || mutatorState() != MutatorState::Running; |
2574 | } |
2575 | |
void Heap::reportExtraMemoryVisited(size_t size)
2577 | { |
2578 | size_t* counter = &m_extraMemorySize; |
2579 | |
2580 | for (;;) { |
2581 | size_t oldSize = *counter; |
2582 | // FIXME: Change this to use SaturatedArithmetic when available. |
2583 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
2584 | Checked<size_t, RecordOverflow> checkedNewSize = oldSize; |
2585 | checkedNewSize += size; |
2586 | size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); |
2587 | if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize)) |
2588 | return; |
2589 | } |
2590 | } |
2591 | |
2592 | #if ENABLE(RESOURCE_USAGE) |
2593 | void Heap::reportExternalMemoryVisited(size_t size) |
2594 | { |
2595 | size_t* counter = &m_externalMemorySize; |
2596 | |
2597 | for (;;) { |
2598 | size_t oldSize = *counter; |
2599 | if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size)) |
2600 | return; |
2601 | } |
2602 | } |
2603 | #endif |
2604 | |
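// Decides whether this allocation should trigger a collection. When GC is deferred (or a
// deferral context is supplied) we only record that work is owed; otherwise, exceeding the
// per-cycle allocation budget requests an asynchronous collection, and stopIfNecessary() starts
// it immediately if the mutator holds the conn.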
2605 | void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext) |
2606 | { |
2607 | ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread()); |
2608 | if (validateDFGDoesGC) |
2609 | RELEASE_ASSERT(expectDoesGC()); |
2610 | |
2611 | if (!m_isSafeToCollect) |
2612 | return; |
2613 | |
2614 | switch (mutatorState()) { |
2615 | case MutatorState::Running: |
2616 | case MutatorState::Allocating: |
2617 | break; |
2618 | case MutatorState::Sweeping: |
2619 | case MutatorState::Collecting: |
2620 | return; |
2621 | } |
2622 | if (!Options::useGC()) |
2623 | return; |
2624 | |
2625 | if (mayNeedToStop()) { |
2626 | if (deferralContext) |
2627 | deferralContext->m_shouldGC = true; |
2628 | else if (isDeferred()) |
2629 | m_didDeferGCWork = true; |
2630 | else |
2631 | stopIfNecessary(); |
2632 | } |
2633 | |
2634 | if (UNLIKELY(Options::gcMaxHeapSize())) { |
2635 | if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize()) |
2636 | return; |
2637 | } else { |
2638 | size_t bytesAllowedThisCycle = m_maxEdenSize; |
2639 | |
2640 | #if PLATFORM(IOS_FAMILY) |
2641 | if (overCriticalMemoryThreshold()) |
2642 | bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle); |
2643 | #endif |
2644 | |
2645 | if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle) |
2646 | return; |
2647 | } |
2648 | |
2649 | if (deferralContext) |
2650 | deferralContext->m_shouldGC = true; |
2651 | else if (isDeferred()) |
2652 | m_didDeferGCWork = true; |
2653 | else { |
2654 | collectAsync(); |
2655 | stopIfNecessary(); // This will immediately start the collection if we have the conn. |
2656 | } |
2657 | } |
2658 | |
2659 | void Heap::decrementDeferralDepthAndGCIfNeededSlow() |
2660 | { |
2661 | // Can't do anything if we're still deferred. |
2662 | if (m_deferralDepth) |
2663 | return; |
2664 | |
2665 | ASSERT(!isDeferred()); |
2666 | |
2667 | m_didDeferGCWork = false; |
2668 | // FIXME: Bring back something like the DeferGCProbability mode. |
2669 | // https://bugs.webkit.org/show_bug.cgi?id=166627 |
2670 | collectIfNecessaryOrDefer(); |
2671 | } |
2672 | |
2673 | void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap) |
2674 | { |
2675 | m_weakGCMaps.add(weakGCMap); |
2676 | } |
2677 | |
2678 | void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap) |
2679 | { |
2680 | m_weakGCMaps.remove(weakGCMap); |
2681 | } |
2682 | |
2683 | void Heap::didAllocateBlock(size_t capacity) |
2684 | { |
2685 | #if ENABLE(RESOURCE_USAGE) |
2686 | m_blockBytesAllocated += capacity; |
2687 | #else |
2688 | UNUSED_PARAM(capacity); |
2689 | #endif |
2690 | } |
2691 | |
2692 | void Heap::didFreeBlock(size_t capacity) |
2693 | { |
2694 | #if ENABLE(RESOURCE_USAGE) |
2695 | m_blockBytesAllocated -= capacity; |
2696 | #else |
2697 | UNUSED_PARAM(capacity); |
2698 | #endif |
2699 | } |
2700 | |
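// The core marking constraints. Each is registered with an abbreviated name (used in logs), a
// description, a visit function, and a volatility hint for the fixpoint: roughly, a
// GreyedByExecution constraint must be reevaluated whenever the mutator has run, a
// GreyedByMarking constraint whenever objects have been marked, and a SeldomGreyed constraint
// rarely produces new work, so it can be deferred as long as possible.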
2701 | void Heap::addCoreConstraints() |
2702 | { |
2703 | m_constraintSet->add( |
2704 | "Cs" , "Conservative Scan" , |
2705 | [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable { |
2706 | bool shouldNotProduceWork = lastVersion == m_phaseVersion; |
2707 | if (shouldNotProduceWork) |
2708 | return; |
2709 | |
            TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2711 | m_objectSpace.prepareForConservativeScan(); |
2712 | m_jitStubRoutines->prepareForConservativeScan(); |
2713 | |
2714 | { |
2715 | ConservativeRoots conservativeRoots(*this); |
2716 | SuperSamplerScope superSamplerScope(false); |
2717 | |
2718 | gatherStackRoots(conservativeRoots); |
2719 | gatherJSStackRoots(conservativeRoots); |
2720 | gatherScratchBufferRoots(conservativeRoots); |
2721 | |
2722 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan); |
2723 | slotVisitor.append(conservativeRoots); |
2724 | } |
2725 | if (VM::canUseJIT()) { |
2726 | // JITStubRoutines must be visited after scanning ConservativeRoots since JITStubRoutines depend on the hook executed during gathering ConservativeRoots. |
2727 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::JITStubRoutines); |
2728 | m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor); |
2729 | } |
2730 | |
2731 | lastVersion = m_phaseVersion; |
2732 | }, |
2733 | ConstraintVolatility::GreyedByExecution); |
2734 | |
2735 | m_constraintSet->add( |
2736 | "Msr" , "Misc Small Roots" , |
2737 | [this] (SlotVisitor& slotVisitor) { |
2738 | |
2739 | #if JSC_OBJC_API_ENABLED |
2740 | scanExternalRememberedSet(m_vm, slotVisitor); |
2741 | #endif |
2742 | if (m_vm.smallStrings.needsToBeVisited(*m_collectionScope)) { |
2743 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongReferences); |
2744 | m_vm.smallStrings.visitStrongReferences(slotVisitor); |
2745 | } |
2746 | |
2747 | { |
2748 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ProtectedValues); |
2749 | for (auto& pair : m_protectedValues) |
2750 | slotVisitor.appendUnbarriered(pair.key); |
2751 | } |
2752 | |
2753 | if (m_markListSet && m_markListSet->size()) { |
2754 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan); |
2755 | MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet); |
2756 | } |
2757 | |
2758 | { |
2759 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::VMExceptions); |
2760 | slotVisitor.appendUnbarriered(m_vm.exception()); |
2761 | slotVisitor.appendUnbarriered(m_vm.lastException()); |
2762 | } |
2763 | }, |
2764 | ConstraintVolatility::GreyedByExecution); |
2765 | |
2766 | m_constraintSet->add( |
2767 | "Sh" , "Strong Handles" , |
2768 | [this] (SlotVisitor& slotVisitor) { |
2769 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongHandles); |
2770 | m_handleSet.visitStrongHandles(slotVisitor); |
2771 | }, |
2772 | ConstraintVolatility::GreyedByExecution); |
2773 | |
2774 | m_constraintSet->add( |
2775 | "D" , "Debugger" , |
2776 | [this] (SlotVisitor& slotVisitor) { |
2777 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Debugger); |
2778 | |
2779 | #if ENABLE(SAMPLING_PROFILER) |
2780 | if (SamplingProfiler* samplingProfiler = m_vm.samplingProfiler()) { |
2781 | auto locker = holdLock(samplingProfiler->getLock()); |
2782 | samplingProfiler->processUnverifiedStackTraces(locker); |
2783 | samplingProfiler->visit(slotVisitor); |
2784 | if (Options::logGC() == GCLogging::Verbose) |
2785 | dataLog("Sampling Profiler data:\n" , slotVisitor); |
2786 | } |
2787 | #endif // ENABLE(SAMPLING_PROFILER) |
2788 | |
2789 | if (m_vm.typeProfiler()) |
2790 | m_vm.typeProfilerLog()->visit(slotVisitor); |
2791 | |
2792 | if (auto* shadowChicken = m_vm.shadowChicken()) |
2793 | shadowChicken->visitChildren(slotVisitor); |
2794 | }, |
2795 | ConstraintVolatility::GreyedByExecution); |
2796 | |
2797 | m_constraintSet->add( |
2798 | "Ws" , "Weak Sets" , |
2799 | [this] (SlotVisitor& slotVisitor) { |
2800 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::WeakSets); |
2801 | m_objectSpace.visitWeakSets(slotVisitor); |
2802 | }, |
2803 | ConstraintVolatility::GreyedByMarking); |
2804 | |
2805 | m_constraintSet->add( |
2806 | "O" , "Output" , |
2807 | [] (SlotVisitor& slotVisitor) { |
2808 | VM& vm = slotVisitor.vm(); |
2809 | |
2810 | auto callOutputConstraint = [] (SlotVisitor& slotVisitor, HeapCell* heapCell, HeapCell::Kind) { |
2811 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Output); |
2812 | VM& vm = slotVisitor.vm(); |
2813 | JSCell* cell = static_cast<JSCell*>(heapCell); |
2814 | cell->methodTable(vm)->visitOutputConstraints(cell, slotVisitor); |
2815 | }; |
2816 | |
2817 | auto add = [&] (auto& set) { |
2818 | slotVisitor.addParallelConstraintTask(set.forEachMarkedCellInParallel(callOutputConstraint)); |
2819 | }; |
2820 | |
2821 | add(vm.executableToCodeBlockEdgesWithConstraints); |
2822 | if (vm.m_weakMapSpace) |
2823 | add(*vm.m_weakMapSpace); |
2824 | }, |
2825 | ConstraintVolatility::GreyedByMarking, |
2826 | ConstraintParallelism::Parallel); |
2827 | |
2828 | #if ENABLE(DFG_JIT) |
2829 | if (VM::canUseJIT()) { |
2830 | m_constraintSet->add( |
2831 | "Dw" , "DFG Worklists" , |
2832 | [this] (SlotVisitor& slotVisitor) { |
2833 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::DFGWorkLists); |
2834 | |
2835 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2836 | DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor); |
2837 | |
2838 | // FIXME: This is almost certainly unnecessary. |
2839 | // https://bugs.webkit.org/show_bug.cgi?id=166829 |
2840 | DFG::iterateCodeBlocksForGC( |
2841 | m_vm, |
2842 | [&] (CodeBlock* codeBlock) { |
2843 | slotVisitor.appendUnbarriered(codeBlock); |
2844 | }); |
2845 | |
2846 | if (Options::logGC() == GCLogging::Verbose) |
2847 | dataLog("DFG Worklists:\n" , slotVisitor); |
2848 | }, |
2849 | ConstraintVolatility::GreyedByMarking); |
2850 | } |
2851 | #endif |
2852 | |
2853 | m_constraintSet->add( |
2854 | "Cb" , "CodeBlocks" , |
2855 | [this] (SlotVisitor& slotVisitor) { |
2856 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::CodeBlocks); |
2857 | iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks( |
2858 | [&] (CodeBlock* codeBlock) { |
2859 | // Visit the CodeBlock as a constraint only if it's black. |
2860 | if (isMarked(codeBlock) |
2861 | && codeBlock->cellState() == CellState::PossiblyBlack) |
2862 | slotVisitor.visitAsConstraint(codeBlock); |
2863 | }); |
2864 | }, |
2865 | ConstraintVolatility::SeldomGreyed); |
2866 | |
2867 | m_constraintSet->add(makeUnique<MarkStackMergingConstraint>(*this)); |
2868 | } |
2869 | |
2870 | void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint) |
2871 | { |
2872 | PreventCollectionScope preventCollectionScope(*this); |
2873 | m_constraintSet->add(WTFMove(constraint)); |
2874 | } |
2875 | |
2876 | void Heap::notifyIsSafeToCollect() |
2877 | { |
2878 | MonotonicTime before; |
2879 | if (Options::logGC()) { |
2880 | before = MonotonicTime::now(); |
2881 | dataLog("[GC<" , RawPointer(this), ">: starting " ); |
2882 | } |
2883 | |
2884 | addCoreConstraints(); |
2885 | |
2886 | m_isSafeToCollect = true; |
2887 | |
2888 | if (Options::collectContinuously()) { |
2889 | m_collectContinuouslyThread = Thread::create( |
2890 | "JSC DEBUG Continuous GC" , |
2891 | [this] () { |
2892 | MonotonicTime initialTime = MonotonicTime::now(); |
2893 | Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS()); |
2894 | while (!m_shouldStopCollectingContinuously) { |
2895 | { |
2896 | LockHolder locker(*m_threadLock); |
2897 | if (m_requests.isEmpty()) { |
2898 | m_requests.append(WTF::nullopt); |
2899 | m_lastGrantedTicket++; |
2900 | m_threadCondition->notifyOne(locker); |
2901 | } |
2902 | } |
2903 | |
2904 | { |
2905 | LockHolder locker(m_collectContinuouslyLock); |
2906 | Seconds elapsed = MonotonicTime::now() - initialTime; |
2907 | Seconds elapsedInPeriod = elapsed % period; |
2908 | MonotonicTime timeToWakeUp = |
2909 | initialTime + elapsed - elapsedInPeriod + period; |
2910 | while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) { |
2911 | m_collectContinuouslyCondition.waitUntil( |
2912 | m_collectContinuouslyLock, timeToWakeUp); |
2913 | } |
2914 | } |
2915 | } |
2916 | }); |
2917 | } |
2918 | |
2919 | if (Options::logGC()) |
        dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2921 | } |
2922 | |
2923 | void Heap::preventCollection() |
2924 | { |
2925 | if (!m_isSafeToCollect) |
2926 | return; |
2927 | |
2928 | // This prevents the collectContinuously thread from starting a collection. |
2929 | m_collectContinuouslyLock.lock(); |
2930 | |
2931 | // Wait for all collections to finish. |
2932 | waitForCollector( |
2933 | [&] (const AbstractLocker&) -> bool { |
2934 | ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
2935 | return m_lastServedTicket == m_lastGrantedTicket; |
2936 | }); |
2937 | |
2938 | // Now a collection can only start if this thread starts it. |
2939 | RELEASE_ASSERT(!m_collectionScope); |
2940 | } |
2941 | |
2942 | void Heap::allowCollection() |
2943 | { |
2944 | if (!m_isSafeToCollect) |
2945 | return; |
2946 | |
2947 | m_collectContinuouslyLock.unlock(); |
2948 | } |
2949 | |
2950 | void Heap::setMutatorShouldBeFenced(bool value) |
2951 | { |
2952 | m_mutatorShouldBeFenced = value; |
2953 | m_barrierThreshold = value ? tautologicalThreshold : blackThreshold; |
2954 | } |
2955 | |
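// Incremental marking pacing: each didAllocate() banks bytes * Options::gcIncrementScale() into
// m_incrementBalance, and once the balance reaches Options::gcIncrementBytes() the mutator pays
// it down by performing roughly that much draining work itself.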
2956 | void Heap::performIncrement(size_t bytes) |
2957 | { |
2958 | if (!m_objectSpace.isMarking()) |
2959 | return; |
2960 | |
2961 | if (isDeferred()) |
2962 | return; |
2963 | |
2964 | m_incrementBalance += bytes * Options::gcIncrementScale(); |
2965 | |
    // Save ourselves from craziness. Since this is an optimization, it's OK to go back to any
    // consistent state when the double goes wild.
2968 | if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance)) |
2969 | m_incrementBalance = 0; |
2970 | |
2971 | if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes())) |
2972 | return; |
2973 | |
2974 | double targetBytes = m_incrementBalance; |
2975 | if (targetBytes <= 0) |
2976 | return; |
2977 | targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes()); |
2978 | |
2979 | SlotVisitor& slotVisitor = *m_mutatorSlotVisitor; |
2980 | ParallelModeEnabler parallelModeEnabler(slotVisitor); |
2981 | size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes)); |
2982 | // incrementBalance may go negative here because it'll remember how many bytes we overshot. |
2983 | m_incrementBalance -= bytesVisited; |
2984 | } |
2985 | |
2986 | void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback) |
2987 | { |
2988 | m_heapFinalizerCallbacks.append(callback); |
2989 | } |
2990 | |
2991 | void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback) |
2992 | { |
2993 | m_heapFinalizerCallbacks.removeFirst(callback); |
2994 | } |
2995 | |
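// The "bonus visitor task" lets the parallel marker threads pick up an arbitrary SharedTask:
// setting it wakes every thread parked on m_markingConditionVariable, and runTaskInParallel()
// then waits for the task's ref count to return to its initial value, which indicates that
// every thread has finished running it.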
2996 | void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task) |
2997 | { |
2998 | auto locker = holdLock(m_markingMutex); |
2999 | m_bonusVisitorTask = task; |
3000 | m_markingConditionVariable.notifyAll(); |
3001 | } |
3002 | |
3003 | void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task) |
3004 | { |
3005 | unsigned initialRefCount = task->refCount(); |
3006 | setBonusVisitorTask(task); |
3007 | task->run(*m_collectorSlotVisitor); |
3008 | setBonusVisitorTask(nullptr); |
    // The constraint solver expects that returning from this function implies the task has
    // terminated on all threads. This wait ensures that property.
3011 | { |
3012 | auto locker = holdLock(m_markingMutex); |
3013 | while (task->refCount() > initialRefCount) |
3014 | m_markingConditionVariable.wait(m_markingMutex); |
3015 | } |
3016 | } |
3017 | |
3018 | } // namespace JSC |
3019 | |