1 | /* |
2 | * Copyright (C) 2003-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2007 Eric Seidel <[email protected]> |
4 | * |
5 | * This library is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU Lesser General Public |
7 | * License as published by the Free Software Foundation; either |
8 | * version 2 of the License, or (at your option) any later version. |
9 | * |
10 | * This library is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * Lesser General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU Lesser General Public |
16 | * License along with this library; if not, write to the Free Software |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
18 | * |
19 | */ |
20 | |
21 | #include "config.h" |
22 | #include "Heap.h" |
23 | |
24 | #include "BlockDirectoryInlines.h" |
25 | #include "BuiltinExecutables.h" |
26 | #include "CodeBlock.h" |
27 | #include "CodeBlockSetInlines.h" |
28 | #include "CollectingScope.h" |
29 | #include "ConservativeRoots.h" |
30 | #include "DFGWorklistInlines.h" |
31 | #include "EdenGCActivityCallback.h" |
32 | #include "Exception.h" |
33 | #include "FullGCActivityCallback.h" |
34 | #include "FunctionExecutableInlines.h" |
35 | #include "GCActivityCallback.h" |
36 | #include "GCIncomingRefCountedSetInlines.h" |
37 | #include "GCSegmentedArrayInlines.h" |
38 | #include "GCTypeMap.h" |
39 | #include "HasOwnPropertyCache.h" |
40 | #include "HeapHelperPool.h" |
41 | #include "HeapIterationScope.h" |
42 | #include "HeapProfiler.h" |
43 | #include "HeapSnapshot.h" |
44 | #include "HeapVerifier.h" |
45 | #include "IncrementalSweeper.h" |
46 | #include "InferredValueInlines.h" |
47 | #include "Interpreter.h" |
48 | #include "IsoCellSetInlines.h" |
49 | #include "JITStubRoutineSet.h" |
50 | #include "JITWorklist.h" |
51 | #include "JSCInlines.h" |
52 | #include "JSGlobalObject.h" |
53 | #include "JSLock.h" |
54 | #include "JSVirtualMachineInternal.h" |
55 | #include "JSWeakMap.h" |
56 | #include "JSWeakObjectRef.h" |
57 | #include "JSWeakSet.h" |
58 | #include "JSWebAssemblyCodeBlock.h" |
59 | #include "MachineStackMarker.h" |
60 | #include "MarkStackMergingConstraint.h" |
61 | #include "MarkedSpaceInlines.h" |
62 | #include "MarkingConstraintSet.h" |
63 | #include "PreventCollectionScope.h" |
64 | #include "SamplingProfiler.h" |
65 | #include "ShadowChicken.h" |
66 | #include "SpaceTimeMutatorScheduler.h" |
67 | #include "StochasticSpaceTimeMutatorScheduler.h" |
68 | #include "StopIfNecessaryTimer.h" |
69 | #include "SubspaceInlines.h" |
70 | #include "SuperSampler.h" |
71 | #include "SweepingScope.h" |
72 | #include "SymbolTableInlines.h" |
73 | #include "SynchronousStopTheWorldMutatorScheduler.h" |
74 | #include "TypeProfiler.h" |
75 | #include "TypeProfilerLog.h" |
76 | #include "UnlinkedCodeBlock.h" |
77 | #include "VM.h" |
78 | #include "VisitCounter.h" |
79 | #include "WasmMemory.h" |
80 | #include "WeakMapImplInlines.h" |
81 | #include "WeakSetInlines.h" |
82 | #include <algorithm> |
83 | #include <wtf/ListDump.h> |
84 | #include <wtf/MainThread.h> |
85 | #include <wtf/ParallelVectorIterator.h> |
86 | #include <wtf/ProcessID.h> |
87 | #include <wtf/RAMSize.h> |
88 | #include <wtf/SimpleStats.h> |
89 | #include <wtf/Threading.h> |
90 | |
91 | #if PLATFORM(IOS_FAMILY) |
92 | #include <bmalloc/bmalloc.h> |
93 | #endif |
94 | |
95 | #if USE(FOUNDATION) |
96 | #include <wtf/spi/cocoa/objcSPI.h> |
97 | #endif |
98 | |
99 | #ifdef JSC_GLIB_API_ENABLED |
100 | #include "JSCGLibWrapperObject.h" |
101 | #endif |
102 | |
103 | namespace JSC { |
104 | |
105 | namespace { |
106 | |
107 | bool verboseStop = false; |
108 | |
109 | double maxPauseMS(double thisPauseMS) |
110 | { |
111 | static double maxPauseMS; |
112 | maxPauseMS = std::max(thisPauseMS, maxPauseMS); |
113 | return maxPauseMS; |
114 | } |
115 | |
116 | size_t minHeapSize(HeapType heapType, size_t ramSize) |
117 | { |
118 | if (heapType == LargeHeap) { |
119 | double result = std::min( |
120 | static_cast<double>(Options::largeHeapSize()), |
121 | ramSize * Options::smallHeapRAMFraction()); |
122 | return static_cast<size_t>(result); |
123 | } |
124 | return Options::smallHeapSize(); |
125 | } |
126 | |
127 | size_t proportionalHeapSize(size_t heapSize, size_t ramSize) |
128 | { |
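    // Choose the next collection trigger in proportion to the current heap size. The
    // growth factor shrinks as the heap (or, on IOS_FAMILY, the whole process
    // footprint) occupies a larger fraction of RAM: small heaps may grow eagerly,
    // large heaps only conservatively.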
129 | if (VM::isInMiniMode()) |
130 | return Options::miniVMHeapGrowthFactor() * heapSize; |
131 | |
132 | #if PLATFORM(IOS_FAMILY) |
133 | size_t memoryFootprint = bmalloc::api::memoryFootprint(); |
134 | if (memoryFootprint < ramSize * Options::smallHeapRAMFraction()) |
135 | return Options::smallHeapGrowthFactor() * heapSize; |
136 | if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction()) |
137 | return Options::mediumHeapGrowthFactor() * heapSize; |
138 | #else |
139 | if (heapSize < ramSize * Options::smallHeapRAMFraction()) |
140 | return Options::smallHeapGrowthFactor() * heapSize; |
141 | if (heapSize < ramSize * Options::mediumHeapRAMFraction()) |
142 | return Options::mediumHeapGrowthFactor() * heapSize; |
143 | #endif |
144 | return Options::largeHeapGrowthFactor() * heapSize; |
145 | } |
146 | |
147 | bool isValidSharedInstanceThreadState(VM* vm) |
148 | { |
149 | return vm->currentThreadIsHoldingAPILock(); |
150 | } |
151 | |
152 | bool isValidThreadState(VM* vm) |
153 | { |
154 | if (vm->atomStringTable() != Thread::current().atomStringTable()) |
155 | return false; |
156 | |
157 | if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm)) |
158 | return false; |
159 | |
160 | return true; |
161 | } |
162 | |
163 | void recordType(VM& vm, TypeCountSet& set, JSCell* cell) |
164 | { |
    const char* typeName = "[unknown]";
166 | const ClassInfo* info = cell->classInfo(vm); |
167 | if (info && info->className) |
168 | typeName = info->className; |
169 | set.add(typeName); |
170 | } |
171 | |
172 | bool measurePhaseTiming() |
173 | { |
174 | return false; |
175 | } |
176 | |
177 | HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats() |
178 | { |
179 | static HashMap<const char*, GCTypeMap<SimpleStats>>* result; |
180 | static std::once_flag once; |
181 | std::call_once( |
182 | once, |
183 | [] { |
184 | result = new HashMap<const char*, GCTypeMap<SimpleStats>>(); |
185 | }); |
186 | return *result; |
187 | } |
188 | |
189 | SimpleStats& timingStats(const char* name, CollectionScope scope) |
190 | { |
191 | return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope]; |
192 | } |
193 | |
194 | class TimingScope { |
195 | public: |
196 | TimingScope(Optional<CollectionScope> scope, const char* name) |
197 | : m_scope(scope) |
198 | , m_name(name) |
199 | { |
200 | if (measurePhaseTiming()) |
201 | m_before = MonotonicTime::now(); |
202 | } |
203 | |
204 | TimingScope(Heap& heap, const char* name) |
205 | : TimingScope(heap.collectionScope(), name) |
206 | { |
207 | } |
208 | |
209 | void setScope(Optional<CollectionScope> scope) |
210 | { |
211 | m_scope = scope; |
212 | } |
213 | |
214 | void setScope(Heap& heap) |
215 | { |
216 | setScope(heap.collectionScope()); |
217 | } |
218 | |
219 | ~TimingScope() |
220 | { |
221 | if (measurePhaseTiming()) { |
222 | MonotonicTime after = MonotonicTime::now(); |
223 | Seconds timing = after - m_before; |
224 | SimpleStats& stats = timingStats(m_name, *m_scope); |
225 | stats.add(timing.milliseconds()); |
            dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing.milliseconds(), "ms (average ", stats.mean(), "ms).\n");
227 | } |
228 | } |
229 | private: |
230 | Optional<CollectionScope> m_scope; |
231 | MonotonicTime m_before; |
232 | const char* m_name; |
233 | }; |
234 | |
235 | } // anonymous namespace |
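
// The collector's dedicated background thread. It parks until a collection has been
// requested and the mutator does not hold the conn, then drives the collector phases.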
236 | |
237 | class Heap::HeapThread : public AutomaticThread { |
238 | public: |
239 | HeapThread(const AbstractLocker& locker, Heap& heap) |
240 | : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition.copyRef()) |
241 | , m_heap(heap) |
242 | { |
243 | } |
244 | |
245 | const char* name() const override |
246 | { |
        return "JSC Heap Collector Thread";
248 | } |
249 | |
250 | protected: |
251 | PollResult poll(const AbstractLocker& locker) override |
252 | { |
253 | if (m_heap.m_threadShouldStop) { |
254 | m_heap.notifyThreadStopping(locker); |
255 | return PollResult::Stop; |
256 | } |
257 | if (m_heap.shouldCollectInCollectorThread(locker)) |
258 | return PollResult::Work; |
259 | return PollResult::Wait; |
260 | } |
261 | |
262 | WorkResult work() override |
263 | { |
264 | m_heap.collectInCollectorThread(); |
265 | return WorkResult::Continue; |
266 | } |
267 | |
268 | void threadDidStart() override |
269 | { |
270 | Thread::registerGCThread(GCThreadType::Main); |
271 | } |
272 | |
273 | private: |
274 | Heap& m_heap; |
275 | }; |
276 | |
277 | Heap::Heap(VM* vm, HeapType heapType) |
278 | : m_heapType(heapType) |
279 | , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize()) |
280 | , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize)) |
281 | , m_maxEdenSize(m_minBytesPerCycle) |
282 | , m_maxHeapSize(m_minBytesPerCycle) |
283 | , m_objectSpace(this) |
284 | , m_machineThreads(std::make_unique<MachineThreads>()) |
    , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
    , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
287 | , m_mutatorMarkStack(std::make_unique<MarkStackArray>()) |
288 | , m_raceMarkStack(std::make_unique<MarkStackArray>()) |
289 | , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this)) |
290 | , m_handleSet(vm) |
291 | , m_codeBlocks(std::make_unique<CodeBlockSet>()) |
292 | , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>()) |
293 | , m_vm(vm) |
    // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
    // schedule the timer if we've never done a collection.
    , m_lastFullGCLength(0.01)
    , m_lastEdenGCLength(0.01)
    , m_fullActivityCallback(GCActivityCallback::tryCreateFullTimer(this))
297 | , m_edenActivityCallback(GCActivityCallback::tryCreateEdenTimer(this)) |
298 | , m_sweeper(adoptRef(*new IncrementalSweeper(this))) |
299 | , m_stopIfNecessaryTimer(adoptRef(*new StopIfNecessaryTimer(vm))) |
300 | , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>()) |
301 | , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>()) |
302 | , m_helperClient(&heapHelperPool()) |
303 | , m_threadLock(Box<Lock>::create()) |
304 | , m_threadCondition(AutomaticThreadCondition::create()) |
305 | { |
306 | m_worldState.store(0); |
307 | |
308 | for (unsigned i = 0, numberOfParallelThreads = heapHelperPool().numberOfThreads(); i < numberOfParallelThreads; ++i) { |
        std::unique_ptr<SlotVisitor> visitor = std::make_unique<SlotVisitor>(*this, toCString("P", i + 1));
310 | if (Options::optimizeParallelSlotVisitorsForStoppedMutator()) |
311 | visitor->optimizeForStoppedMutator(); |
312 | m_availableParallelSlotVisitors.append(visitor.get()); |
313 | m_parallelSlotVisitors.append(WTFMove(visitor)); |
314 | } |
315 | |
316 | if (Options::useConcurrentGC()) { |
317 | if (Options::useStochasticMutatorScheduler()) |
318 | m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this); |
319 | else |
320 | m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this); |
321 | } else { |
322 | // We simulate turning off concurrent GC by making the scheduler say that the world |
323 | // should always be stopped when the collector is running. |
324 | m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>(); |
325 | } |
326 | |
327 | if (Options::verifyHeap()) |
328 | m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification()); |
329 | |
330 | m_collectorSlotVisitor->optimizeForStoppedMutator(); |
331 | |
332 | // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting. |
333 | size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold())); |
334 | m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4; |
335 | |
336 | LockHolder locker(*m_threadLock); |
337 | m_thread = adoptRef(new HeapThread(locker, *this)); |
338 | } |
339 | |
340 | Heap::~Heap() |
341 | { |
342 | forEachSlotVisitor( |
343 | [&] (SlotVisitor& visitor) { |
344 | visitor.clearMarkStacks(); |
345 | }); |
346 | m_mutatorMarkStack->clear(); |
347 | m_raceMarkStack->clear(); |
348 | |
349 | for (WeakBlock* block : m_logicallyEmptyWeakBlocks) |
350 | WeakBlock::destroy(*this, block); |
351 | } |
352 | |
353 | bool Heap::isPagedOut(MonotonicTime deadline) |
354 | { |
355 | return m_objectSpace.isPagedOut(deadline); |
356 | } |
357 | |
358 | void Heap::dumpHeapStatisticsAtVMDestruction() |
359 | { |
360 | unsigned counter = 0; |
361 | m_objectSpace.forEachBlock([&] (MarkedBlock::Handle* block) { |
362 | unsigned live = 0; |
363 | block->forEachCell([&] (HeapCell* cell, HeapCell::Kind) { |
364 | if (cell->isLive()) |
365 | live++; |
366 | return IterationStatus::Continue; |
367 | }); |
        dataLogLn("[", counter++, "] ", block->cellSize(), ", ", live, " / ", block->cellsPerBlock(), " ", static_cast<double>(live) / block->cellsPerBlock() * 100, "% ", block->attributes(), " ", block->subspace()->name());
369 | block->forEachCell([&] (HeapCell* heapCell, HeapCell::Kind kind) { |
370 | if (heapCell->isLive() && kind == HeapCell::Kind::JSCell) { |
371 | auto* cell = static_cast<JSCell*>(heapCell); |
372 | if (cell->isObject()) |
                    dataLogLn(" ", JSValue((JSObject*)cell));
                else
                    dataLogLn(" ", *cell);
376 | } |
377 | return IterationStatus::Continue; |
378 | }); |
379 | }); |
380 | } |
381 | |
382 | // The VM is being destroyed and the collector will never run again. |
383 | // Run all pending finalizers now because we won't get another chance. |
384 | void Heap::lastChanceToFinalize() |
385 | { |
386 | MonotonicTime before; |
387 | if (Options::logGC()) { |
388 | before = MonotonicTime::now(); |
        dataLog("[GC<", RawPointer(this), ">: shutdown ");
390 | } |
391 | |
392 | m_isShuttingDown = true; |
393 | |
394 | RELEASE_ASSERT(!m_vm->entryScope); |
395 | RELEASE_ASSERT(m_mutatorState == MutatorState::Running); |
396 | |
397 | if (m_collectContinuouslyThread) { |
398 | { |
399 | LockHolder locker(m_collectContinuouslyLock); |
400 | m_shouldStopCollectingContinuously = true; |
401 | m_collectContinuouslyCondition.notifyOne(); |
402 | } |
403 | m_collectContinuouslyThread->waitForCompletion(); |
404 | } |
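
    // The bare digits logged below are shutdown progress markers; they help localize
    // a hang between the steps of this teardown sequence.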
405 | |
406 | if (Options::logGC()) |
        dataLog("1");
408 | |
409 | // Prevent new collections from being started. This is probably not even necessary, since we're not |
410 | // going to call into anything that starts collections. Still, this makes the algorithm more |
411 | // obviously sound. |
412 | m_isSafeToCollect = false; |
413 | |
414 | if (Options::logGC()) |
        dataLog("2");
416 | |
417 | bool isCollecting; |
418 | { |
419 | auto locker = holdLock(*m_threadLock); |
420 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
421 | isCollecting = m_lastServedTicket < m_lastGrantedTicket; |
422 | } |
423 | if (isCollecting) { |
424 | if (Options::logGC()) |
            dataLog("...]\n");
426 | |
427 | // Wait for the current collection to finish. |
428 | waitForCollector( |
429 | [&] (const AbstractLocker&) -> bool { |
430 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
431 | return m_lastServedTicket == m_lastGrantedTicket; |
432 | }); |
433 | |
434 | if (Options::logGC()) |
            dataLog("[GC<", RawPointer(this), ">: shutdown ");
436 | } |
437 | if (Options::logGC()) |
        dataLog("3");
439 | |
440 | RELEASE_ASSERT(m_requests.isEmpty()); |
441 | RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket); |
442 | |
443 | // Carefully bring the thread down. |
444 | bool stopped = false; |
445 | { |
446 | LockHolder locker(*m_threadLock); |
447 | stopped = m_thread->tryStop(locker); |
448 | m_threadShouldStop = true; |
449 | if (!stopped) |
450 | m_threadCondition->notifyOne(locker); |
451 | } |
452 | |
453 | if (Options::logGC()) |
        dataLog("4");
455 | |
456 | if (!stopped) |
457 | m_thread->join(); |
458 | |
459 | if (Options::logGC()) |
        dataLog("5 ");
461 | |
462 | if (UNLIKELY(Options::dumpHeapStatisticsAtVMDestruction())) |
463 | dumpHeapStatisticsAtVMDestruction(); |
464 | |
465 | m_arrayBuffers.lastChanceToFinalize(); |
466 | m_objectSpace.stopAllocatingForGood(); |
467 | m_objectSpace.lastChanceToFinalize(); |
468 | releaseDelayedReleasedObjects(); |
469 | |
470 | sweepAllLogicallyEmptyWeakBlocks(); |
471 | |
472 | m_objectSpace.freeMemory(); |
473 | |
474 | if (Options::logGC()) |
        dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
476 | } |
477 | |
478 | void Heap::releaseDelayedReleasedObjects() |
479 | { |
480 | #if USE(FOUNDATION) || defined(JSC_GLIB_API_ENABLED) |
481 | // We need to guard against the case that releasing an object can create more objects due to the |
482 | // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up |
483 | // back here and could try to recursively release objects. We guard that with a recursive entry |
    // count. Only the initial call will release objects; recursive calls simply return and let
    // the initial call to the function take care of any objects created during release time.
486 | // This also means that we need to loop until there are no objects in m_delayedReleaseObjects |
487 | // and use a temp Vector for the actual releasing. |
488 | if (!m_delayedReleaseRecursionCount++) { |
489 | while (!m_delayedReleaseObjects.isEmpty()) { |
490 | ASSERT(m_vm->currentThreadIsHoldingAPILock()); |
491 | |
492 | auto objectsToRelease = WTFMove(m_delayedReleaseObjects); |
493 | |
494 | { |
495 | // We need to drop locks before calling out to arbitrary code. |
496 | JSLock::DropAllLocks dropAllLocks(m_vm); |
497 | |
498 | #if USE(FOUNDATION) |
499 | void* context = objc_autoreleasePoolPush(); |
500 | #endif |
501 | objectsToRelease.clear(); |
502 | #if USE(FOUNDATION) |
503 | objc_autoreleasePoolPop(context); |
504 | #endif |
505 | } |
506 | } |
507 | } |
508 | m_delayedReleaseRecursionCount--; |
509 | #endif |
510 | } |
511 | |
void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
513 | { |
514 | didAllocate(size); |
515 | collectIfNecessaryOrDefer(); |
516 | } |
517 | |
void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
519 | { |
520 | // FIXME: Change this to use SaturatedArithmetic when available. |
521 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
522 | Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize; |
523 | checkedNewSize += size; |
524 | m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); |
525 | reportExtraMemoryAllocatedSlowCase(size); |
526 | } |
527 | |
528 | bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType) |
529 | { |
530 | #if PLATFORM(IOS_FAMILY) |
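    // Asking bmalloc for the footprint is comparatively expensive, so the answer is
    // cached and refreshed only on every 100th call, unless the caller asks for a
    // Direct (uncached) reading.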
531 | if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) { |
532 | m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold(); |
533 | m_precentAvailableMemoryCachedCallCount = 0; |
534 | } |
535 | |
536 | return m_overCriticalMemoryThreshold; |
537 | #else |
538 | UNUSED_PARAM(memoryThresholdCallType); |
539 | return false; |
540 | #endif |
541 | } |
542 | |
543 | void Heap::reportAbandonedObjectGraph() |
544 | { |
545 | // Our clients don't know exactly how much memory they |
546 | // are abandoning so we just guess for them. |
547 | size_t abandonedBytes = static_cast<size_t>(0.1 * capacity()); |
548 | |
549 | // We want to accelerate the next collection. Because memory has just |
550 | // been abandoned, the next collection has the potential to |
551 | // be more profitable. Since allocation is the trigger for collection, |
552 | // we hasten the next collection by pretending that we've allocated more memory. |
553 | if (m_fullActivityCallback) { |
554 | m_fullActivityCallback->didAllocate(*this, |
555 | m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); |
556 | } |
557 | m_bytesAbandonedSinceLastFullCollect += abandonedBytes; |
558 | } |
559 | |
560 | void Heap::protect(JSValue k) |
561 | { |
562 | ASSERT(k); |
563 | ASSERT(m_vm->currentThreadIsHoldingAPILock()); |
564 | |
565 | if (!k.isCell()) |
566 | return; |
567 | |
568 | m_protectedValues.add(k.asCell()); |
569 | } |
570 | |
571 | bool Heap::unprotect(JSValue k) |
572 | { |
573 | ASSERT(k); |
574 | ASSERT(m_vm->currentThreadIsHoldingAPILock()); |
575 | |
576 | if (!k.isCell()) |
577 | return false; |
578 | |
579 | return m_protectedValues.remove(k.asCell()); |
580 | } |
581 | |
582 | void Heap::addReference(JSCell* cell, ArrayBuffer* buffer) |
583 | { |
584 | if (m_arrayBuffers.addReference(cell, buffer)) { |
585 | collectIfNecessaryOrDefer(); |
586 | didAllocate(buffer->gcSizeEstimateInBytes()); |
587 | } |
588 | } |
589 | |
590 | template<typename CellType, typename CellSet> |
591 | void Heap::finalizeMarkedUnconditionalFinalizers(CellSet& cellSet) |
592 | { |
593 | cellSet.forEachMarkedCell( |
594 | [&] (HeapCell* cell, HeapCell::Kind) { |
595 | static_cast<CellType*>(cell)->finalizeUnconditionally(*vm()); |
596 | }); |
597 | } |
598 | |
599 | void Heap::finalizeUnconditionalFinalizers() |
600 | { |
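    // Cells of these types need per-collection cleanup even when they survive marking,
    // e.g. to sever references to things that died in this cycle.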
601 | vm()->builtinExecutables()->finalizeUnconditionally(); |
602 | finalizeMarkedUnconditionalFinalizers<FunctionExecutable>(vm()->functionExecutableSpace.space); |
603 | finalizeMarkedUnconditionalFinalizers<SymbolTable>(vm()->symbolTableSpace); |
604 | vm()->forEachCodeBlockSpace( |
605 | [&] (auto& space) { |
606 | this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.set); |
607 | }); |
608 | finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm()->executableToCodeBlockEdgesWithFinalizers); |
609 | finalizeMarkedUnconditionalFinalizers<StructureRareData>(vm()->structureRareDataSpace); |
610 | finalizeMarkedUnconditionalFinalizers<UnlinkedFunctionExecutable>(vm()->unlinkedFunctionExecutableSpace.set); |
611 | if (vm()->m_weakSetSpace) |
612 | finalizeMarkedUnconditionalFinalizers<JSWeakSet>(*vm()->m_weakSetSpace); |
613 | if (vm()->m_weakMapSpace) |
614 | finalizeMarkedUnconditionalFinalizers<JSWeakMap>(*vm()->m_weakMapSpace); |
615 | if (vm()->m_weakObjectRefSpace) |
616 | finalizeMarkedUnconditionalFinalizers<JSWeakObjectRef>(*vm()->m_weakObjectRefSpace); |
617 | if (vm()->m_errorInstanceSpace) |
618 | finalizeMarkedUnconditionalFinalizers<ErrorInstance>(*vm()->m_errorInstanceSpace); |
619 | |
620 | #if ENABLE(WEBASSEMBLY) |
621 | if (vm()->m_webAssemblyCodeBlockSpace) |
622 | finalizeMarkedUnconditionalFinalizers<JSWebAssemblyCodeBlock>(*vm()->m_webAssemblyCodeBlockSpace); |
623 | #endif |
624 | } |
625 | |
626 | void Heap::willStartIterating() |
627 | { |
628 | m_objectSpace.willStartIterating(); |
629 | } |
630 | |
631 | void Heap::didFinishIterating() |
632 | { |
633 | m_objectSpace.didFinishIterating(); |
634 | } |
635 | |
636 | void Heap::completeAllJITPlans() |
637 | { |
638 | if (!VM::canUseJIT()) |
639 | return; |
640 | #if ENABLE(JIT) |
641 | JITWorklist::ensureGlobalWorklist().completeAllForVM(*m_vm); |
642 | #endif // ENABLE(JIT) |
643 | DFG::completeAllPlansForVM(*m_vm); |
644 | } |
645 | |
646 | template<typename Func> |
647 | void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func) |
648 | { |
649 | m_codeBlocks->iterateCurrentlyExecuting(func); |
650 | if (VM::canUseJIT()) |
651 | DFG::iterateCodeBlocksForGC(*m_vm, func); |
652 | } |
653 | |
654 | template<typename Func> |
655 | void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func) |
656 | { |
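    // Copy the code blocks into a local vector first so that func runs after the
    // CodeBlockSet and DFG worklist locks have been dropped.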
657 | Vector<CodeBlock*, 256> codeBlocks; |
658 | iterateExecutingAndCompilingCodeBlocks( |
659 | [&] (CodeBlock* codeBlock) { |
660 | codeBlocks.append(codeBlock); |
661 | }); |
662 | for (CodeBlock* codeBlock : codeBlocks) |
663 | func(codeBlock); |
664 | } |
665 | |
666 | void Heap::assertMarkStacksEmpty() |
667 | { |
668 | bool ok = true; |
669 | |
670 | if (!m_sharedCollectorMarkStack->isEmpty()) { |
        dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
672 | ok = false; |
673 | } |
674 | |
675 | if (!m_sharedMutatorMarkStack->isEmpty()) { |
        dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
677 | ok = false; |
678 | } |
679 | |
680 | forEachSlotVisitor( |
681 | [&] (SlotVisitor& visitor) { |
682 | if (visitor.isEmpty()) |
683 | return; |
684 | |
            dataLog("FATAL: Visitor ", RawPointer(&visitor), " is not empty!\n");
686 | ok = false; |
687 | }); |
688 | |
689 | RELEASE_ASSERT(ok); |
690 | } |
691 | |
692 | void Heap::gatherStackRoots(ConservativeRoots& roots) |
693 | { |
694 | m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread); |
695 | } |
696 | |
697 | void Heap::gatherJSStackRoots(ConservativeRoots& roots) |
698 | { |
699 | #if ENABLE(C_LOOP) |
700 | m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks); |
701 | #else |
702 | UNUSED_PARAM(roots); |
703 | #endif |
704 | } |
705 | |
706 | void Heap::gatherScratchBufferRoots(ConservativeRoots& roots) |
707 | { |
708 | #if ENABLE(DFG_JIT) |
709 | if (!VM::canUseJIT()) |
710 | return; |
711 | m_vm->gatherScratchBufferRoots(roots); |
712 | #else |
713 | UNUSED_PARAM(roots); |
714 | #endif |
715 | } |
716 | |
717 | void Heap::beginMarking() |
718 | { |
    TimingScope timingScope(*this, "Heap::beginMarking");
720 | m_jitStubRoutines->clearMarks(); |
721 | m_objectSpace.beginMarking(); |
722 | setMutatorShouldBeFenced(true); |
723 | } |
724 | |
725 | void Heap::removeDeadCompilerWorklistEntries() |
726 | { |
727 | #if ENABLE(DFG_JIT) |
728 | if (!VM::canUseJIT()) |
729 | return; |
730 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
731 | DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm); |
732 | #endif |
733 | } |
734 | |
735 | bool Heap::isHeapSnapshotting() const |
736 | { |
737 | HeapProfiler* heapProfiler = m_vm->heapProfiler(); |
738 | if (UNLIKELY(heapProfiler)) |
739 | return heapProfiler->activeSnapshotBuilder(); |
740 | return false; |
741 | } |
742 | |
743 | struct GatherHeapSnapshotData : MarkedBlock::CountFunctor { |
744 | GatherHeapSnapshotData(VM& vm, HeapSnapshotBuilder& builder) |
745 | : m_vm(vm) |
746 | , m_builder(builder) |
747 | { |
748 | } |
749 | |
750 | IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const |
751 | { |
752 | if (isJSCellKind(kind)) { |
753 | JSCell* cell = static_cast<JSCell*>(heapCell); |
754 | cell->methodTable(m_vm)->heapSnapshot(cell, m_builder); |
755 | } |
756 | return IterationStatus::Continue; |
757 | } |
758 | |
759 | VM& m_vm; |
760 | HeapSnapshotBuilder& m_builder; |
761 | }; |
762 | |
void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
764 | { |
765 | if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) { |
766 | HeapIterationScope heapIterationScope(*this); |
767 | GatherHeapSnapshotData functor(*m_vm, *builder); |
768 | m_objectSpace.forEachLiveCell(heapIterationScope, functor); |
769 | } |
770 | } |
771 | |
772 | struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor { |
773 | RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot) |
774 | : m_snapshot(snapshot) |
775 | { |
776 | } |
777 | |
778 | IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const |
779 | { |
780 | if (isJSCellKind(kind)) |
781 | m_snapshot.sweepCell(static_cast<JSCell*>(cell)); |
782 | return IterationStatus::Continue; |
783 | } |
784 | |
785 | HeapSnapshot& m_snapshot; |
786 | }; |
787 | |
788 | void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler) |
789 | { |
790 | if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) { |
791 | HeapIterationScope heapIterationScope(*this); |
792 | RemoveDeadHeapSnapshotNodes functor(*snapshot); |
793 | m_objectSpace.forEachDeadCell(heapIterationScope, functor); |
794 | snapshot->shrinkToFit(); |
795 | } |
796 | } |
797 | |
798 | void Heap::updateObjectCounts() |
799 | { |
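    // A full collection recomputes the visited-byte total from scratch; an eden
    // collection adds this cycle's newly visited bytes on top of the previous total.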
800 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) |
801 | m_totalBytesVisited = 0; |
802 | |
803 | m_totalBytesVisitedThisCycle = bytesVisited(); |
804 | |
805 | m_totalBytesVisited += m_totalBytesVisitedThisCycle; |
806 | } |
807 | |
808 | void Heap::endMarking() |
809 | { |
810 | forEachSlotVisitor( |
811 | [&] (SlotVisitor& visitor) { |
812 | visitor.reset(); |
813 | }); |
814 | |
815 | assertMarkStacksEmpty(); |
816 | |
817 | RELEASE_ASSERT(m_raceMarkStack->isEmpty()); |
818 | |
819 | m_objectSpace.endMarking(); |
820 | setMutatorShouldBeFenced(Options::forceFencedBarrier()); |
821 | } |
822 | |
823 | size_t Heap::objectCount() |
824 | { |
825 | return m_objectSpace.objectCount(); |
826 | } |
827 | |
size_t Heap::extraMemorySize()
829 | { |
830 | // FIXME: Change this to use SaturatedArithmetic when available. |
831 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
832 | Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize; |
833 | checkedTotal += m_deprecatedExtraMemorySize; |
834 | checkedTotal += m_arrayBuffers.size(); |
835 | size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet(); |
836 | |
837 | ASSERT(m_objectSpace.capacity() >= m_objectSpace.size()); |
838 | return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity()); |
839 | } |
840 | |
841 | size_t Heap::size() |
842 | { |
843 | return m_objectSpace.size() + extraMemorySize(); |
844 | } |
845 | |
846 | size_t Heap::capacity() |
847 | { |
848 | return m_objectSpace.capacity() + extraMemorySize(); |
849 | } |
850 | |
851 | size_t Heap::protectedGlobalObjectCount() |
852 | { |
853 | size_t result = 0; |
854 | forEachProtectedCell( |
855 | [&] (JSCell* cell) { |
856 | if (cell->isObject() && asObject(cell)->isGlobalObject()) |
857 | result++; |
858 | }); |
859 | return result; |
860 | } |
861 | |
862 | size_t Heap::globalObjectCount() |
863 | { |
864 | HeapIterationScope iterationScope(*this); |
865 | size_t result = 0; |
866 | m_objectSpace.forEachLiveCell( |
867 | iterationScope, |
868 | [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus { |
869 | if (!isJSCellKind(kind)) |
870 | return IterationStatus::Continue; |
871 | JSCell* cell = static_cast<JSCell*>(heapCell); |
872 | if (cell->isObject() && asObject(cell)->isGlobalObject()) |
873 | result++; |
874 | return IterationStatus::Continue; |
875 | }); |
876 | return result; |
877 | } |
878 | |
879 | size_t Heap::protectedObjectCount() |
880 | { |
881 | size_t result = 0; |
882 | forEachProtectedCell( |
883 | [&] (JSCell*) { |
884 | result++; |
885 | }); |
886 | return result; |
887 | } |
888 | |
889 | std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts() |
890 | { |
891 | std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>(); |
892 | forEachProtectedCell( |
893 | [&] (JSCell* cell) { |
894 | recordType(*vm(), *result, cell); |
895 | }); |
896 | return result; |
897 | } |
898 | |
899 | std::unique_ptr<TypeCountSet> Heap::objectTypeCounts() |
900 | { |
901 | std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>(); |
902 | HeapIterationScope iterationScope(*this); |
903 | m_objectSpace.forEachLiveCell( |
904 | iterationScope, |
905 | [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus { |
906 | if (isJSCellKind(kind)) |
907 | recordType(*vm(), *result, static_cast<JSCell*>(cell)); |
908 | return IterationStatus::Continue; |
909 | }); |
910 | return result; |
911 | } |
912 | |
913 | void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort) |
914 | { |
915 | if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting) |
916 | return; |
917 | |
918 | VM& vm = *m_vm; |
919 | PreventCollectionScope preventCollectionScope(*this); |
920 | |
921 | // If JavaScript is running, it's not safe to delete all JavaScript code, since |
922 | // we'll end up returning to deleted code. |
923 | RELEASE_ASSERT(!vm.entryScope); |
924 | RELEASE_ASSERT(!m_collectionScope); |
925 | |
926 | completeAllJITPlans(); |
927 | |
928 | vm.forEachScriptExecutableSpace( |
929 | [&] (auto& spaceAndSet) { |
930 | HeapIterationScope heapIterationScope(*this); |
931 | auto& set = spaceAndSet.set; |
932 | set.forEachLiveCell( |
933 | [&] (HeapCell* cell, HeapCell::Kind) { |
934 | ScriptExecutable* executable = static_cast<ScriptExecutable*>(cell); |
935 | executable->clearCode(set); |
936 | }); |
937 | }); |
938 | |
939 | #if ENABLE(WEBASSEMBLY) |
940 | { |
941 | // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will |
942 | // have no idea that we cleared the code from all of the Executables in the |
943 | // VM. This could leave Wasm in an inconsistent state where it has an IC that |
944 | // points into a CodeBlock that could be dead. The IC will still succeed because |
945 | // it uses a callee check, but then it will call into dead code. |
946 | HeapIterationScope heapIterationScope(*this); |
947 | if (vm.m_webAssemblyCodeBlockSpace) { |
948 | vm.m_webAssemblyCodeBlockSpace->forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) { |
949 | ASSERT_UNUSED(kind, kind == HeapCell::JSCell); |
950 | JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell); |
951 | codeBlock->clearJSCallICs(vm); |
952 | }); |
953 | } |
954 | } |
955 | #endif |
956 | } |
957 | |
958 | void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort) |
959 | { |
960 | if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting) |
961 | return; |
962 | |
963 | VM& vm = *m_vm; |
964 | PreventCollectionScope preventCollectionScope(*this); |
965 | |
966 | RELEASE_ASSERT(!m_collectionScope); |
967 | |
968 | HeapIterationScope heapIterationScope(*this); |
969 | vm.unlinkedFunctionExecutableSpace.set.forEachLiveCell( |
970 | [&] (HeapCell* cell, HeapCell::Kind) { |
971 | UnlinkedFunctionExecutable* executable = static_cast<UnlinkedFunctionExecutable*>(cell); |
972 | executable->clearCode(vm); |
973 | }); |
974 | } |
975 | |
976 | void Heap::deleteUnmarkedCompiledCode() |
977 | { |
978 | vm()->forEachScriptExecutableSpace([] (auto& space) { space.space.sweep(); }); |
979 | vm()->forEachCodeBlockSpace([] (auto& space) { space.space.sweep(); }); // Sweeping must occur before deleting stubs, otherwise the stubs might still think they're alive as they get deleted. |
980 | m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines(); |
981 | } |
982 | |
983 | void Heap::addToRememberedSet(const JSCell* constCell) |
984 | { |
985 | JSCell* cell = const_cast<JSCell*>(constCell); |
986 | ASSERT(cell); |
987 | ASSERT(!Options::useConcurrentJIT() || !isCompilationThread()); |
988 | m_barriersExecuted++; |
989 | if (m_mutatorShouldBeFenced) { |
990 | WTF::loadLoadFence(); |
991 | if (!isMarked(cell)) { |
            // During a full collection a store into an unmarked object that had survived past
993 | // collections will manifest as a store to an unmarked PossiblyBlack object. If the |
994 | // object gets marked at some time after this then it will go down the normal marking |
995 | // path. So, we don't have to remember this object. We could return here. But we go |
996 | // further and attempt to re-white the object. |
997 | |
998 | RELEASE_ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Full); |
999 | |
1000 | if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) { |
1001 | // Now we protect against this race: |
1002 | // |
1003 | // 1) Object starts out black + unmarked. |
1004 | // --> We do isMarked here. |
1005 | // 2) Object is marked and greyed. |
1006 | // 3) Object is scanned and blacked. |
1007 | // --> We do atomicCompareExchangeCellStateStrong here. |
1008 | // |
1009 | // In this case we would have made the object white again, even though it should |
1010 | // be black. This check lets us correct our mistake. This relies on the fact that |
1011 | // isMarked converges monotonically to true. |
1012 | if (isMarked(cell)) { |
1013 | // It's difficult to work out whether the object should be grey or black at |
1014 | // this point. We say black conservatively. |
1015 | cell->setCellState(CellState::PossiblyBlack); |
1016 | } |
1017 | |
1018 | // Either way, we can return. Most likely, the object was not marked, and so the |
1019 | // object is now labeled white. This means that future barrier executions will not |
1020 | // fire. In the unlikely event that the object had become marked, we can still |
1021 | // return anyway, since we proved that the object was not marked at the time that |
1022 | // we executed this slow path. |
1023 | } |
1024 | |
1025 | return; |
1026 | } |
1027 | } else |
1028 | ASSERT(isMarked(cell)); |
1029 | // It could be that the object was *just* marked. This means that the collector may set the |
1030 | // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to |
1031 | // race with the collector here. If we win then this is accurate because the object _will_ |
1032 | // get scanned again. If we lose then someone else will barrier the object again. That would |
1033 | // be unfortunate but not the end of the world. |
1034 | cell->setCellState(CellState::PossiblyGrey); |
1035 | m_mutatorMarkStack->append(cell); |
1036 | } |
1037 | |
1038 | void Heap::sweepSynchronously() |
1039 | { |
1040 | MonotonicTime before { }; |
1041 | if (Options::logGC()) { |
        dataLog("Full sweep: ", capacity() / 1024, "kb ");
1043 | before = MonotonicTime::now(); |
1044 | } |
1045 | m_objectSpace.sweep(); |
1046 | m_objectSpace.shrink(); |
1047 | if (Options::logGC()) { |
1048 | MonotonicTime after = MonotonicTime::now(); |
        dataLog("=> ", capacity() / 1024, "kb, ", (after - before).milliseconds(), "ms");
1050 | } |
1051 | } |
1052 | |
1053 | void Heap::collect(Synchronousness synchronousness, GCRequest request) |
1054 | { |
1055 | switch (synchronousness) { |
1056 | case Async: |
1057 | collectAsync(request); |
1058 | return; |
1059 | case Sync: |
1060 | collectSync(request); |
1061 | return; |
1062 | } |
1063 | RELEASE_ASSERT_NOT_REACHED(); |
1064 | } |
1065 | |
1066 | void Heap::collectNow(Synchronousness synchronousness, GCRequest request) |
1067 | { |
1068 | if (validateDFGDoesGC) |
1069 | RELEASE_ASSERT(expectDoesGC()); |
1070 | |
1071 | switch (synchronousness) { |
1072 | case Async: { |
1073 | collectAsync(request); |
1074 | stopIfNecessary(); |
1075 | return; |
1076 | } |
1077 | |
1078 | case Sync: { |
1079 | collectSync(request); |
1080 | |
1081 | DeferGCForAWhile deferGC(*this); |
1082 | if (UNLIKELY(Options::useImmortalObjects())) |
1083 | sweeper().stopSweeping(); |
1084 | |
1085 | bool alreadySweptInCollectSync = shouldSweepSynchronously(); |
1086 | if (!alreadySweptInCollectSync) { |
1087 | if (Options::logGC()) |
                dataLog("[GC<", RawPointer(this), ">: ");
1089 | sweepSynchronously(); |
1090 | if (Options::logGC()) |
                dataLog("]\n");
1092 | } |
1093 | m_objectSpace.assertNoUnswept(); |
1094 | |
1095 | sweepAllLogicallyEmptyWeakBlocks(); |
1096 | return; |
1097 | } } |
1098 | RELEASE_ASSERT_NOT_REACHED(); |
1099 | } |
1100 | |
1101 | void Heap::collectAsync(GCRequest request) |
1102 | { |
1103 | if (validateDFGDoesGC) |
1104 | RELEASE_ASSERT(expectDoesGC()); |
1105 | |
1106 | if (!m_isSafeToCollect) |
1107 | return; |
1108 | |
1109 | bool alreadyRequested = false; |
1110 | { |
1111 | LockHolder locker(*m_threadLock); |
1112 | for (const GCRequest& previousRequest : m_requests) { |
1113 | if (request.subsumedBy(previousRequest)) { |
1114 | alreadyRequested = true; |
1115 | break; |
1116 | } |
1117 | } |
1118 | } |
1119 | if (alreadyRequested) |
1120 | return; |
1121 | |
1122 | requestCollection(request); |
1123 | } |
1124 | |
1125 | void Heap::collectSync(GCRequest request) |
1126 | { |
1127 | if (validateDFGDoesGC) |
1128 | RELEASE_ASSERT(expectDoesGC()); |
1129 | |
1130 | if (!m_isSafeToCollect) |
1131 | return; |
1132 | |
1133 | waitForCollection(requestCollection(request)); |
1134 | } |
1135 | |
1136 | bool Heap::shouldCollectInCollectorThread(const AbstractLocker&) |
1137 | { |
1138 | RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket)); |
1139 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
1140 | |
1141 | if (false) |
        dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1143 | |
1144 | return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit); |
1145 | } |
1146 | |
1147 | void Heap::collectInCollectorThread() |
1148 | { |
1149 | for (;;) { |
1150 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr); |
1151 | switch (result) { |
1152 | case RunCurrentPhaseResult::Finished: |
1153 | return; |
1154 | case RunCurrentPhaseResult::Continue: |
1155 | break; |
1156 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1157 | RELEASE_ASSERT_NOT_REACHED(); |
1158 | break; |
1159 | } |
1160 | } |
1161 | } |
1162 | |
1163 | ALWAYS_INLINE int asInt(CollectorPhase phase) |
1164 | { |
1165 | return static_cast<int>(phase); |
1166 | } |
1167 | |
1168 | void Heap::checkConn(GCConductor conn) |
1169 | { |
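    // The "conn" (a nautical term, as in taking the conn of a ship) names which side,
    // mutator or collector, is currently driving the collector's phase machine.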
1170 | unsigned worldState = m_worldState.load(); |
1171 | switch (conn) { |
1172 | case GCConductor::Mutator: |
1173 | RELEASE_ASSERT(worldState & mutatorHasConnBit, worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered()); |
1174 | return; |
1175 | case GCConductor::Collector: |
1176 | RELEASE_ASSERT(!(worldState & mutatorHasConnBit), worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered()); |
1177 | return; |
1178 | } |
1179 | RELEASE_ASSERT_NOT_REACHED(); |
1180 | } |
1181 | |
1182 | auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult |
1183 | { |
1184 | checkConn(conn); |
1185 | m_currentThreadState = currentThreadState; |
1186 | m_currentThread = &Thread::current(); |
1187 | |
1188 | if (conn == GCConductor::Mutator) |
1189 | sanitizeStackForVM(vm()); |
1190 | |
1191 | // If the collector transfers the conn to the mutator, it leaves us in between phases. |
1192 | if (!finishChangingPhase(conn)) { |
        // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1194 | // this, but it's probably not the end of the world if it did happen. |
1195 | if (false) |
            dataLog("Conn bounce-back.\n");
1197 | return RunCurrentPhaseResult::Finished; |
1198 | } |
1199 | |
1200 | bool result = false; |
1201 | switch (m_currentPhase) { |
1202 | case CollectorPhase::NotRunning: |
1203 | result = runNotRunningPhase(conn); |
1204 | break; |
1205 | |
1206 | case CollectorPhase::Begin: |
1207 | result = runBeginPhase(conn); |
1208 | break; |
1209 | |
1210 | case CollectorPhase::Fixpoint: |
1211 | if (!currentThreadState && conn == GCConductor::Mutator) |
1212 | return RunCurrentPhaseResult::NeedCurrentThreadState; |
1213 | |
1214 | result = runFixpointPhase(conn); |
1215 | break; |
1216 | |
1217 | case CollectorPhase::Concurrent: |
1218 | result = runConcurrentPhase(conn); |
1219 | break; |
1220 | |
1221 | case CollectorPhase::Reloop: |
1222 | result = runReloopPhase(conn); |
1223 | break; |
1224 | |
1225 | case CollectorPhase::End: |
1226 | result = runEndPhase(conn); |
1227 | break; |
1228 | } |
1229 | |
1230 | return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished; |
1231 | } |
1232 | |
1233 | NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn) |
1234 | { |
1235 | // Check m_requests since the mutator calls this to poll what's going on. |
1236 | { |
1237 | auto locker = holdLock(*m_threadLock); |
1238 | if (m_requests.isEmpty()) |
1239 | return false; |
1240 | // Check if the mutator has stolen the conn while the collector transitioned from End to NotRunning |
1241 | if (conn == GCConductor::Collector && !!(m_worldState.load() & mutatorHasConnBit)) |
1242 | return false; |
1243 | } |
1244 | |
1245 | return changePhase(conn, CollectorPhase::Begin); |
1246 | } |
1247 | |
1248 | NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn) |
1249 | { |
1250 | m_currentGCStartTime = MonotonicTime::now(); |
1251 | |
1252 | { |
1253 | LockHolder locker(*m_threadLock); |
1254 | RELEASE_ASSERT(!m_requests.isEmpty()); |
1255 | m_currentRequest = m_requests.first(); |
1256 | } |
1257 | |
1258 | if (Options::logGC()) |
        dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1260 | |
1261 | m_beforeGC = MonotonicTime::now(); |
1262 | |
1263 | if (m_collectionScope) { |
        dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1265 | RELEASE_ASSERT_NOT_REACHED(); |
1266 | } |
1267 | |
1268 | willStartCollection(); |
1269 | |
1270 | if (UNLIKELY(m_verifier)) { |
1271 | // Verify that live objects from the last GC cycle haven't been corrupted by |
1272 | // mutators before we begin this new GC cycle. |
1273 | m_verifier->verify(HeapVerifier::Phase::BeforeGC); |
1274 | |
1275 | m_verifier->startGC(); |
1276 | m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking); |
1277 | } |
1278 | |
1279 | prepareForMarking(); |
1280 | |
1281 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
1282 | m_opaqueRoots.clear(); |
1283 | m_collectorSlotVisitor->clearMarkStacks(); |
1284 | m_mutatorMarkStack->clear(); |
1285 | } |
1286 | |
1287 | RELEASE_ASSERT(m_raceMarkStack->isEmpty()); |
1288 | |
1289 | beginMarking(); |
1290 | |
1291 | forEachSlotVisitor( |
1292 | [&] (SlotVisitor& visitor) { |
1293 | visitor.didStartMarking(); |
1294 | }); |
1295 | |
1296 | m_parallelMarkersShouldExit = false; |
1297 | |
1298 | m_helperClient.setFunction( |
1299 | [this] () { |
1300 | SlotVisitor* slotVisitor; |
1301 | { |
1302 | LockHolder locker(m_parallelSlotVisitorLock); |
                RELEASE_ASSERT_WITH_MESSAGE(!m_availableParallelSlotVisitors.isEmpty(), "Parallel SlotVisitors are allocated a priori");
1304 | slotVisitor = m_availableParallelSlotVisitors.takeLast(); |
1305 | } |
1306 | |
1307 | Thread::registerGCThread(GCThreadType::Helper); |
1308 | |
1309 | { |
1310 | ParallelModeEnabler parallelModeEnabler(*slotVisitor); |
1311 | slotVisitor->drainFromShared(SlotVisitor::SlaveDrain); |
1312 | } |
1313 | |
1314 | { |
1315 | LockHolder locker(m_parallelSlotVisitorLock); |
1316 | m_availableParallelSlotVisitors.append(slotVisitor); |
1317 | } |
1318 | }); |
1319 | |
1320 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1321 | |
1322 | m_constraintSet->didStartMarking(); |
1323 | |
1324 | m_scheduler->beginCollection(); |
1325 | if (Options::logGC()) |
1326 | m_scheduler->log(); |
1327 | |
1328 | // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()" |
1329 | // checks because bootstrap would have put things into the visitor. So, we should fall |
1330 | // through to draining. |
1331 | |
1332 | if (!slotVisitor.didReachTermination()) { |
        dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
        dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
        dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
        dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
        dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
        dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
        dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
        dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1341 | RELEASE_ASSERT_NOT_REACHED(); |
1342 | } |
1343 | |
1344 | return changePhase(conn, CollectorPhase::Fixpoint); |
1345 | } |
1346 | |
1347 | NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn) |
1348 | { |
1349 | RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState); |
1350 | |
1351 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1352 | |
1353 | if (Options::logGC()) { |
1354 | HashMap<const char*, size_t> visitMap; |
1355 | forEachSlotVisitor( |
1356 | [&] (SlotVisitor& slotVisitor) { |
1357 | visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024); |
1358 | }); |
1359 | |
1360 | auto perVisitorDump = sortedMapDump( |
1361 | visitMap, |
1362 | [] (const char* a, const char* b) -> bool { |
1363 | return strcmp(a, b) < 0; |
1364 | }, |
            ":", " ");
1366 | |
        dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1368 | } |
1369 | |
1370 | if (slotVisitor.didReachTermination()) { |
1371 | m_opaqueRoots.deleteOldTables(); |
1372 | |
1373 | m_scheduler->didReachTermination(); |
1374 | |
1375 | assertMarkStacksEmpty(); |
1376 | |
1377 | // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely, |
1378 | // we don't have to execute root constraints again unless the mutator did run. At a |
1379 | // minimum, we could use this for work estimates - but it's probably more than just an |
1380 | // estimate. |
1381 | // https://bugs.webkit.org/show_bug.cgi?id=166828 |
1382 | |
1383 | // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also |
1384 | // add their own using Heap::addMarkingConstraint(). |
1385 | bool converged = m_constraintSet->executeConvergence(slotVisitor); |
1386 | |
1387 | // FIXME: The slotVisitor.isEmpty() check is most likely not needed. |
1388 | // https://bugs.webkit.org/show_bug.cgi?id=180310 |
1389 | if (converged && slotVisitor.isEmpty()) { |
1390 | assertMarkStacksEmpty(); |
1391 | return changePhase(conn, CollectorPhase::End); |
1392 | } |
1393 | |
1394 | m_scheduler->didExecuteConstraints(); |
1395 | } |
1396 | |
1397 | if (Options::logGC()) |
        dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1399 | |
1400 | { |
1401 | ParallelModeEnabler enabler(slotVisitor); |
1402 | slotVisitor.drainInParallel(m_scheduler->timeToResume()); |
1403 | } |
1404 | |
1405 | m_scheduler->synchronousDrainingDidStall(); |
1406 | |
1407 | // This is kinda tricky. The termination check looks at: |
1408 | // |
1409 | // - Whether the marking threads are active. If they are not, this means that the marking threads' |
1410 | // SlotVisitors are empty. |
1411 | // - Whether the collector's slot visitor is empty. |
1412 | // - Whether the shared mark stacks are empty. |
1413 | // |
1414 | // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC |
1415 | // work increment, so it must be empty now. |
1416 | if (slotVisitor.didReachTermination()) |
        return true; // This is like relooping to the top of runFixpointPhase().
1418 | |
1419 | if (!m_scheduler->shouldResume()) |
1420 | return true; |
1421 | |
1422 | m_scheduler->willResume(); |
1423 | |
1424 | if (Options::logGC()) { |
1425 | double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds(); |
        dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1427 | } |
1428 | |
1429 | // Forgive the mutator for its past failures to keep up. |
1430 | // FIXME: Figure out if moving this to different places results in perf changes. |
1431 | m_incrementBalance = 0; |
1432 | |
1433 | return changePhase(conn, CollectorPhase::Concurrent); |
1434 | } |
1435 | |
1436 | NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn) |
1437 | { |
1438 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1439 | |
1440 | switch (conn) { |
1441 | case GCConductor::Mutator: { |
        // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
        // stopIfNecessary(), i.e. on every allocation slow path. When that happens we check whether
        // it's time to stop and do some work.
1445 | if (slotVisitor.didReachTermination() |
1446 | || m_scheduler->shouldStop()) |
1447 | return changePhase(conn, CollectorPhase::Reloop); |
1448 | |
1449 | // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate |
1450 | // everything. This is super cheap if the SlotVisitor is already empty. |
1451 | slotVisitor.donateAll(); |
1452 | return false; |
1453 | } |
1454 | case GCConductor::Collector: { |
1455 | { |
1456 | ParallelModeEnabler enabler(slotVisitor); |
1457 | slotVisitor.drainInParallelPassively(m_scheduler->timeToStop()); |
1458 | } |
1459 | return changePhase(conn, CollectorPhase::Reloop); |
1460 | } } |
1461 | |
1462 | RELEASE_ASSERT_NOT_REACHED(); |
1463 | return false; |
1464 | } |
1465 | |
1466 | NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn) |
1467 | { |
1468 | if (Options::logGC()) |
        dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1470 | |
1471 | m_scheduler->didStop(); |
1472 | |
1473 | if (Options::logGC()) |
1474 | m_scheduler->log(); |
1475 | |
1476 | return changePhase(conn, CollectorPhase::Fixpoint); |
1477 | } |
1478 | |
1479 | NEVER_INLINE bool Heap::runEndPhase(GCConductor conn) |
1480 | { |
1481 | m_scheduler->endCollection(); |
1482 | |
1483 | { |
1484 | auto locker = holdLock(m_markingMutex); |
1485 | m_parallelMarkersShouldExit = true; |
1486 | m_markingConditionVariable.notifyAll(); |
1487 | } |
1488 | m_helperClient.finish(); |
1489 | |
1490 | iterateExecutingAndCompilingCodeBlocks( |
1491 | [&] (CodeBlock* codeBlock) { |
1492 | writeBarrier(codeBlock); |
1493 | }); |
1494 | |
1495 | updateObjectCounts(); |
1496 | endMarking(); |
1497 | |
1498 | if (UNLIKELY(m_verifier)) { |
1499 | m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking); |
1500 | m_verifier->verify(HeapVerifier::Phase::AfterMarking); |
1501 | } |
1502 | |
1503 | if (vm()->typeProfiler()) |
1504 | vm()->typeProfiler()->invalidateTypeSetCache(*vm()); |
1505 | |
1506 | reapWeakHandles(); |
1507 | pruneStaleEntriesFromWeakGCMaps(); |
1508 | sweepArrayBuffers(); |
1509 | snapshotUnswept(); |
1510 | finalizeUnconditionalFinalizers(); |
1511 | removeDeadCompilerWorklistEntries(); |
1512 | notifyIncrementalSweeper(); |
1513 | |
1514 | m_codeBlocks->iterateCurrentlyExecuting( |
1515 | [&] (CodeBlock* codeBlock) { |
1516 | writeBarrier(codeBlock); |
1517 | }); |
1518 | m_codeBlocks->clearCurrentlyExecuting(); |
1519 | |
1520 | m_objectSpace.prepareForAllocation(); |
1521 | updateAllocationLimits(); |
1522 | |
1523 | if (UNLIKELY(m_verifier)) { |
1524 | m_verifier->trimDeadCells(); |
1525 | m_verifier->verify(HeapVerifier::Phase::AfterGC); |
1526 | } |
1527 | |
1528 | didFinishCollection(); |
1529 | |
1530 | if (m_currentRequest.didFinishEndPhase) |
1531 | m_currentRequest.didFinishEndPhase->run(); |
1532 | |
1533 | if (false) { |
        dataLog("Heap state after GC:\n");
1535 | m_objectSpace.dumpBits(); |
1536 | } |
1537 | |
1538 | if (Options::logGC()) { |
1539 | double thisPauseMS = (m_afterGC - m_stopTime).milliseconds(); |
        dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1541 | } |
1542 | |
1543 | { |
1544 | auto locker = holdLock(*m_threadLock); |
1545 | m_requests.removeFirst(); |
1546 | m_lastServedTicket++; |
1547 | clearMutatorWaiting(); |
1548 | } |
1549 | ParkingLot::unparkAll(&m_worldState); |
1550 | |
1551 | if (false) |
        dataLog("GC END!\n");
1553 | |
1554 | setNeedFinalize(); |
1555 | |
1556 | m_lastGCStartTime = m_currentGCStartTime; |
1557 | m_lastGCEndTime = MonotonicTime::now(); |
1558 | m_totalGCTime += m_lastGCEndTime - m_lastGCStartTime; |
1559 | |
1560 | return changePhase(conn, CollectorPhase::NotRunning); |
1561 | } |
1562 | |
1563 | bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase) |
1564 | { |
1565 | checkConn(conn); |
1566 | |
1567 | m_lastPhase = m_currentPhase; |
1568 | m_nextPhase = nextPhase; |
1569 | |
1570 | return finishChangingPhase(conn); |
1571 | } |
1572 | |
1573 | NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn) |
1574 | { |
1575 | checkConn(conn); |
1576 | |
1577 | if (m_nextPhase == m_currentPhase) |
1578 | return true; |
1579 | |
1580 | if (false) |
        dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1582 | |
1583 | m_phaseVersion++; |
1584 | |
1585 | bool suspendedBefore = worldShouldBeSuspended(m_currentPhase); |
1586 | bool suspendedAfter = worldShouldBeSuspended(m_nextPhase); |
1587 | |
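    // Only transitions that cross the suspended/resumed boundary require stopping or resuming
    // the world; which side does that work depends on who currently holds the conn.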
1588 | if (suspendedBefore != suspendedAfter) { |
1589 | if (suspendedBefore) { |
1590 | RELEASE_ASSERT(!suspendedAfter); |
1591 | |
1592 | resumeThePeriphery(); |
1593 | if (conn == GCConductor::Collector) |
1594 | resumeTheMutator(); |
1595 | else |
1596 | handleNeedFinalize(); |
1597 | } else { |
1598 | RELEASE_ASSERT(!suspendedBefore); |
1599 | RELEASE_ASSERT(suspendedAfter); |
1600 | |
1601 | if (conn == GCConductor::Collector) { |
1602 | waitWhileNeedFinalize(); |
1603 | if (!stopTheMutator()) { |
1604 | if (false) |
                        dataLog("Returning false.\n");
1606 | return false; |
1607 | } |
1608 | } else { |
1609 | sanitizeStackForVM(m_vm); |
1610 | handleNeedFinalize(); |
1611 | } |
1612 | stopThePeriphery(conn); |
1613 | } |
1614 | } |
1615 | |
1616 | m_currentPhase = m_nextPhase; |
1617 | return true; |
1618 | } |
1619 | |
1620 | void Heap::stopThePeriphery(GCConductor conn) |
1621 | { |
1622 | if (m_worldIsStopped) { |
        dataLog("FATAL: world already stopped.\n");
1624 | RELEASE_ASSERT_NOT_REACHED(); |
1625 | } |
1626 | |
1627 | if (m_mutatorDidRun) |
1628 | m_mutatorExecutionVersion++; |
1629 | |
1630 | m_mutatorDidRun = false; |
1631 | |
1632 | suspendCompilerThreads(); |
1633 | m_worldIsStopped = true; |
1634 | |
1635 | forEachSlotVisitor( |
1636 | [&] (SlotVisitor& slotVisitor) { |
1637 | slotVisitor.updateMutatorIsStopped(NoLockingNecessary); |
1638 | }); |
1639 | |
1640 | #if ENABLE(JIT) |
1641 | if (VM::canUseJIT()) { |
1642 | DeferGCForAWhile awhile(*this); |
1643 | if (JITWorklist::ensureGlobalWorklist().completeAllForVM(*m_vm) |
1644 | && conn == GCConductor::Collector) |
1645 | setGCDidJIT(); |
1646 | } |
1647 | #endif // ENABLE(JIT) |
1648 | UNUSED_PARAM(conn); |
1649 | |
1650 | if (auto* shadowChicken = vm()->shadowChicken()) |
1651 | shadowChicken->update(*vm(), vm()->topCallFrame); |
1652 | |
1653 | m_structureIDTable.flushOldTables(); |
1654 | m_objectSpace.stopAllocating(); |
1655 | |
1656 | m_stopTime = MonotonicTime::now(); |
1657 | } |
1658 | |
1659 | NEVER_INLINE void Heap::resumeThePeriphery() |
1660 | { |
1661 | // Calling resumeAllocating does the Right Thing depending on whether this is the end of a |
1662 | // collection cycle or this is just a concurrent phase within a collection cycle: |
1663 | // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the |
1664 | // last active block. |
1665 | // - During collection cycle: it reinstates the last active block. |
1666 | m_objectSpace.resumeAllocating(); |
1667 | |
1668 | m_barriersExecuted = 0; |
1669 | |
1670 | if (!m_worldIsStopped) { |
        dataLog("Fatal: collector does not believe that the world is stopped.\n");
1672 | RELEASE_ASSERT_NOT_REACHED(); |
1673 | } |
1674 | m_worldIsStopped = false; |
1675 | |
1676 | // FIXME: This could be vastly improved: we want to grab the locks in the order in which they |
1677 | // become available. We basically want a lockAny() method that will lock whatever lock is available |
1678 | // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple |
1679 | // queues at once, which is totally achievable - it would just require memory allocation, which is |
1680 | // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock |
1681 | // with a DLG-style handshake mechanism, but that seems not as general. |
1682 | Vector<SlotVisitor*, 8> slotVisitorsToUpdate; |
1683 | |
1684 | forEachSlotVisitor( |
1685 | [&] (SlotVisitor& slotVisitor) { |
1686 | slotVisitorsToUpdate.append(&slotVisitor); |
1687 | }); |
1688 | |
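    // First try to update each visitor opportunistically: grab its rightToRun lock only when
    // that can be done without blocking, and yield between rounds. After a bounded number of
    // rounds, fall through and update any stragglers unconditionally.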
1689 | for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) { |
1690 | for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) { |
1691 | SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index]; |
1692 | bool remove = false; |
1693 | if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed()) |
1694 | remove = true; |
1695 | else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) { |
1696 | slotVisitor.updateMutatorIsStopped(locker); |
1697 | remove = true; |
1698 | } |
1699 | if (remove) { |
1700 | slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last(); |
1701 | slotVisitorsToUpdate.takeLast(); |
1702 | } |
1703 | } |
1704 | Thread::yield(); |
1705 | } |
1706 | |
1707 | for (SlotVisitor* slotVisitor : slotVisitorsToUpdate) |
1708 | slotVisitor->updateMutatorIsStopped(); |
1709 | |
1710 | resumeCompilerThreads(); |
1711 | } |
1712 | |
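// Returns true if the world is now stopped. Returns false if the conn was instead handed to
// the mutator, in which case the mutator drives the rest of this collection from its next
// stopIfNecessary() poll.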
1713 | bool Heap::stopTheMutator() |
1714 | { |
1715 | for (;;) { |
1716 | unsigned oldState = m_worldState.load(); |
1717 | if (oldState & stoppedBit) { |
1718 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1719 | RELEASE_ASSERT(!(oldState & mutatorWaitingBit)); |
1720 | RELEASE_ASSERT(!(oldState & mutatorHasConnBit)); |
1721 | return true; |
1722 | } |
1723 | |
1724 | if (oldState & mutatorHasConnBit) { |
1725 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1726 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1727 | return false; |
1728 | } |
1729 | |
1730 | if (!(oldState & hasAccessBit)) { |
1731 | RELEASE_ASSERT(!(oldState & mutatorHasConnBit)); |
1732 | RELEASE_ASSERT(!(oldState & mutatorWaitingBit)); |
1733 | // We can stop the world instantly. |
1734 | if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit)) |
1735 | return true; |
1736 | continue; |
1737 | } |
1738 | |
1739 | // Transfer the conn to the mutator and bail. |
1740 | RELEASE_ASSERT(oldState & hasAccessBit); |
1741 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1742 | unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit; |
1743 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1744 | if (false) |
                dataLog("Handed off the conn.\n");
1746 | m_stopIfNecessaryTimer->scheduleSoon(); |
1747 | ParkingLot::unparkAll(&m_worldState); |
1748 | return false; |
1749 | } |
1750 | } |
1751 | } |
1752 | |
1753 | NEVER_INLINE void Heap::resumeTheMutator() |
1754 | { |
1755 | if (false) |
        dataLog("Resuming the mutator.\n");
1757 | for (;;) { |
1758 | unsigned oldState = m_worldState.load(); |
1759 | if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) { |
            dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1761 | RELEASE_ASSERT_NOT_REACHED(); |
1762 | } |
1763 | if (oldState & mutatorHasConnBit) { |
            dataLog("Fatal: mutator has the conn.\n");
1765 | RELEASE_ASSERT_NOT_REACHED(); |
1766 | } |
1767 | |
1768 | if (!(oldState & stoppedBit)) { |
1769 | if (false) |
                dataLog("Returning because not stopped.\n");
1771 | return; |
1772 | } |
1773 | |
1774 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) { |
1775 | if (false) |
                dataLog("CASing and returning.\n");
1777 | ParkingLot::unparkAll(&m_worldState); |
1778 | return; |
1779 | } |
1780 | } |
1781 | } |
1782 | |
1783 | void Heap::stopIfNecessarySlow() |
1784 | { |
1785 | if (validateDFGDoesGC) |
1786 | RELEASE_ASSERT(expectDoesGC()); |
1787 | |
1788 | while (stopIfNecessarySlow(m_worldState.load())) { } |
1789 | |
1790 | RELEASE_ASSERT(m_worldState.load() & hasAccessBit); |
1791 | RELEASE_ASSERT(!(m_worldState.load() & stoppedBit)); |
1792 | |
1793 | handleGCDidJIT(); |
1794 | handleNeedFinalize(); |
1795 | m_mutatorDidRun = true; |
1796 | } |
1797 | |
1798 | bool Heap::stopIfNecessarySlow(unsigned oldState) |
1799 | { |
1800 | if (validateDFGDoesGC) |
1801 | RELEASE_ASSERT(expectDoesGC()); |
1802 | |
1803 | RELEASE_ASSERT(oldState & hasAccessBit); |
1804 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1805 | |
1806 | // It's possible for us to wake up with finalization already requested but the world not yet |
1807 | // resumed. If that happens, we can't run finalization yet. |
1808 | if (handleNeedFinalize(oldState)) |
1809 | return true; |
1810 | |
1811 | // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then |
1812 | // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would |
1813 | // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit |
1814 | // and there would be some other bit indicating whether we were in some GC phase other than the |
1815 | // NotRunning or Concurrent ones. |
1816 | if (oldState & mutatorHasConnBit) |
1817 | collectInMutatorThread(); |
1818 | |
1819 | return false; |
1820 | } |
1821 | |
1822 | NEVER_INLINE void Heap::collectInMutatorThread() |
1823 | { |
1824 | CollectingScope collectingScope(*this); |
1825 | for (;;) { |
1826 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr); |
1827 | switch (result) { |
1828 | case RunCurrentPhaseResult::Finished: |
1829 | return; |
1830 | case RunCurrentPhaseResult::Continue: |
1831 | break; |
1832 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1833 | sanitizeStackForVM(m_vm); |
1834 | auto lambda = [&] (CurrentThreadState& state) { |
1835 | for (;;) { |
1836 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state); |
1837 | switch (result) { |
1838 | case RunCurrentPhaseResult::Finished: |
1839 | return; |
1840 | case RunCurrentPhaseResult::Continue: |
1841 | break; |
1842 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1843 | RELEASE_ASSERT_NOT_REACHED(); |
1844 | break; |
1845 | } |
1846 | } |
1847 | }; |
1848 | callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda))); |
1849 | return; |
1850 | } |
1851 | } |
1852 | } |
1853 | |
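// Repeatedly evaluates func while holding m_threadLock until it returns true, parking on the
// world state in between. Any stop-the-world or finalization request that arrives while we
// wait is serviced before we park again.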
1854 | template<typename Func> |
1855 | void Heap::waitForCollector(const Func& func) |
1856 | { |
1857 | for (;;) { |
1858 | bool done; |
1859 | { |
1860 | LockHolder locker(*m_threadLock); |
1861 | done = func(locker); |
1862 | if (!done) { |
1863 | setMutatorWaiting(); |
1864 | |
1865 | // At this point, the collector knows that we intend to wait, and he will clear the |
                // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
                // prevents us from parking unless a stop-the-world is also in effect. Unparking
                // after clearing means that if the clearing happens after we park, we will still
                // be unparked.
1869 | } |
1870 | } |
1871 | |
1872 | // If we're in a stop-the-world scenario, we need to wait for that even if done is true. |
1873 | unsigned oldState = m_worldState.load(); |
1874 | if (stopIfNecessarySlow(oldState)) |
1875 | continue; |
1876 | |
1877 | // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just |
1878 | // do the collection. |
1879 | relinquishConn(); |
1880 | |
1881 | if (done) { |
1882 | clearMutatorWaiting(); // Clean up just in case. |
1883 | return; |
1884 | } |
1885 | |
1886 | // If mutatorWaitingBit is still set then we want to wait. |
1887 | ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit); |
1888 | } |
1889 | } |
1890 | |
1891 | void Heap::acquireAccessSlow() |
1892 | { |
1893 | for (;;) { |
1894 | unsigned oldState = m_worldState.load(); |
1895 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1896 | |
1897 | if (oldState & stoppedBit) { |
1898 | if (verboseStop) { |
                dataLog("Stopping in acquireAccess!\n");
1900 | WTFReportBacktrace(); |
1901 | } |
1902 | // Wait until we're not stopped anymore. |
1903 | ParkingLot::compareAndPark(&m_worldState, oldState); |
1904 | continue; |
1905 | } |
1906 | |
1907 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1908 | unsigned newState = oldState | hasAccessBit; |
1909 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1910 | handleGCDidJIT(); |
1911 | handleNeedFinalize(); |
1912 | m_mutatorDidRun = true; |
1913 | stopIfNecessary(); |
1914 | return; |
1915 | } |
1916 | } |
1917 | } |
1918 | |
1919 | void Heap::releaseAccessSlow() |
1920 | { |
1921 | for (;;) { |
1922 | unsigned oldState = m_worldState.load(); |
1923 | if (!(oldState & hasAccessBit)) { |
            dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1925 | RELEASE_ASSERT_NOT_REACHED(); |
1926 | } |
1927 | if (oldState & stoppedBit) { |
            dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1929 | RELEASE_ASSERT_NOT_REACHED(); |
1930 | } |
1931 | |
1932 | if (handleNeedFinalize(oldState)) |
1933 | continue; |
1934 | |
1935 | unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit); |
1936 | |
1937 | if ((oldState & mutatorHasConnBit) |
1938 | && m_nextPhase != m_currentPhase) { |
1939 | // This means that the collector thread had given us the conn so that we would do something |
1940 | // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In |
1941 | // the meantime, since we're handing the conn over, the collector will be awoken and it is |
1942 | // sure to have work to do. |
1943 | newState |= stoppedBit; |
1944 | } |
1945 | |
1946 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1947 | if (oldState & mutatorHasConnBit) |
1948 | finishRelinquishingConn(); |
1949 | return; |
1950 | } |
1951 | } |
1952 | } |
1953 | |
1954 | bool Heap::relinquishConn(unsigned oldState) |
1955 | { |
1956 | RELEASE_ASSERT(oldState & hasAccessBit); |
1957 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1958 | |
1959 | if (!(oldState & mutatorHasConnBit)) |
1960 | return false; // Done. |
1961 | |
1962 | if (m_threadShouldStop) |
1963 | return false; |
1964 | |
1965 | if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit)) |
1966 | return true; // Loop around. |
1967 | |
1968 | finishRelinquishingConn(); |
1969 | return true; |
1970 | } |
1971 | |
1972 | void Heap::finishRelinquishingConn() |
1973 | { |
1974 | if (false) |
        dataLog("Relinquished the conn.\n");
1976 | |
1977 | sanitizeStackForVM(m_vm); |
1978 | |
1979 | auto locker = holdLock(*m_threadLock); |
1980 | if (!m_requests.isEmpty()) |
1981 | m_threadCondition->notifyOne(locker); |
1982 | ParkingLot::unparkAll(&m_worldState); |
1983 | } |
1984 | |
1985 | void Heap::relinquishConn() |
1986 | { |
1987 | while (relinquishConn(m_worldState.load())) { } |
1988 | } |
1989 | |
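// The one-argument forms of handleGCDidJIT, handleNeedFinalize, and relinquishConn share a
// convention: they return false when there is definitely nothing left to do, and true when the
// caller should reload the world state and try again, either because work was done or because
// the weak compare-and-swap failed.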
1990 | bool Heap::handleGCDidJIT(unsigned oldState) |
1991 | { |
1992 | RELEASE_ASSERT(oldState & hasAccessBit); |
1993 | if (!(oldState & gcDidJITBit)) |
1994 | return false; |
1995 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) { |
1996 | WTF::crossModifyingCodeFence(); |
1997 | return true; |
1998 | } |
1999 | return true; |
2000 | } |
2001 | |
2002 | NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState) |
2003 | { |
2004 | RELEASE_ASSERT(oldState & hasAccessBit); |
2005 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
2006 | |
2007 | if (!(oldState & needFinalizeBit)) |
2008 | return false; |
2009 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) { |
2010 | finalize(); |
2011 | // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in |
2012 | // which case they would be waiting for us to release heap access. |
2013 | ParkingLot::unparkAll(&m_worldState); |
2014 | return true; |
2015 | } |
2016 | return true; |
2017 | } |
2018 | |
2019 | void Heap::handleGCDidJIT() |
2020 | { |
2021 | while (handleGCDidJIT(m_worldState.load())) { } |
2022 | } |
2023 | |
2024 | void Heap::handleNeedFinalize() |
2025 | { |
2026 | while (handleNeedFinalize(m_worldState.load())) { } |
2027 | } |
2028 | |
2029 | void Heap::setGCDidJIT() |
2030 | { |
2031 | m_worldState.transaction( |
2032 | [&] (unsigned& state) -> bool { |
2033 | RELEASE_ASSERT(state & stoppedBit); |
2034 | state |= gcDidJITBit; |
2035 | return true; |
2036 | }); |
2037 | } |
2038 | |
2039 | void Heap::setNeedFinalize() |
2040 | { |
2041 | m_worldState.exchangeOr(needFinalizeBit); |
2042 | ParkingLot::unparkAll(&m_worldState); |
2043 | m_stopIfNecessaryTimer->scheduleSoon(); |
2044 | } |
2045 | |
2046 | void Heap::waitWhileNeedFinalize() |
2047 | { |
2048 | for (;;) { |
2049 | unsigned oldState = m_worldState.load(); |
2050 | if (!(oldState & needFinalizeBit)) { |
2051 | // This means that either there was no finalize request or the main thread will finalize |
2052 | // with heap access, so a subsequent call to stopTheWorld() will return only when |
2053 | // finalize finishes. |
2054 | return; |
2055 | } |
2056 | ParkingLot::compareAndPark(&m_worldState, oldState); |
2057 | } |
2058 | } |
2059 | |
2060 | void Heap::setMutatorWaiting() |
2061 | { |
2062 | m_worldState.exchangeOr(mutatorWaitingBit); |
2063 | } |
2064 | |
2065 | void Heap::clearMutatorWaiting() |
2066 | { |
2067 | m_worldState.exchangeAnd(~mutatorWaitingBit); |
2068 | } |
2069 | |
2070 | void Heap::notifyThreadStopping(const AbstractLocker&) |
2071 | { |
2072 | m_threadIsStopping = true; |
2073 | clearMutatorWaiting(); |
2074 | ParkingLot::unparkAll(&m_worldState); |
2075 | } |
2076 | |
2077 | void Heap::finalize() |
2078 | { |
2079 | MonotonicTime before; |
2080 | if (Options::logGC()) { |
2081 | before = MonotonicTime::now(); |
        dataLog("[GC<", RawPointer(this), ">: finalize ");
2083 | } |
2084 | |
2085 | { |
2086 | SweepingScope sweepingScope(*this); |
2087 | deleteUnmarkedCompiledCode(); |
2088 | deleteSourceProviderCaches(); |
2089 | sweepInFinalize(); |
2090 | } |
2091 | |
2092 | if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache()) |
2093 | cache->clear(); |
2094 | |
2095 | immutableButterflyToStringCache.clear(); |
2096 | |
2097 | for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks) |
2098 | callback.run(*vm()); |
2099 | |
2100 | if (shouldSweepSynchronously()) |
2101 | sweepSynchronously(); |
2102 | |
2103 | if (Options::logGC()) { |
2104 | MonotonicTime after = MonotonicTime::now(); |
        dataLog((after - before).milliseconds(), "ms]\n");
2106 | } |
2107 | } |
2108 | |
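// Collection requests are ticketed: each request increments m_lastGrantedTicket, and the
// collector bumps m_lastServedTicket as it retires requests, so waiting for a collection is
// just waiting until its ticket has been served.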
2109 | Heap::Ticket Heap::requestCollection(GCRequest request) |
2110 | { |
2111 | stopIfNecessary(); |
2112 | |
2113 | ASSERT(vm()->currentThreadIsHoldingAPILock()); |
2114 | RELEASE_ASSERT(vm()->atomStringTable() == Thread::current().atomStringTable()); |
2115 | |
2116 | LockHolder locker(*m_threadLock); |
2117 | // We may be able to steal the conn. That only works if the collector is definitely not running |
2118 | // right now. This is an optimization that prevents the collector thread from ever starting in most |
2119 | // cases. |
2120 | ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
2121 | if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) { |
2122 | if (false) |
            dataLog("Taking the conn.\n");
2124 | m_worldState.exchangeOr(mutatorHasConnBit); |
2125 | } |
2126 | |
2127 | m_requests.append(request); |
2128 | m_lastGrantedTicket++; |
2129 | if (!(m_worldState.load() & mutatorHasConnBit)) |
2130 | m_threadCondition->notifyOne(locker); |
2131 | return m_lastGrantedTicket; |
2132 | } |
2133 | |
2134 | void Heap::waitForCollection(Ticket ticket) |
2135 | { |
2136 | waitForCollector( |
2137 | [&] (const AbstractLocker&) -> bool { |
2138 | return m_lastServedTicket >= ticket; |
2139 | }); |
2140 | } |
2141 | |
2142 | void Heap::sweepInFinalize() |
2143 | { |
2144 | m_objectSpace.sweepLargeAllocations(); |
2145 | vm()->eagerlySweptDestructibleObjectSpace.sweep(); |
2146 | } |
2147 | |
2148 | void Heap::suspendCompilerThreads() |
2149 | { |
2150 | #if ENABLE(DFG_JIT) |
2151 | // We ensure the worklists so that it's not possible for the mutator to start a new worklist |
2152 | // after we have suspended the ones that he had started before. That's not very expensive since |
2153 | // the worklists use AutomaticThreads anyway. |
2154 | if (!VM::canUseJIT()) |
2155 | return; |
2156 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2157 | DFG::ensureWorklistForIndex(i).suspendAllThreads(); |
2158 | #endif |
2159 | } |
2160 | |
2161 | void Heap::willStartCollection() |
2162 | { |
2163 | if (Options::logGC()) |
        dataLog("=> ");
2165 | |
2166 | if (shouldDoFullCollection()) { |
2167 | m_collectionScope = CollectionScope::Full; |
2168 | m_shouldDoFullCollection = false; |
2169 | if (Options::logGC()) |
            dataLog("FullCollection, ");
        if (false)
            dataLog("Full collection!\n");
    } else {
        m_collectionScope = CollectionScope::Eden;
        if (Options::logGC())
            dataLog("EdenCollection, ");
        if (false)
            dataLog("Eden collection!\n");
2179 | } |
2180 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2181 | m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; |
2182 | m_extraMemorySize = 0; |
2183 | m_deprecatedExtraMemorySize = 0; |
2184 | #if ENABLE(RESOURCE_USAGE) |
2185 | m_externalMemorySize = 0; |
2186 | #endif |
2187 | |
2188 | if (m_fullActivityCallback) |
2189 | m_fullActivityCallback->willCollect(); |
2190 | } else { |
2191 | ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Eden); |
2192 | m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; |
2193 | } |
2194 | |
2195 | if (m_edenActivityCallback) |
2196 | m_edenActivityCallback->willCollect(); |
2197 | |
2198 | for (auto* observer : m_observers) |
2199 | observer->willGarbageCollect(); |
2200 | } |
2201 | |
2202 | void Heap::prepareForMarking() |
2203 | { |
2204 | m_objectSpace.prepareForMarking(); |
2205 | } |
2206 | |
2207 | void Heap::reapWeakHandles() |
2208 | { |
2209 | m_objectSpace.reapWeakSets(); |
2210 | } |
2211 | |
2212 | void Heap::pruneStaleEntriesFromWeakGCMaps() |
2213 | { |
2214 | if (!m_collectionScope || m_collectionScope.value() != CollectionScope::Full) |
2215 | return; |
2216 | for (WeakGCMapBase* weakGCMap : m_weakGCMaps) |
2217 | weakGCMap->pruneStaleEntries(); |
2218 | } |
2219 | |
2220 | void Heap::sweepArrayBuffers() |
2221 | { |
2222 | m_arrayBuffers.sweep(*vm()); |
2223 | } |
2224 | |
2225 | void Heap::snapshotUnswept() |
2226 | { |
    TimingScope timingScope(*this, "Heap::snapshotUnswept");
2228 | m_objectSpace.snapshotUnswept(); |
2229 | } |
2230 | |
2231 | void Heap::deleteSourceProviderCaches() |
2232 | { |
2233 | if (m_lastCollectionScope && m_lastCollectionScope.value() == CollectionScope::Full) |
2234 | m_vm->clearSourceProviderCaches(); |
2235 | } |
2236 | |
2237 | void Heap::notifyIncrementalSweeper() |
2238 | { |
2239 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2240 | if (!m_logicallyEmptyWeakBlocks.isEmpty()) |
2241 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; |
2242 | } |
2243 | |
2244 | m_sweeper->startSweeping(*this); |
2245 | } |
2246 | |
2247 | void Heap::updateAllocationLimits() |
2248 | { |
2249 | static const bool verbose = false; |
2250 | |
2251 | if (verbose) { |
        dataLog("\n");
        dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2254 | } |
2255 | |
2256 | // Calculate our current heap size threshold for the purpose of figuring out when we should |
2257 | // run another collection. This isn't the same as either size() or capacity(), though it should |
    // be somewhere between the two. The key is to match the size calculations involved in calls to
2259 | // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of |
2260 | // fragmentation, we may have size() much smaller than capacity(). |
2261 | size_t currentHeapSize = 0; |
2262 | |
2263 | // For marked space, we use the total number of bytes visited. This matches the logic for |
2264 | // BlockDirectory's calls to didAllocate(), which effectively accounts for the total size of |
2265 | // objects allocated rather than blocks used. This will underestimate capacity(), and in case |
2266 | // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because |
2267 | // cells usually have a narrow range of sizes. So, the underestimation is probably OK. |
2268 | currentHeapSize += m_totalBytesVisited; |
2269 | if (verbose) |
        dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2271 | |
2272 | // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time |
2273 | // extra memory reporting. |
2274 | currentHeapSize += extraMemorySize(); |
2275 | if (!ASSERT_DISABLED) { |
2276 | Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited; |
2277 | checkedCurrentHeapSize += extraMemorySize(); |
2278 | ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize); |
2279 | } |
2280 | |
2281 | if (verbose) |
        dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2283 | |
2284 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2285 | // To avoid pathological GC churn in very small and very large heaps, we set |
2286 | // the new allocation limit based on the current size of the heap, with a |
2287 | // fixed minimum. |
2288 | m_maxHeapSize = std::max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize)); |
2289 | if (verbose) |
            dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (verbose)
            dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
        m_sizeAfterLastFullCollect = currentHeapSize;
        if (verbose)
            dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
        m_bytesAbandonedSinceLastFullCollect = 0;
        if (verbose)
            dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2300 | } else { |
2301 | ASSERT(currentHeapSize >= m_sizeAfterLastCollect); |
2302 | // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have. |
2303 | // But we are sloppy, so we have to defend against the overflow. |
2304 | m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize; |
2305 | if (verbose) |
            dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
        m_sizeAfterLastEdenCollect = currentHeapSize;
        if (verbose)
            dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2310 | double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize; |
2311 | double minEdenToOldGenerationRatio = 1.0 / 3.0; |
2312 | if (edenToOldGenerationRatio < minEdenToOldGenerationRatio) |
2313 | m_shouldDoFullCollection = true; |
2314 | // This seems suspect at first, but what it does is ensure that the nursery size is fixed. |
2315 | m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect; |
2316 | if (verbose) |
            dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (verbose)
            dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2321 | if (m_fullActivityCallback) { |
2322 | ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect); |
2323 | m_fullActivityCallback->didAllocate(*this, currentHeapSize - m_sizeAfterLastFullCollect); |
2324 | } |
2325 | } |
2326 | |
2327 | #if PLATFORM(IOS_FAMILY) |
2328 | // Get critical memory threshold for next cycle. |
2329 | overCriticalMemoryThreshold(MemoryThresholdCallType::Direct); |
2330 | #endif |
2331 | |
2332 | m_sizeAfterLastCollect = currentHeapSize; |
2333 | if (verbose) |
        dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2335 | m_bytesAllocatedThisCycle = 0; |
2336 | |
2337 | if (Options::logGC()) |
        dataLog("=> ", currentHeapSize / 1024, "kb, ");
2339 | } |
2340 | |
2341 | void Heap::didFinishCollection() |
2342 | { |
2343 | m_afterGC = MonotonicTime::now(); |
2344 | CollectionScope scope = *m_collectionScope; |
2345 | if (scope == CollectionScope::Full) |
2346 | m_lastFullGCLength = m_afterGC - m_beforeGC; |
2347 | else |
2348 | m_lastEdenGCLength = m_afterGC - m_beforeGC; |
2349 | |
2350 | #if ENABLE(RESOURCE_USAGE) |
2351 | ASSERT(externalMemorySize() <= extraMemorySize()); |
2352 | #endif |
2353 | |
2354 | if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) { |
2355 | gatherExtraHeapSnapshotData(*heapProfiler); |
2356 | removeDeadHeapSnapshotNodes(*heapProfiler); |
2357 | } |
2358 | |
2359 | if (UNLIKELY(m_verifier)) |
2360 | m_verifier->endGC(); |
2361 | |
2362 | RELEASE_ASSERT(m_collectionScope); |
2363 | m_lastCollectionScope = m_collectionScope; |
2364 | m_collectionScope = WTF::nullopt; |
2365 | |
2366 | for (auto* observer : m_observers) |
2367 | observer->didGarbageCollect(scope); |
2368 | } |
2369 | |
2370 | void Heap::resumeCompilerThreads() |
2371 | { |
2372 | #if ENABLE(DFG_JIT) |
2373 | if (!VM::canUseJIT()) |
2374 | return; |
2375 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2376 | DFG::existingWorklistForIndex(i).resumeAllThreads(); |
2377 | #endif |
2378 | } |
2379 | |
2380 | GCActivityCallback* Heap::fullActivityCallback() |
2381 | { |
2382 | return m_fullActivityCallback.get(); |
2383 | } |
2384 | |
2385 | GCActivityCallback* Heap::edenActivityCallback() |
2386 | { |
2387 | return m_edenActivityCallback.get(); |
2388 | } |
2389 | |
2390 | IncrementalSweeper& Heap::sweeper() |
2391 | { |
2392 | return m_sweeper.get(); |
2393 | } |
2394 | |
2395 | void Heap::setGarbageCollectionTimerEnabled(bool enable) |
2396 | { |
2397 | if (m_fullActivityCallback) |
2398 | m_fullActivityCallback->setEnabled(enable); |
2399 | if (m_edenActivityCallback) |
2400 | m_edenActivityCallback->setEnabled(enable); |
2401 | } |
2402 | |
2403 | void Heap::didAllocate(size_t bytes) |
2404 | { |
2405 | if (m_edenActivityCallback) |
2406 | m_edenActivityCallback->didAllocate(*this, m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); |
2407 | m_bytesAllocatedThisCycle += bytes; |
2408 | performIncrement(bytes); |
2409 | } |
2410 | |
2411 | bool Heap::isValidAllocation(size_t) |
2412 | { |
2413 | if (!isValidThreadState(m_vm)) |
2414 | return false; |
2415 | |
2416 | if (isCurrentThreadBusy()) |
2417 | return false; |
2418 | |
2419 | return true; |
2420 | } |
2421 | |
2422 | void Heap::addFinalizer(JSCell* cell, Finalizer finalizer) |
2423 | { |
2424 | WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize(). |
2425 | } |
2426 | |
2427 | void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context) |
2428 | { |
2429 | HandleSlot slot = handle.slot(); |
2430 | Finalizer finalizer = reinterpret_cast<Finalizer>(context); |
2431 | finalizer(slot->asCell()); |
2432 | WeakSet::deallocate(WeakImpl::asWeakImpl(slot)); |
2433 | } |
2434 | |
2435 | void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness) |
2436 | { |
2437 | if (!m_fullActivityCallback) { |
2438 | collectNow(synchronousness, CollectionScope::Full); |
2439 | return; |
2440 | } |
2441 | |
2442 | if (m_fullActivityCallback->didGCRecently()) { |
        // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2444 | reportAbandonedObjectGraph(); |
2445 | return; |
2446 | } |
2447 | |
2448 | m_fullActivityCallback->setDidGCRecently(); |
2449 | collectNow(synchronousness, CollectionScope::Full); |
2450 | } |
2451 | |
2452 | bool Heap::useGenerationalGC() |
2453 | { |
2454 | return Options::useGenerationalGC() && !VM::isInMiniMode(); |
2455 | } |
2456 | |
2457 | bool Heap::shouldSweepSynchronously() |
2458 | { |
2459 | return Options::sweepSynchronously() || VM::isInMiniMode(); |
2460 | } |
2461 | |
2462 | bool Heap::shouldDoFullCollection() |
2463 | { |
2464 | if (!useGenerationalGC()) |
2465 | return true; |
2466 | |
2467 | if (!m_currentRequest.scope) |
2468 | return m_shouldDoFullCollection || overCriticalMemoryThreshold(); |
2469 | return *m_currentRequest.scope == CollectionScope::Full; |
2470 | } |
2471 | |
2472 | void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block) |
2473 | { |
2474 | m_logicallyEmptyWeakBlocks.append(block); |
2475 | } |
2476 | |
2477 | void Heap::sweepAllLogicallyEmptyWeakBlocks() |
2478 | { |
2479 | if (m_logicallyEmptyWeakBlocks.isEmpty()) |
2480 | return; |
2481 | |
2482 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; |
2483 | while (sweepNextLogicallyEmptyWeakBlock()) { } |
2484 | } |
2485 | |
2486 | bool Heap::sweepNextLogicallyEmptyWeakBlock() |
2487 | { |
2488 | if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound) |
2489 | return false; |
2490 | |
2491 | WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep]; |
2492 | |
2493 | block->sweep(); |
2494 | if (block->isEmpty()) { |
2495 | std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last()); |
2496 | m_logicallyEmptyWeakBlocks.removeLast(); |
2497 | WeakBlock::destroy(*this, block); |
2498 | } else |
2499 | m_indexOfNextLogicallyEmptyWeakBlockToSweep++; |
2500 | |
2501 | if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) { |
2502 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound; |
2503 | return false; |
2504 | } |
2505 | |
2506 | return true; |
2507 | } |
2508 | |
2509 | size_t Heap::visitCount() |
2510 | { |
2511 | size_t result = 0; |
2512 | forEachSlotVisitor( |
2513 | [&] (SlotVisitor& visitor) { |
2514 | result += visitor.visitCount(); |
2515 | }); |
2516 | return result; |
2517 | } |
2518 | |
2519 | size_t Heap::bytesVisited() |
2520 | { |
2521 | size_t result = 0; |
2522 | forEachSlotVisitor( |
2523 | [&] (SlotVisitor& visitor) { |
2524 | result += visitor.bytesVisited(); |
2525 | }); |
2526 | return result; |
2527 | } |
2528 | |
2529 | void Heap::forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>& func) |
2530 | { |
2531 | // We don't know the full set of CodeBlocks until compilation has terminated. |
2532 | completeAllJITPlans(); |
2533 | |
2534 | return m_codeBlocks->iterate(func); |
2535 | } |
2536 | |
2537 | void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<void(CodeBlock*)>& func) |
2538 | { |
2539 | return m_codeBlocks->iterate(locker, func); |
2540 | } |
2541 | |
2542 | void Heap::writeBarrierSlowPath(const JSCell* from) |
2543 | { |
2544 | if (UNLIKELY(mutatorShouldBeFenced())) { |
2545 | // In this case, the barrierThreshold is the tautological threshold, so from could still be |
2546 | // not black. But we can't know for sure until we fire off a fence. |
2547 | WTF::storeLoadFence(); |
2548 | if (from->cellState() != CellState::PossiblyBlack) |
2549 | return; |
2550 | } |
2551 | |
2552 | addToRememberedSet(from); |
2553 | } |
2554 | |
2555 | bool Heap::isCurrentThreadBusy() |
2556 | { |
2557 | return Thread::mayBeGCThread() || mutatorState() != MutatorState::Running; |
2558 | } |
2559 | |
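// Accumulates extra-memory bytes visited during marking. Visitor threads race on this counter,
// so it is updated with a relaxed compare-and-swap loop that saturates at the maximum size_t
// value rather than overflowing.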
void Heap::reportExtraMemoryVisited(size_t size)
2561 | { |
2562 | size_t* counter = &m_extraMemorySize; |
2563 | |
2564 | for (;;) { |
2565 | size_t oldSize = *counter; |
2566 | // FIXME: Change this to use SaturatedArithmetic when available. |
2567 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
2568 | Checked<size_t, RecordOverflow> checkedNewSize = oldSize; |
2569 | checkedNewSize += size; |
2570 | size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); |
2571 | if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize)) |
2572 | return; |
2573 | } |
2574 | } |
2575 | |
2576 | #if ENABLE(RESOURCE_USAGE) |
2577 | void Heap::reportExternalMemoryVisited(size_t size) |
2578 | { |
2579 | size_t* counter = &m_externalMemorySize; |
2580 | |
2581 | for (;;) { |
2582 | size_t oldSize = *counter; |
2583 | if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size)) |
2584 | return; |
2585 | } |
2586 | } |
2587 | #endif |
2588 | |
2589 | void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext) |
2590 | { |
2591 | ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread()); |
2592 | if (validateDFGDoesGC) |
2593 | RELEASE_ASSERT(expectDoesGC()); |
2594 | |
2595 | if (!m_isSafeToCollect) |
2596 | return; |
2597 | |
2598 | switch (mutatorState()) { |
2599 | case MutatorState::Running: |
2600 | case MutatorState::Allocating: |
2601 | break; |
2602 | case MutatorState::Sweeping: |
2603 | case MutatorState::Collecting: |
2604 | return; |
2605 | } |
2606 | if (!Options::useGC()) |
2607 | return; |
2608 | |
2609 | if (mayNeedToStop()) { |
2610 | if (deferralContext) |
2611 | deferralContext->m_shouldGC = true; |
2612 | else if (isDeferred()) |
2613 | m_didDeferGCWork = true; |
2614 | else |
2615 | stopIfNecessary(); |
2616 | } |
2617 | |
2618 | if (UNLIKELY(Options::gcMaxHeapSize())) { |
2619 | if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize()) |
2620 | return; |
2621 | } else { |
2622 | size_t bytesAllowedThisCycle = m_maxEdenSize; |
2623 | |
2624 | #if PLATFORM(IOS_FAMILY) |
2625 | if (overCriticalMemoryThreshold()) |
2626 | bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle); |
2627 | #endif |
2628 | |
2629 | if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle) |
2630 | return; |
2631 | } |
2632 | |
2633 | if (deferralContext) |
2634 | deferralContext->m_shouldGC = true; |
2635 | else if (isDeferred()) |
2636 | m_didDeferGCWork = true; |
2637 | else { |
2638 | collectAsync(); |
2639 | stopIfNecessary(); // This will immediately start the collection if we have the conn. |
2640 | } |
2641 | } |
2642 | |
2643 | void Heap::decrementDeferralDepthAndGCIfNeededSlow() |
2644 | { |
2645 | // Can't do anything if we're still deferred. |
2646 | if (m_deferralDepth) |
2647 | return; |
2648 | |
2649 | ASSERT(!isDeferred()); |
2650 | |
2651 | m_didDeferGCWork = false; |
2652 | // FIXME: Bring back something like the DeferGCProbability mode. |
2653 | // https://bugs.webkit.org/show_bug.cgi?id=166627 |
2654 | collectIfNecessaryOrDefer(); |
2655 | } |
2656 | |
2657 | void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap) |
2658 | { |
2659 | m_weakGCMaps.add(weakGCMap); |
2660 | } |
2661 | |
2662 | void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap) |
2663 | { |
2664 | m_weakGCMaps.remove(weakGCMap); |
2665 | } |
2666 | |
2667 | void Heap::didAllocateBlock(size_t capacity) |
2668 | { |
2669 | #if ENABLE(RESOURCE_USAGE) |
2670 | m_blockBytesAllocated += capacity; |
2671 | #else |
2672 | UNUSED_PARAM(capacity); |
2673 | #endif |
2674 | } |
2675 | |
2676 | void Heap::didFreeBlock(size_t capacity) |
2677 | { |
2678 | #if ENABLE(RESOURCE_USAGE) |
2679 | m_blockBytesAllocated -= capacity; |
2680 | #else |
2681 | UNUSED_PARAM(capacity); |
2682 | #endif |
2683 | } |
2684 | |
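// Registers the root-marking constraints that every collection runs: the conservative scan,
// misc small roots, strong handles, debugger state, weak sets, output constraints, DFG
// worklists (when the JIT is in use), executing CodeBlocks, and the mark stack merging
// constraint.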
2685 | void Heap::addCoreConstraints() |
2686 | { |
    m_constraintSet->add(
        "Cs", "Conservative Scan",
2689 | [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable { |
2690 | bool shouldNotProduceWork = lastVersion == m_phaseVersion; |
2691 | if (shouldNotProduceWork) |
2692 | return; |
2693 | |
            TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2695 | m_objectSpace.prepareForConservativeScan(); |
2696 | m_jitStubRoutines->prepareForConservativeScan(); |
2697 | |
2698 | { |
2699 | ConservativeRoots conservativeRoots(*this); |
2700 | SuperSamplerScope superSamplerScope(false); |
2701 | |
2702 | gatherStackRoots(conservativeRoots); |
2703 | gatherJSStackRoots(conservativeRoots); |
2704 | gatherScratchBufferRoots(conservativeRoots); |
2705 | |
2706 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan); |
2707 | slotVisitor.append(conservativeRoots); |
2708 | } |
2709 | if (VM::canUseJIT()) { |
2710 | // JITStubRoutines must be visited after scanning ConservativeRoots since JITStubRoutines depend on the hook executed during gathering ConservativeRoots. |
2711 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::JITStubRoutines); |
2712 | m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor); |
2713 | } |
2714 | |
2715 | lastVersion = m_phaseVersion; |
2716 | }, |
2717 | ConstraintVolatility::GreyedByExecution); |
2718 | |
    m_constraintSet->add(
        "Msr", "Misc Small Roots",
2721 | [this] (SlotVisitor& slotVisitor) { |
2722 | |
2723 | #if JSC_OBJC_API_ENABLED |
2724 | scanExternalRememberedSet(*m_vm, slotVisitor); |
2725 | #endif |
2726 | if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope)) { |
2727 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongReferences); |
2728 | m_vm->smallStrings.visitStrongReferences(slotVisitor); |
2729 | } |
2730 | |
2731 | { |
2732 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ProtectedValues); |
2733 | for (auto& pair : m_protectedValues) |
2734 | slotVisitor.appendUnbarriered(pair.key); |
2735 | } |
2736 | |
2737 | if (m_markListSet && m_markListSet->size()) { |
2738 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan); |
2739 | MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet); |
2740 | } |
2741 | |
2742 | { |
2743 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::VMExceptions); |
2744 | slotVisitor.appendUnbarriered(m_vm->exception()); |
2745 | slotVisitor.appendUnbarriered(m_vm->lastException()); |
2746 | } |
2747 | }, |
2748 | ConstraintVolatility::GreyedByExecution); |
2749 | |
    m_constraintSet->add(
        "Sh", "Strong Handles",
2752 | [this] (SlotVisitor& slotVisitor) { |
2753 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongHandles); |
2754 | m_handleSet.visitStrongHandles(slotVisitor); |
2755 | }, |
2756 | ConstraintVolatility::GreyedByExecution); |
2757 | |
    m_constraintSet->add(
        "D", "Debugger",
2760 | [this] (SlotVisitor& slotVisitor) { |
2761 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Debugger); |
2762 | |
2763 | #if ENABLE(SAMPLING_PROFILER) |
2764 | if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) { |
2765 | LockHolder locker(samplingProfiler->getLock()); |
2766 | samplingProfiler->processUnverifiedStackTraces(); |
2767 | samplingProfiler->visit(slotVisitor); |
2768 | if (Options::logGC() == GCLogging::Verbose) |
                    dataLog("Sampling Profiler data:\n", slotVisitor);
2770 | } |
2771 | #endif // ENABLE(SAMPLING_PROFILER) |
2772 | |
2773 | if (m_vm->typeProfiler()) |
2774 | m_vm->typeProfilerLog()->visit(slotVisitor); |
2775 | |
2776 | if (auto* shadowChicken = m_vm->shadowChicken()) |
2777 | shadowChicken->visitChildren(slotVisitor); |
2778 | }, |
2779 | ConstraintVolatility::GreyedByExecution); |
2780 | |
    m_constraintSet->add(
        "Ws", "Weak Sets",
2783 | [this] (SlotVisitor& slotVisitor) { |
2784 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::WeakSets); |
2785 | m_objectSpace.visitWeakSets(slotVisitor); |
2786 | }, |
2787 | ConstraintVolatility::GreyedByMarking); |
2788 | |
    m_constraintSet->add(
        "O", "Output",
2791 | [] (SlotVisitor& slotVisitor) { |
2792 | VM& vm = slotVisitor.vm(); |
2793 | |
2794 | auto callOutputConstraint = [] (SlotVisitor& slotVisitor, HeapCell* heapCell, HeapCell::Kind) { |
2795 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Output); |
2796 | VM& vm = slotVisitor.vm(); |
2797 | JSCell* cell = static_cast<JSCell*>(heapCell); |
2798 | cell->methodTable(vm)->visitOutputConstraints(cell, slotVisitor); |
2799 | }; |
2800 | |
2801 | auto add = [&] (auto& set) { |
2802 | slotVisitor.addParallelConstraintTask(set.forEachMarkedCellInParallel(callOutputConstraint)); |
2803 | }; |
2804 | |
2805 | add(vm.executableToCodeBlockEdgesWithConstraints); |
2806 | if (vm.m_weakMapSpace) |
2807 | add(*vm.m_weakMapSpace); |
2808 | }, |
2809 | ConstraintVolatility::GreyedByMarking, |
2810 | ConstraintParallelism::Parallel); |
2811 | |
2812 | #if ENABLE(DFG_JIT) |
2813 | if (VM::canUseJIT()) { |
        m_constraintSet->add(
            "Dw", "DFG Worklists",
2816 | [this] (SlotVisitor& slotVisitor) { |
2817 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::DFGWorkLists); |
2818 | |
2819 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2820 | DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor); |
2821 | |
2822 | // FIXME: This is almost certainly unnecessary. |
2823 | // https://bugs.webkit.org/show_bug.cgi?id=166829 |
2824 | DFG::iterateCodeBlocksForGC( |
2825 | *m_vm, |
2826 | [&] (CodeBlock* codeBlock) { |
2827 | slotVisitor.appendUnbarriered(codeBlock); |
2828 | }); |
2829 | |
2830 | if (Options::logGC() == GCLogging::Verbose) |
                    dataLog("DFG Worklists:\n", slotVisitor);
2832 | }, |
2833 | ConstraintVolatility::GreyedByMarking); |
2834 | } |
2835 | #endif |
2836 | |
    m_constraintSet->add(
        "Cb", "CodeBlocks",
2839 | [this] (SlotVisitor& slotVisitor) { |
2840 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::CodeBlocks); |
2841 | iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks( |
2842 | [&] (CodeBlock* codeBlock) { |
2843 | // Visit the CodeBlock as a constraint only if it's black. |
2844 | if (isMarked(codeBlock) |
2845 | && codeBlock->cellState() == CellState::PossiblyBlack) |
2846 | slotVisitor.visitAsConstraint(codeBlock); |
2847 | }); |
2848 | }, |
2849 | ConstraintVolatility::SeldomGreyed); |
2850 | |
2851 | m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this)); |
2852 | } |
2853 | |
2854 | void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint) |
2855 | { |
2856 | PreventCollectionScope preventCollectionScope(*this); |
2857 | m_constraintSet->add(WTFMove(constraint)); |
2858 | } |
2859 | |
2860 | void Heap::notifyIsSafeToCollect() |
2861 | { |
2862 | MonotonicTime before; |
2863 | if (Options::logGC()) { |
2864 | before = MonotonicTime::now(); |
        dataLog("[GC<", RawPointer(this), ">: starting ");
2866 | } |
2867 | |
2868 | addCoreConstraints(); |
2869 | |
2870 | m_isSafeToCollect = true; |
2871 | |
2872 | if (Options::collectContinuously()) { |
        m_collectContinuouslyThread = Thread::create(
            "JSC DEBUG Continuous GC",
2875 | [this] () { |
2876 | MonotonicTime initialTime = MonotonicTime::now(); |
2877 | Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS()); |
2878 | while (!m_shouldStopCollectingContinuously) { |
2879 | { |
2880 | LockHolder locker(*m_threadLock); |
2881 | if (m_requests.isEmpty()) { |
2882 | m_requests.append(WTF::nullopt); |
2883 | m_lastGrantedTicket++; |
2884 | m_threadCondition->notifyOne(locker); |
2885 | } |
2886 | } |
2887 | |
2888 | { |
2889 | LockHolder locker(m_collectContinuouslyLock); |
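                        // Sleep until the next multiple of the period, measured from initialTime,
                        // so the debug GC cadence stays fixed even when a cycle overruns its slot.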
2890 | Seconds elapsed = MonotonicTime::now() - initialTime; |
2891 | Seconds elapsedInPeriod = elapsed % period; |
2892 | MonotonicTime timeToWakeUp = |
2893 | initialTime + elapsed - elapsedInPeriod + period; |
2894 | while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) { |
2895 | m_collectContinuouslyCondition.waitUntil( |
2896 | m_collectContinuouslyLock, timeToWakeUp); |
2897 | } |
2898 | } |
2899 | } |
2900 | }); |
2901 | } |
2902 | |
2903 | if (Options::logGC()) |
        dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2905 | } |
2906 | |
2907 | void Heap::preventCollection() |
2908 | { |
2909 | if (!m_isSafeToCollect) |
2910 | return; |
2911 | |
2912 | // This prevents the collectContinuously thread from starting a collection. |
2913 | m_collectContinuouslyLock.lock(); |
2914 | |
2915 | // Wait for all collections to finish. |
2916 | waitForCollector( |
2917 | [&] (const AbstractLocker&) -> bool { |
2918 | ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
2919 | return m_lastServedTicket == m_lastGrantedTicket; |
2920 | }); |
2921 | |
2922 | // Now a collection can only start if this thread starts it. |
2923 | RELEASE_ASSERT(!m_collectionScope); |
2924 | } |
2925 | |
2926 | void Heap::allowCollection() |
2927 | { |
2928 | if (!m_isSafeToCollect) |
2929 | return; |
2930 | |
2931 | m_collectContinuouslyLock.unlock(); |
2932 | } |
2933 | |
2934 | void Heap::setMutatorShouldBeFenced(bool value) |
2935 | { |
2936 | m_mutatorShouldBeFenced = value; |
2937 | m_barrierThreshold = value ? tautologicalThreshold : blackThreshold; |
2938 | } |
2939 | |
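// Allocation-driven incremental marking: every allocated byte adds gcIncrementScale() units of
// marking debt to m_incrementBalance. Once the debt reaches gcIncrementBytes(), the mutator
// pays it down by draining up to gcIncrementMaxBytes() worth of marking work; any overshoot is
// remembered as a negative balance.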
2940 | void Heap::performIncrement(size_t bytes) |
2941 | { |
2942 | if (!m_objectSpace.isMarking()) |
2943 | return; |
2944 | |
2945 | if (isDeferred()) |
2946 | return; |
2947 | |
2948 | m_incrementBalance += bytes * Options::gcIncrementScale(); |
2949 | |
2950 | // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent |
2951 | // state when the double goes wild. |
2952 | if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance)) |
2953 | m_incrementBalance = 0; |
2954 | |
2955 | if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes())) |
2956 | return; |
2957 | |
2958 | double targetBytes = m_incrementBalance; |
2959 | if (targetBytes <= 0) |
2960 | return; |
2961 | targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes()); |
2962 | |
2963 | SlotVisitor& slotVisitor = *m_mutatorSlotVisitor; |
2964 | ParallelModeEnabler parallelModeEnabler(slotVisitor); |
2965 | size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes)); |
2966 | // incrementBalance may go negative here because it'll remember how many bytes we overshot. |
2967 | m_incrementBalance -= bytesVisited; |
2968 | } |
2969 | |
2970 | void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback) |
2971 | { |
2972 | m_heapFinalizerCallbacks.append(callback); |
2973 | } |
2974 | |
2975 | void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback) |
2976 | { |
2977 | m_heapFinalizerCallbacks.removeFirst(callback); |
2978 | } |
2979 | |
2980 | void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task) |
2981 | { |
2982 | auto locker = holdLock(m_markingMutex); |
2983 | m_bonusVisitorTask = task; |
2984 | m_markingConditionVariable.notifyAll(); |
2985 | } |
2986 | |
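// Runs the task on the collector's own visitor while also publishing it to the parallel
// markers as the bonus task. SharedTask's reference count is used to detect termination: the
// count returns to its initial value only once every thread that picked up the task has
// dropped it.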
2987 | void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task) |
2988 | { |
2989 | unsigned initialRefCount = task->refCount(); |
2990 | setBonusVisitorTask(task); |
2991 | task->run(*m_collectorSlotVisitor); |
2992 | setBonusVisitorTask(nullptr); |
    // The constraint solver expects that by the time this function returns, the task has
    // terminated on all threads. The wait below ensures that property.
2995 | { |
2996 | auto locker = holdLock(m_markingMutex); |
2997 | while (task->refCount() > initialRefCount) |
2998 | m_markingConditionVariable.wait(m_markingMutex); |
2999 | } |
3000 | } |
3001 | |
3002 | } // namespace JSC |
3003 | |