1 | /* |
2 | * Copyright (C) 2003-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2007 Eric Seidel <[email protected]> |
4 | * |
5 | * This library is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU Lesser General Public |
7 | * License as published by the Free Software Foundation; either |
8 | * version 2 of the License, or (at your option) any later version. |
9 | * |
10 | * This library is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * Lesser General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU Lesser General Public |
16 | * License along with this library; if not, write to the Free Software |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
18 | * |
19 | */ |
20 | |
21 | #include "config.h" |
22 | #include "Heap.h" |
23 | |
24 | #include "BlockDirectoryInlines.h" |
25 | #include "BuiltinExecutables.h" |
26 | #include "CodeBlock.h" |
27 | #include "CodeBlockSetInlines.h" |
28 | #include "CollectingScope.h" |
29 | #include "ConservativeRoots.h" |
30 | #include "DFGWorklistInlines.h" |
31 | #include "EdenGCActivityCallback.h" |
32 | #include "Exception.h" |
33 | #include "FullGCActivityCallback.h" |
34 | #include "FunctionExecutableInlines.h" |
35 | #include "GCActivityCallback.h" |
36 | #include "GCIncomingRefCountedSetInlines.h" |
37 | #include "GCSegmentedArrayInlines.h" |
38 | #include "GCTypeMap.h" |
39 | #include "HasOwnPropertyCache.h" |
40 | #include "HeapHelperPool.h" |
41 | #include "HeapIterationScope.h" |
42 | #include "HeapProfiler.h" |
43 | #include "HeapSnapshot.h" |
44 | #include "HeapVerifier.h" |
45 | #include "IncrementalSweeper.h" |
46 | #include "InferredValueInlines.h" |
47 | #include "Interpreter.h" |
48 | #include "IsoCellSetInlines.h" |
49 | #include "JITStubRoutineSet.h" |
50 | #include "JITWorklist.h" |
51 | #include "JSCInlines.h" |
52 | #include "JSGlobalObject.h" |
53 | #include "JSLock.h" |
54 | #include "JSVirtualMachineInternal.h" |
55 | #include "JSWeakMap.h" |
56 | #include "JSWeakSet.h" |
57 | #include "JSWebAssemblyCodeBlock.h" |
58 | #include "MachineStackMarker.h" |
59 | #include "MarkStackMergingConstraint.h" |
60 | #include "MarkedSpaceInlines.h" |
61 | #include "MarkingConstraintSet.h" |
62 | #include "PreventCollectionScope.h" |
63 | #include "SamplingProfiler.h" |
64 | #include "ShadowChicken.h" |
65 | #include "SpaceTimeMutatorScheduler.h" |
66 | #include "StochasticSpaceTimeMutatorScheduler.h" |
67 | #include "StopIfNecessaryTimer.h" |
68 | #include "SubspaceInlines.h" |
69 | #include "SuperSampler.h" |
70 | #include "SweepingScope.h" |
71 | #include "SymbolTableInlines.h" |
72 | #include "SynchronousStopTheWorldMutatorScheduler.h" |
73 | #include "TypeProfiler.h" |
74 | #include "TypeProfilerLog.h" |
75 | #include "UnlinkedCodeBlock.h" |
76 | #include "VM.h" |
77 | #include "VisitCounter.h" |
78 | #include "WasmMemory.h" |
79 | #include "WeakMapImplInlines.h" |
80 | #include "WeakSetInlines.h" |
81 | #include <algorithm> |
82 | #include <wtf/ListDump.h> |
83 | #include <wtf/MainThread.h> |
84 | #include <wtf/ParallelVectorIterator.h> |
85 | #include <wtf/ProcessID.h> |
86 | #include <wtf/RAMSize.h> |
87 | #include <wtf/SimpleStats.h> |
88 | #include <wtf/Threading.h> |
89 | |
90 | #if PLATFORM(IOS_FAMILY) |
91 | #include <bmalloc/bmalloc.h> |
92 | #endif |
93 | |
94 | #if USE(FOUNDATION) |
95 | #include <wtf/spi/cocoa/objcSPI.h> |
96 | #endif |
97 | |
98 | #if USE(GLIB) |
99 | #include "JSCGLibWrapperObject.h" |
100 | #endif |
101 | |
102 | namespace JSC { |
103 | |
104 | namespace { |
105 | |
106 | bool verboseStop = false; |
107 | |
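// Tracks the largest GC pause observed so far in this process; used by the Options::logGC() output below.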
108 | double maxPauseMS(double thisPauseMS) |
109 | { |
110 | static double maxPauseMS; |
111 | maxPauseMS = std::max(thisPauseMS, maxPauseMS); |
112 | return maxPauseMS; |
113 | } |
114 | |
115 | size_t minHeapSize(HeapType heapType, size_t ramSize) |
116 | { |
117 | if (heapType == LargeHeap) { |
118 | double result = std::min( |
119 | static_cast<double>(Options::largeHeapSize()), |
120 | ramSize * Options::smallHeapRAMFraction()); |
121 | return static_cast<size_t>(result); |
122 | } |
123 | return Options::smallHeapSize(); |
124 | } |
125 | |
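// Computes the heap size at which the next collection should be triggered by scaling the current size with a growth factor; the factor shrinks as memory use approaches the configured RAM fractions.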
126 | size_t proportionalHeapSize(size_t heapSize, size_t ramSize) |
127 | { |
128 | if (VM::isInMiniMode()) |
129 | return Options::miniVMHeapGrowthFactor() * heapSize; |
130 | |
131 | #if PLATFORM(IOS_FAMILY) |
132 | size_t memoryFootprint = bmalloc::api::memoryFootprint(); |
133 | if (memoryFootprint < ramSize * Options::smallHeapRAMFraction()) |
134 | return Options::smallHeapGrowthFactor() * heapSize; |
135 | if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction()) |
136 | return Options::mediumHeapGrowthFactor() * heapSize; |
137 | #else |
138 | if (heapSize < ramSize * Options::smallHeapRAMFraction()) |
139 | return Options::smallHeapGrowthFactor() * heapSize; |
140 | if (heapSize < ramSize * Options::mediumHeapRAMFraction()) |
141 | return Options::mediumHeapGrowthFactor() * heapSize; |
142 | #endif |
143 | return Options::largeHeapGrowthFactor() * heapSize; |
144 | } |
145 | |
146 | bool isValidSharedInstanceThreadState(VM* vm) |
147 | { |
148 | return vm->currentThreadIsHoldingAPILock(); |
149 | } |
150 | |
151 | bool isValidThreadState(VM* vm) |
152 | { |
153 | if (vm->atomStringTable() != Thread::current().atomStringTable()) |
154 | return false; |
155 | |
156 | if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm)) |
157 | return false; |
158 | |
159 | return true; |
160 | } |
161 | |
162 | void recordType(VM& vm, TypeCountSet& set, JSCell* cell) |
163 | { |
164 | const char* typeName = "[unknown]" ; |
165 | const ClassInfo* info = cell->classInfo(vm); |
166 | if (info && info->className) |
167 | typeName = info->className; |
168 | set.add(typeName); |
169 | } |
170 | |
171 | bool measurePhaseTiming() |
172 | { |
173 | return false; |
174 | } |
175 | |
176 | HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats() |
177 | { |
178 | static HashMap<const char*, GCTypeMap<SimpleStats>>* result; |
179 | static std::once_flag once; |
180 | std::call_once( |
181 | once, |
182 | [] { |
183 | result = new HashMap<const char*, GCTypeMap<SimpleStats>>(); |
184 | }); |
185 | return *result; |
186 | } |
187 | |
188 | SimpleStats& timingStats(const char* name, CollectionScope scope) |
189 | { |
190 | return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope]; |
191 | } |
192 | |
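// RAII helper that, when measurePhaseTiming() is enabled, records how long a GC phase took and logs a running per-phase average.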
193 | class TimingScope { |
194 | public: |
195 | TimingScope(Optional<CollectionScope> scope, const char* name) |
196 | : m_scope(scope) |
197 | , m_name(name) |
198 | { |
199 | if (measurePhaseTiming()) |
200 | m_before = MonotonicTime::now(); |
201 | } |
202 | |
203 | TimingScope(Heap& heap, const char* name) |
204 | : TimingScope(heap.collectionScope(), name) |
205 | { |
206 | } |
207 | |
208 | void setScope(Optional<CollectionScope> scope) |
209 | { |
210 | m_scope = scope; |
211 | } |
212 | |
213 | void setScope(Heap& heap) |
214 | { |
215 | setScope(heap.collectionScope()); |
216 | } |
217 | |
218 | ~TimingScope() |
219 | { |
220 | if (measurePhaseTiming()) { |
221 | MonotonicTime after = MonotonicTime::now(); |
222 | Seconds timing = after - m_before; |
223 | SimpleStats& stats = timingStats(m_name, *m_scope); |
224 | stats.add(timing.milliseconds()); |
225 | dataLog("[GC:" , *m_scope, "] " , m_name, " took: " , timing.milliseconds(), "ms (average " , stats.mean(), "ms).\n" ); |
226 | } |
227 | } |
228 | private: |
229 | Optional<CollectionScope> m_scope; |
230 | MonotonicTime m_before; |
231 | const char* m_name; |
232 | }; |
233 | |
234 | } // anonymous namespace |
235 | |
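// The dedicated collector thread. It waits on m_threadCondition and performs collector work whenever a collection has been requested and the mutator does not hold the conn.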
236 | class Heap::HeapThread : public AutomaticThread { |
237 | public: |
238 | HeapThread(const AbstractLocker& locker, Heap& heap) |
239 | : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition.copyRef()) |
240 | , m_heap(heap) |
241 | { |
242 | } |
243 | |
244 | const char* name() const override |
245 | { |
246 | return "JSC Heap Collector Thread" ; |
247 | } |
248 | |
249 | protected: |
250 | PollResult poll(const AbstractLocker& locker) override |
251 | { |
252 | if (m_heap.m_threadShouldStop) { |
253 | m_heap.notifyThreadStopping(locker); |
254 | return PollResult::Stop; |
255 | } |
256 | if (m_heap.shouldCollectInCollectorThread(locker)) |
257 | return PollResult::Work; |
258 | return PollResult::Wait; |
259 | } |
260 | |
261 | WorkResult work() override |
262 | { |
263 | m_heap.collectInCollectorThread(); |
264 | return WorkResult::Continue; |
265 | } |
266 | |
267 | void threadDidStart() override |
268 | { |
269 | Thread::registerGCThread(GCThreadType::Main); |
270 | } |
271 | |
272 | private: |
273 | Heap& m_heap; |
274 | }; |
275 | |
276 | Heap::Heap(VM* vm, HeapType heapType) |
277 | : m_heapType(heapType) |
278 | , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize()) |
279 | , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize)) |
280 | , m_maxEdenSize(m_minBytesPerCycle) |
281 | , m_maxHeapSize(m_minBytesPerCycle) |
282 | , m_objectSpace(this) |
283 | , m_machineThreads(std::make_unique<MachineThreads>()) |
284 | , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C" )) |
285 | , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M" )) |
286 | , m_mutatorMarkStack(std::make_unique<MarkStackArray>()) |
287 | , m_raceMarkStack(std::make_unique<MarkStackArray>()) |
288 | , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this)) |
289 | , m_handleSet(vm) |
290 | , m_codeBlocks(std::make_unique<CodeBlockSet>()) |
291 | , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>()) |
292 | , m_vm(vm) |
293 | // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously |
294 | // schedule the timer if we've never done a collection. |
295 | , m_fullActivityCallback(GCActivityCallback::tryCreateFullTimer(this)) |
296 | , m_edenActivityCallback(GCActivityCallback::tryCreateEdenTimer(this)) |
297 | , m_sweeper(adoptRef(*new IncrementalSweeper(this))) |
298 | , m_stopIfNecessaryTimer(adoptRef(*new StopIfNecessaryTimer(vm))) |
299 | , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>()) |
300 | , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>()) |
301 | , m_helperClient(&heapHelperPool()) |
302 | , m_threadLock(Box<Lock>::create()) |
303 | , m_threadCondition(AutomaticThreadCondition::create()) |
304 | { |
305 | m_worldState.store(0); |
306 | |
307 | for (unsigned i = 0, numberOfParallelThreads = heapHelperPool().numberOfThreads(); i < numberOfParallelThreads; ++i) { |
308 | std::unique_ptr<SlotVisitor> visitor = std::make_unique<SlotVisitor>(*this, toCString("P" , i + 1)); |
309 | if (Options::optimizeParallelSlotVisitorsForStoppedMutator()) |
310 | visitor->optimizeForStoppedMutator(); |
311 | m_availableParallelSlotVisitors.append(visitor.get()); |
312 | m_parallelSlotVisitors.append(WTFMove(visitor)); |
313 | } |
314 | |
315 | if (Options::useConcurrentGC()) { |
316 | if (Options::useStochasticMutatorScheduler()) |
317 | m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this); |
318 | else |
319 | m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this); |
320 | } else { |
321 | // We simulate turning off concurrent GC by making the scheduler say that the world |
322 | // should always be stopped when the collector is running. |
323 | m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>(); |
324 | } |
325 | |
326 | if (Options::verifyHeap()) |
327 | m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification()); |
328 | |
329 | m_collectorSlotVisitor->optimizeForStoppedMutator(); |
330 | |
331 | // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting. |
332 | size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold())); |
333 | m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4; |
334 | |
335 | LockHolder locker(*m_threadLock); |
336 | m_thread = adoptRef(new HeapThread(locker, *this)); |
337 | } |
338 | |
339 | Heap::~Heap() |
340 | { |
341 | forEachSlotVisitor( |
342 | [&] (SlotVisitor& visitor) { |
343 | visitor.clearMarkStacks(); |
344 | }); |
345 | m_mutatorMarkStack->clear(); |
346 | m_raceMarkStack->clear(); |
347 | |
348 | for (WeakBlock* block : m_logicallyEmptyWeakBlocks) |
349 | WeakBlock::destroy(*this, block); |
350 | } |
351 | |
352 | bool Heap::isPagedOut(MonotonicTime deadline) |
353 | { |
354 | return m_objectSpace.isPagedOut(deadline); |
355 | } |
356 | |
357 | void Heap::dumpHeapStatisticsAtVMDestruction() |
358 | { |
359 | unsigned counter = 0; |
360 | m_objectSpace.forEachBlock([&] (MarkedBlock::Handle* block) { |
361 | unsigned live = 0; |
362 | block->forEachCell([&] (HeapCell* cell, HeapCell::Kind) { |
363 | if (cell->isLive()) |
364 | live++; |
365 | return IterationStatus::Continue; |
366 | }); |
367 | dataLogLn("[" , counter++, "] " , block->cellSize(), ", " , live, " / " , block->cellsPerBlock(), " " , static_cast<double>(live) / block->cellsPerBlock() * 100, "% " , block->attributes(), " " , block->subspace()->name()); |
368 | block->forEachCell([&] (HeapCell* heapCell, HeapCell::Kind kind) { |
369 | if (heapCell->isLive() && kind == HeapCell::Kind::JSCell) { |
370 | auto* cell = static_cast<JSCell*>(heapCell); |
371 | if (cell->isObject()) |
372 | dataLogLn(" " , JSValue((JSObject*)cell)); |
373 | else |
374 | dataLogLn(" " , *cell); |
375 | } |
376 | return IterationStatus::Continue; |
377 | }); |
378 | }); |
379 | } |
380 | |
381 | // The VM is being destroyed and the collector will never run again. |
382 | // Run all pending finalizers now because we won't get another chance. |
383 | void Heap::lastChanceToFinalize() |
384 | { |
385 | MonotonicTime before; |
386 | if (Options::logGC()) { |
387 | before = MonotonicTime::now(); |
388 | dataLog("[GC<" , RawPointer(this), ">: shutdown " ); |
389 | } |
390 | |
391 | m_isShuttingDown = true; |
392 | |
393 | RELEASE_ASSERT(!m_vm->entryScope); |
394 | RELEASE_ASSERT(m_mutatorState == MutatorState::Running); |
395 | |
396 | if (m_collectContinuouslyThread) { |
397 | { |
398 | LockHolder locker(m_collectContinuouslyLock); |
399 | m_shouldStopCollectingContinuously = true; |
400 | m_collectContinuouslyCondition.notifyOne(); |
401 | } |
402 | m_collectContinuouslyThread->waitForCompletion(); |
403 | } |
404 | |
405 | if (Options::logGC()) |
406 | dataLog("1" ); |
407 | |
408 | // Prevent new collections from being started. This is probably not even necessary, since we're not |
409 | // going to call into anything that starts collections. Still, this makes the algorithm more |
410 | // obviously sound. |
411 | m_isSafeToCollect = false; |
412 | |
413 | if (Options::logGC()) |
414 | dataLog("2" ); |
415 | |
416 | bool isCollecting; |
417 | { |
418 | auto locker = holdLock(*m_threadLock); |
419 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
420 | isCollecting = m_lastServedTicket < m_lastGrantedTicket; |
421 | } |
422 | if (isCollecting) { |
423 | if (Options::logGC()) |
424 | dataLog("...]\n" ); |
425 | |
426 | // Wait for the current collection to finish. |
427 | waitForCollector( |
428 | [&] (const AbstractLocker&) -> bool { |
429 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
430 | return m_lastServedTicket == m_lastGrantedTicket; |
431 | }); |
432 | |
433 | if (Options::logGC()) |
434 | dataLog("[GC<" , RawPointer(this), ">: shutdown " ); |
435 | } |
436 | if (Options::logGC()) |
437 | dataLog("3" ); |
438 | |
439 | RELEASE_ASSERT(m_requests.isEmpty()); |
440 | RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket); |
441 | |
442 | // Carefully bring the thread down. |
443 | bool stopped = false; |
444 | { |
445 | LockHolder locker(*m_threadLock); |
446 | stopped = m_thread->tryStop(locker); |
447 | m_threadShouldStop = true; |
448 | if (!stopped) |
449 | m_threadCondition->notifyOne(locker); |
450 | } |
451 | |
452 | if (Options::logGC()) |
453 | dataLog("4" ); |
454 | |
455 | if (!stopped) |
456 | m_thread->join(); |
457 | |
458 | if (Options::logGC()) |
459 | dataLog("5 " ); |
460 | |
461 | if (UNLIKELY(Options::dumpHeapStatisticsAtVMDestruction())) |
462 | dumpHeapStatisticsAtVMDestruction(); |
463 | |
464 | m_arrayBuffers.lastChanceToFinalize(); |
465 | m_objectSpace.stopAllocatingForGood(); |
466 | m_objectSpace.lastChanceToFinalize(); |
467 | releaseDelayedReleasedObjects(); |
468 | |
469 | sweepAllLogicallyEmptyWeakBlocks(); |
470 | |
471 | m_objectSpace.freeMemory(); |
472 | |
473 | if (Options::logGC()) |
474 | dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n" ); |
475 | } |
476 | |
477 | void Heap::releaseDelayedReleasedObjects() |
478 | { |
479 | #if USE(FOUNDATION) || USE(GLIB) |
480 | // We need to guard against the case that releasing an object can create more objects due to the |
481 | // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up |
482 | // back here and could try to recursively release objects. We guard that with a recursive entry |
483 | // count. Only the initial call will release objects; recursive calls simply return and let the |
484 | // initial call to the function take care of any objects created during release time. |
485 | // This also means that we need to loop until there are no objects in m_delayedReleaseObjects |
486 | // and use a temp Vector for the actual releasing. |
487 | if (!m_delayedReleaseRecursionCount++) { |
488 | while (!m_delayedReleaseObjects.isEmpty()) { |
489 | ASSERT(m_vm->currentThreadIsHoldingAPILock()); |
490 | |
491 | auto objectsToRelease = WTFMove(m_delayedReleaseObjects); |
492 | |
493 | { |
494 | // We need to drop locks before calling out to arbitrary code. |
495 | JSLock::DropAllLocks dropAllLocks(m_vm); |
496 | |
497 | #if USE(FOUNDATION) |
498 | void* context = objc_autoreleasePoolPush(); |
499 | #endif |
500 | objectsToRelease.clear(); |
501 | #if USE(FOUNDATION) |
502 | objc_autoreleasePoolPop(context); |
503 | #endif |
504 | } |
505 | } |
506 | } |
507 | m_delayedReleaseRecursionCount--; |
508 | #endif |
509 | } |
510 | |
511 | void Heap::reportExtraMemoryAllocatedSlowCase(size_t size) |
512 | { |
513 | didAllocate(size); |
514 | collectIfNecessaryOrDefer(); |
515 | } |
516 | |
517 | void Heap::deprecatedReportExtraMemorySlowCase(size_t size) |
518 | { |
519 | // FIXME: Change this to use SaturatedArithmetic when available. |
520 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
521 | Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize; |
522 | checkedNewSize += size; |
523 | m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); |
524 | reportExtraMemoryAllocatedSlowCase(size); |
525 | } |
526 | |
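// On IOS_FAMILY platforms this caches the (relatively expensive) available-memory query and refreshes it only every 100 calls, unless the caller asks for a Direct check. On other platforms it always reports false.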
527 | bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType) |
528 | { |
529 | #if PLATFORM(IOS_FAMILY) |
530 | if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) { |
531 | m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold(); |
532 | m_precentAvailableMemoryCachedCallCount = 0; |
533 | } |
534 | |
535 | return m_overCriticalMemoryThreshold; |
536 | #else |
537 | UNUSED_PARAM(memoryThresholdCallType); |
538 | return false; |
539 | #endif |
540 | } |
541 | |
542 | void Heap::reportAbandonedObjectGraph() |
543 | { |
544 | // Our clients don't know exactly how much memory they |
545 | // are abandoning, so we just guess for them. |
546 | size_t abandonedBytes = static_cast<size_t>(0.1 * capacity()); |
547 | |
548 | // We want to accelerate the next collection. Because memory has just |
549 | // been abandoned, the next collection has the potential to |
550 | // be more profitable. Since allocation is the trigger for collection, |
551 | // we hasten the next collection by pretending that we've allocated more memory. |
552 | if (m_fullActivityCallback) { |
553 | m_fullActivityCallback->didAllocate(*this, |
554 | m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); |
555 | } |
556 | m_bytesAbandonedSinceLastFullCollect += abandonedBytes; |
557 | } |
558 | |
559 | void Heap::protect(JSValue k) |
560 | { |
561 | ASSERT(k); |
562 | ASSERT(m_vm->currentThreadIsHoldingAPILock()); |
563 | |
564 | if (!k.isCell()) |
565 | return; |
566 | |
567 | m_protectedValues.add(k.asCell()); |
568 | } |
569 | |
570 | bool Heap::unprotect(JSValue k) |
571 | { |
572 | ASSERT(k); |
573 | ASSERT(m_vm->currentThreadIsHoldingAPILock()); |
574 | |
575 | if (!k.isCell()) |
576 | return false; |
577 | |
578 | return m_protectedValues.remove(k.asCell()); |
579 | } |
580 | |
581 | void Heap::addReference(JSCell* cell, ArrayBuffer* buffer) |
582 | { |
583 | if (m_arrayBuffers.addReference(cell, buffer)) { |
584 | collectIfNecessaryOrDefer(); |
585 | didAllocate(buffer->gcSizeEstimateInBytes()); |
586 | } |
587 | } |
588 | |
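// Runs finalizeUnconditionally() on every marked cell in the given set. This is for cell types that need per-collection cleanup even when they survive the collection.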
589 | template<typename CellType, typename CellSet> |
590 | void Heap::finalizeMarkedUnconditionalFinalizers(CellSet& cellSet) |
591 | { |
592 | cellSet.forEachMarkedCell( |
593 | [&] (HeapCell* cell, HeapCell::Kind) { |
594 | static_cast<CellType*>(cell)->finalizeUnconditionally(*vm()); |
595 | }); |
596 | } |
597 | |
598 | void Heap::finalizeUnconditionalFinalizers() |
599 | { |
600 | vm()->builtinExecutables()->finalizeUnconditionally(); |
601 | finalizeMarkedUnconditionalFinalizers<FunctionExecutable>(vm()->functionExecutableSpace.space); |
602 | finalizeMarkedUnconditionalFinalizers<SymbolTable>(vm()->symbolTableSpace); |
603 | vm()->forEachCodeBlockSpace( |
604 | [&] (auto& space) { |
605 | this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.set); |
606 | }); |
607 | finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm()->executableToCodeBlockEdgesWithFinalizers); |
608 | finalizeMarkedUnconditionalFinalizers<StructureRareData>(vm()->structureRareDataSpace); |
609 | finalizeMarkedUnconditionalFinalizers<UnlinkedFunctionExecutable>(vm()->unlinkedFunctionExecutableSpace.set); |
610 | if (vm()->m_weakSetSpace) |
611 | finalizeMarkedUnconditionalFinalizers<JSWeakSet>(*vm()->m_weakSetSpace); |
612 | if (vm()->m_weakMapSpace) |
613 | finalizeMarkedUnconditionalFinalizers<JSWeakMap>(*vm()->m_weakMapSpace); |
614 | if (vm()->m_errorInstanceSpace) |
615 | finalizeMarkedUnconditionalFinalizers<ErrorInstance>(*vm()->m_errorInstanceSpace); |
616 | |
617 | #if ENABLE(WEBASSEMBLY) |
618 | if (vm()->m_webAssemblyCodeBlockSpace) |
619 | finalizeMarkedUnconditionalFinalizers<JSWebAssemblyCodeBlock>(*vm()->m_webAssemblyCodeBlockSpace); |
620 | #endif |
621 | } |
622 | |
623 | void Heap::willStartIterating() |
624 | { |
625 | m_objectSpace.willStartIterating(); |
626 | } |
627 | |
628 | void Heap::didFinishIterating() |
629 | { |
630 | m_objectSpace.didFinishIterating(); |
631 | } |
632 | |
633 | void Heap::completeAllJITPlans() |
634 | { |
635 | if (!VM::canUseJIT()) |
636 | return; |
637 | #if ENABLE(JIT) |
638 | JITWorklist::ensureGlobalWorklist().completeAllForVM(*m_vm); |
639 | #endif // ENABLE(JIT) |
640 | DFG::completeAllPlansForVM(*m_vm); |
641 | } |
642 | |
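// Invokes func on every CodeBlock that is currently executing and, when the JIT is in use, on every CodeBlock the DFG is currently compiling.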
643 | template<typename Func> |
644 | void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func) |
645 | { |
646 | m_codeBlocks->iterateCurrentlyExecuting(func); |
647 | if (VM::canUseJIT()) |
648 | DFG::iterateCodeBlocksForGC(*m_vm, func); |
649 | } |
650 | |
651 | template<typename Func> |
652 | void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func) |
653 | { |
654 | Vector<CodeBlock*, 256> codeBlocks; |
655 | iterateExecutingAndCompilingCodeBlocks( |
656 | [&] (CodeBlock* codeBlock) { |
657 | codeBlocks.append(codeBlock); |
658 | }); |
659 | for (CodeBlock* codeBlock : codeBlocks) |
660 | func(codeBlock); |
661 | } |
662 | |
663 | void Heap::assertMarkStacksEmpty() |
664 | { |
665 | bool ok = true; |
666 | |
667 | if (!m_sharedCollectorMarkStack->isEmpty()) { |
668 | dataLog("FATAL: Shared collector mark stack not empty! It has " , m_sharedCollectorMarkStack->size(), " elements.\n" ); |
669 | ok = false; |
670 | } |
671 | |
672 | if (!m_sharedMutatorMarkStack->isEmpty()) { |
673 | dataLog("FATAL: Shared mutator mark stack not empty! It has " , m_sharedMutatorMarkStack->size(), " elements.\n" ); |
674 | ok = false; |
675 | } |
676 | |
677 | forEachSlotVisitor( |
678 | [&] (SlotVisitor& visitor) { |
679 | if (visitor.isEmpty()) |
680 | return; |
681 | |
682 | dataLog("FATAL: Visitor " , RawPointer(&visitor), " is not empty!\n" ); |
683 | ok = false; |
684 | }); |
685 | |
686 | RELEASE_ASSERT(ok); |
687 | } |
688 | |
689 | void Heap::gatherStackRoots(ConservativeRoots& roots) |
690 | { |
691 | m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread); |
692 | } |
693 | |
694 | void Heap::gatherJSStackRoots(ConservativeRoots& roots) |
695 | { |
696 | #if ENABLE(C_LOOP) |
697 | m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks); |
698 | #else |
699 | UNUSED_PARAM(roots); |
700 | #endif |
701 | } |
702 | |
703 | void Heap::gatherScratchBufferRoots(ConservativeRoots& roots) |
704 | { |
705 | #if ENABLE(DFG_JIT) |
706 | if (!VM::canUseJIT()) |
707 | return; |
708 | m_vm->gatherScratchBufferRoots(roots); |
709 | #else |
710 | UNUSED_PARAM(roots); |
711 | #endif |
712 | } |
713 | |
714 | void Heap::beginMarking() |
715 | { |
716 | TimingScope timingScope(*this, "Heap::beginMarking" ); |
717 | m_jitStubRoutines->clearMarks(); |
718 | m_objectSpace.beginMarking(); |
719 | setMutatorShouldBeFenced(true); |
720 | } |
721 | |
722 | void Heap::removeDeadCompilerWorklistEntries() |
723 | { |
724 | #if ENABLE(DFG_JIT) |
725 | if (!VM::canUseJIT()) |
726 | return; |
727 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
728 | DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm); |
729 | #endif |
730 | } |
731 | |
732 | bool Heap::isHeapSnapshotting() const |
733 | { |
734 | HeapProfiler* heapProfiler = m_vm->heapProfiler(); |
735 | if (UNLIKELY(heapProfiler)) |
736 | return heapProfiler->activeSnapshotBuilder(); |
737 | return false; |
738 | } |
739 | |
740 | struct GatherHeapSnapshotData : MarkedBlock::CountFunctor { |
741 | GatherHeapSnapshotData(VM& vm, HeapSnapshotBuilder& builder) |
742 | : m_vm(vm) |
743 | , m_builder(builder) |
744 | { |
745 | } |
746 | |
747 | IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const |
748 | { |
749 | if (isJSCellKind(kind)) { |
750 | JSCell* cell = static_cast<JSCell*>(heapCell); |
751 | cell->methodTable(m_vm)->heapSnapshot(cell, m_builder); |
752 | } |
753 | return IterationStatus::Continue; |
754 | } |
755 | |
756 | VM& m_vm; |
757 | HeapSnapshotBuilder& m_builder; |
758 | }; |
759 | |
760 | void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler) |
761 | { |
762 | if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) { |
763 | HeapIterationScope heapIterationScope(*this); |
764 | GatherHeapSnapshotData functor(*m_vm, *builder); |
765 | m_objectSpace.forEachLiveCell(heapIterationScope, functor); |
766 | } |
767 | } |
768 | |
769 | struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor { |
770 | RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot) |
771 | : m_snapshot(snapshot) |
772 | { |
773 | } |
774 | |
775 | IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const |
776 | { |
777 | if (isJSCellKind(kind)) |
778 | m_snapshot.sweepCell(static_cast<JSCell*>(cell)); |
779 | return IterationStatus::Continue; |
780 | } |
781 | |
782 | HeapSnapshot& m_snapshot; |
783 | }; |
784 | |
785 | void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler) |
786 | { |
787 | if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) { |
788 | HeapIterationScope heapIterationScope(*this); |
789 | RemoveDeadHeapSnapshotNodes functor(*snapshot); |
790 | m_objectSpace.forEachDeadCell(heapIterationScope, functor); |
791 | snapshot->shrinkToFit(); |
792 | } |
793 | } |
794 | |
795 | void Heap::updateObjectCounts() |
796 | { |
797 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) |
798 | m_totalBytesVisited = 0; |
799 | |
800 | m_totalBytesVisitedThisCycle = bytesVisited(); |
801 | |
802 | m_totalBytesVisited += m_totalBytesVisitedThisCycle; |
803 | } |
804 | |
805 | void Heap::endMarking() |
806 | { |
807 | forEachSlotVisitor( |
808 | [&] (SlotVisitor& visitor) { |
809 | visitor.reset(); |
810 | }); |
811 | |
812 | assertMarkStacksEmpty(); |
813 | |
814 | RELEASE_ASSERT(m_raceMarkStack->isEmpty()); |
815 | |
816 | m_objectSpace.endMarking(); |
817 | setMutatorShouldBeFenced(Options::forceFencedBarrier()); |
818 | } |
819 | |
820 | size_t Heap::objectCount() |
821 | { |
822 | return m_objectSpace.objectCount(); |
823 | } |
824 | |
825 | size_t Heap::extraMemorySize() |
826 | { |
827 | // FIXME: Change this to use SaturatedArithmetic when available. |
828 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
829 | Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize; |
830 | checkedTotal += m_deprecatedExtraMemorySize; |
831 | checkedTotal += m_arrayBuffers.size(); |
832 | size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet(); |
833 | |
834 | ASSERT(m_objectSpace.capacity() >= m_objectSpace.size()); |
835 | return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity()); |
836 | } |
837 | |
838 | size_t Heap::size() |
839 | { |
840 | return m_objectSpace.size() + extraMemorySize(); |
841 | } |
842 | |
843 | size_t Heap::capacity() |
844 | { |
845 | return m_objectSpace.capacity() + extraMemorySize(); |
846 | } |
847 | |
848 | size_t Heap::protectedGlobalObjectCount() |
849 | { |
850 | size_t result = 0; |
851 | forEachProtectedCell( |
852 | [&] (JSCell* cell) { |
853 | if (cell->isObject() && asObject(cell)->isGlobalObject()) |
854 | result++; |
855 | }); |
856 | return result; |
857 | } |
858 | |
859 | size_t Heap::globalObjectCount() |
860 | { |
861 | HeapIterationScope iterationScope(*this); |
862 | size_t result = 0; |
863 | m_objectSpace.forEachLiveCell( |
864 | iterationScope, |
865 | [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus { |
866 | if (!isJSCellKind(kind)) |
867 | return IterationStatus::Continue; |
868 | JSCell* cell = static_cast<JSCell*>(heapCell); |
869 | if (cell->isObject() && asObject(cell)->isGlobalObject()) |
870 | result++; |
871 | return IterationStatus::Continue; |
872 | }); |
873 | return result; |
874 | } |
875 | |
876 | size_t Heap::protectedObjectCount() |
877 | { |
878 | size_t result = 0; |
879 | forEachProtectedCell( |
880 | [&] (JSCell*) { |
881 | result++; |
882 | }); |
883 | return result; |
884 | } |
885 | |
886 | std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts() |
887 | { |
888 | std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>(); |
889 | forEachProtectedCell( |
890 | [&] (JSCell* cell) { |
891 | recordType(*vm(), *result, cell); |
892 | }); |
893 | return result; |
894 | } |
895 | |
896 | std::unique_ptr<TypeCountSet> Heap::objectTypeCounts() |
897 | { |
898 | std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>(); |
899 | HeapIterationScope iterationScope(*this); |
900 | m_objectSpace.forEachLiveCell( |
901 | iterationScope, |
902 | [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus { |
903 | if (isJSCellKind(kind)) |
904 | recordType(*vm(), *result, static_cast<JSCell*>(cell)); |
905 | return IterationStatus::Continue; |
906 | }); |
907 | return result; |
908 | } |
909 | |
910 | void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort) |
911 | { |
912 | if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting) |
913 | return; |
914 | |
915 | VM& vm = *m_vm; |
916 | PreventCollectionScope preventCollectionScope(*this); |
917 | |
918 | // If JavaScript is running, it's not safe to delete all JavaScript code, since |
919 | // we'll end up returning to deleted code. |
920 | RELEASE_ASSERT(!vm.entryScope); |
921 | RELEASE_ASSERT(!m_collectionScope); |
922 | |
923 | completeAllJITPlans(); |
924 | |
925 | vm.forEachScriptExecutableSpace( |
926 | [&] (auto& spaceAndSet) { |
927 | HeapIterationScope heapIterationScope(*this); |
928 | auto& set = spaceAndSet.set; |
929 | set.forEachLiveCell( |
930 | [&] (HeapCell* cell, HeapCell::Kind) { |
931 | ScriptExecutable* executable = static_cast<ScriptExecutable*>(cell); |
932 | executable->clearCode(set); |
933 | }); |
934 | }); |
935 | |
936 | #if ENABLE(WEBASSEMBLY) |
937 | { |
938 | // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will |
939 | // have no idea that we cleared the code from all of the Executables in the |
940 | // VM. This could leave Wasm in an inconsistent state where it has an IC that |
941 | // points into a CodeBlock that could be dead. The IC will still succeed because |
942 | // it uses a callee check, but then it will call into dead code. |
943 | HeapIterationScope heapIterationScope(*this); |
944 | if (vm.m_webAssemblyCodeBlockSpace) { |
945 | vm.m_webAssemblyCodeBlockSpace->forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) { |
946 | ASSERT_UNUSED(kind, kind == HeapCell::JSCell); |
947 | JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell); |
948 | codeBlock->clearJSCallICs(vm); |
949 | }); |
950 | } |
951 | } |
952 | #endif |
953 | } |
954 | |
955 | void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort) |
956 | { |
957 | if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting) |
958 | return; |
959 | |
960 | VM& vm = *m_vm; |
961 | PreventCollectionScope preventCollectionScope(*this); |
962 | |
963 | RELEASE_ASSERT(!m_collectionScope); |
964 | |
965 | HeapIterationScope heapIterationScope(*this); |
966 | vm.unlinkedFunctionExecutableSpace.set.forEachLiveCell( |
967 | [&] (HeapCell* cell, HeapCell::Kind) { |
968 | UnlinkedFunctionExecutable* executable = static_cast<UnlinkedFunctionExecutable*>(cell); |
969 | executable->clearCode(vm); |
970 | }); |
971 | } |
972 | |
973 | void Heap::deleteUnmarkedCompiledCode() |
974 | { |
975 | vm()->forEachScriptExecutableSpace([] (auto& space) { space.space.sweep(); }); |
976 | vm()->forEachCodeBlockSpace([] (auto& space) { space.space.sweep(); }); // Sweeping must occur before deleting stubs, otherwise the stubs might still think they're alive as they get deleted. |
977 | m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines(); |
978 | } |
979 | |
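// Slow path of the write barrier: unless the store is filtered out below, this re-greys the cell and pushes it onto the mutator mark stack so the collector will rescan it.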
980 | void Heap::addToRememberedSet(const JSCell* constCell) |
981 | { |
982 | JSCell* cell = const_cast<JSCell*>(constCell); |
983 | ASSERT(cell); |
984 | ASSERT(!Options::useConcurrentJIT() || !isCompilationThread()); |
985 | m_barriersExecuted++; |
986 | if (m_mutatorShouldBeFenced) { |
987 | WTF::loadLoadFence(); |
988 | if (!isMarked(cell)) { |
989 | // During a full collection a store into an unmarked object that had survived past |
990 | // collections will manifest as a store to an unmarked PossiblyBlack object. If the |
991 | // object gets marked at some time after this then it will go down the normal marking |
992 | // path. So, we don't have to remember this object. We could return here. But we go |
993 | // further and attempt to re-white the object. |
994 | |
995 | RELEASE_ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Full); |
996 | |
997 | if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) { |
998 | // Now we protect against this race: |
999 | // |
1000 | // 1) Object starts out black + unmarked. |
1001 | // --> We do isMarked here. |
1002 | // 2) Object is marked and greyed. |
1003 | // 3) Object is scanned and blacked. |
1004 | // --> We do atomicCompareExchangeCellStateStrong here. |
1005 | // |
1006 | // In this case we would have made the object white again, even though it should |
1007 | // be black. This check lets us correct our mistake. This relies on the fact that |
1008 | // isMarked converges monotonically to true. |
1009 | if (isMarked(cell)) { |
1010 | // It's difficult to work out whether the object should be grey or black at |
1011 | // this point. We say black conservatively. |
1012 | cell->setCellState(CellState::PossiblyBlack); |
1013 | } |
1014 | |
1015 | // Either way, we can return. Most likely, the object was not marked, and so the |
1016 | // object is now labeled white. This means that future barrier executions will not |
1017 | // fire. In the unlikely event that the object had become marked, we can still |
1018 | // return anyway, since we proved that the object was not marked at the time that |
1019 | // we executed this slow path. |
1020 | } |
1021 | |
1022 | return; |
1023 | } |
1024 | } else |
1025 | ASSERT(isMarked(cell)); |
1026 | // It could be that the object was *just* marked. This means that the collector may set the |
1027 | // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to |
1028 | // race with the collector here. If we win then this is accurate because the object _will_ |
1029 | // get scanned again. If we lose then someone else will barrier the object again. That would |
1030 | // be unfortunate but not the end of the world. |
1031 | cell->setCellState(CellState::PossiblyGrey); |
1032 | m_mutatorMarkStack->append(cell); |
1033 | } |
1034 | |
1035 | void Heap::sweepSynchronously() |
1036 | { |
1037 | MonotonicTime before { }; |
1038 | if (Options::logGC()) { |
1039 | dataLog("Full sweep: " , capacity() / 1024, "kb " ); |
1040 | before = MonotonicTime::now(); |
1041 | } |
1042 | m_objectSpace.sweep(); |
1043 | m_objectSpace.shrink(); |
1044 | if (Options::logGC()) { |
1045 | MonotonicTime after = MonotonicTime::now(); |
1046 | dataLog("=> " , capacity() / 1024, "kb, " , (after - before).milliseconds(), "ms" ); |
1047 | } |
1048 | } |
1049 | |
1050 | void Heap::collect(Synchronousness synchronousness, GCRequest request) |
1051 | { |
1052 | switch (synchronousness) { |
1053 | case Async: |
1054 | collectAsync(request); |
1055 | return; |
1056 | case Sync: |
1057 | collectSync(request); |
1058 | return; |
1059 | } |
1060 | RELEASE_ASSERT_NOT_REACHED(); |
1061 | } |
1062 | |
1063 | void Heap::collectNow(Synchronousness synchronousness, GCRequest request) |
1064 | { |
1065 | if (validateDFGDoesGC) |
1066 | RELEASE_ASSERT(expectDoesGC()); |
1067 | |
1068 | switch (synchronousness) { |
1069 | case Async: { |
1070 | collectAsync(request); |
1071 | stopIfNecessary(); |
1072 | return; |
1073 | } |
1074 | |
1075 | case Sync: { |
1076 | collectSync(request); |
1077 | |
1078 | DeferGCForAWhile deferGC(*this); |
1079 | if (UNLIKELY(Options::useImmortalObjects())) |
1080 | sweeper().stopSweeping(); |
1081 | |
1082 | bool alreadySweptInCollectSync = shouldSweepSynchronously(); |
1083 | if (!alreadySweptInCollectSync) { |
1084 | if (Options::logGC()) |
1085 | dataLog("[GC<" , RawPointer(this), ">: " ); |
1086 | sweepSynchronously(); |
1087 | if (Options::logGC()) |
1088 | dataLog("]\n" ); |
1089 | } |
1090 | m_objectSpace.assertNoUnswept(); |
1091 | |
1092 | sweepAllLogicallyEmptyWeakBlocks(); |
1093 | return; |
1094 | } } |
1095 | RELEASE_ASSERT_NOT_REACHED(); |
1096 | } |
1097 | |
1098 | void Heap::collectAsync(GCRequest request) |
1099 | { |
1100 | if (validateDFGDoesGC) |
1101 | RELEASE_ASSERT(expectDoesGC()); |
1102 | |
1103 | if (!m_isSafeToCollect) |
1104 | return; |
1105 | |
1106 | bool alreadyRequested = false; |
1107 | { |
1108 | LockHolder locker(*m_threadLock); |
1109 | for (const GCRequest& previousRequest : m_requests) { |
1110 | if (request.subsumedBy(previousRequest)) { |
1111 | alreadyRequested = true; |
1112 | break; |
1113 | } |
1114 | } |
1115 | } |
1116 | if (alreadyRequested) |
1117 | return; |
1118 | |
1119 | requestCollection(request); |
1120 | } |
1121 | |
1122 | void Heap::collectSync(GCRequest request) |
1123 | { |
1124 | if (validateDFGDoesGC) |
1125 | RELEASE_ASSERT(expectDoesGC()); |
1126 | |
1127 | if (!m_isSafeToCollect) |
1128 | return; |
1129 | |
1130 | waitForCollection(requestCollection(request)); |
1131 | } |
1132 | |
1133 | bool Heap::shouldCollectInCollectorThread(const AbstractLocker&) |
1134 | { |
1135 | RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket)); |
1136 | RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
1137 | |
1138 | if (false) |
1139 | dataLog("Mutator has the conn = " , !!(m_worldState.load() & mutatorHasConnBit), "\n" ); |
1140 | |
1141 | return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit); |
1142 | } |
1143 | |
1144 | void Heap::collectInCollectorThread() |
1145 | { |
1146 | for (;;) { |
1147 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr); |
1148 | switch (result) { |
1149 | case RunCurrentPhaseResult::Finished: |
1150 | return; |
1151 | case RunCurrentPhaseResult::Continue: |
1152 | break; |
1153 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1154 | RELEASE_ASSERT_NOT_REACHED(); |
1155 | break; |
1156 | } |
1157 | } |
1158 | } |
1159 | |
1160 | ALWAYS_INLINE int asInt(CollectorPhase phase) |
1161 | { |
1162 | return static_cast<int>(phase); |
1163 | } |
1164 | |
1165 | void Heap::checkConn(GCConductor conn) |
1166 | { |
1167 | unsigned worldState = m_worldState.load(); |
1168 | switch (conn) { |
1169 | case GCConductor::Mutator: |
1170 | RELEASE_ASSERT(worldState & mutatorHasConnBit, worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered()); |
1171 | return; |
1172 | case GCConductor::Collector: |
1173 | RELEASE_ASSERT(!(worldState & mutatorHasConnBit), worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered()); |
1174 | return; |
1175 | } |
1176 | RELEASE_ASSERT_NOT_REACHED(); |
1177 | } |
1178 | |
1179 | auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult |
1180 | { |
1181 | checkConn(conn); |
1182 | m_currentThreadState = currentThreadState; |
1183 | m_currentThread = &Thread::current(); |
1184 | |
1185 | if (conn == GCConductor::Mutator) |
1186 | sanitizeStackForVM(vm()); |
1187 | |
1188 | // If the collector transfers the conn to the mutator, it leaves us in between phases. |
1189 | if (!finishChangingPhase(conn)) { |
1190 | // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing |
1191 | // this, but it's probably not the end of the world if it did happen. |
1192 | if (false) |
1193 | dataLog("Conn bounce-back.\n" ); |
1194 | return RunCurrentPhaseResult::Finished; |
1195 | } |
1196 | |
1197 | bool result = false; |
1198 | switch (m_currentPhase) { |
1199 | case CollectorPhase::NotRunning: |
1200 | result = runNotRunningPhase(conn); |
1201 | break; |
1202 | |
1203 | case CollectorPhase::Begin: |
1204 | result = runBeginPhase(conn); |
1205 | break; |
1206 | |
1207 | case CollectorPhase::Fixpoint: |
1208 | if (!currentThreadState && conn == GCConductor::Mutator) |
1209 | return RunCurrentPhaseResult::NeedCurrentThreadState; |
1210 | |
1211 | result = runFixpointPhase(conn); |
1212 | break; |
1213 | |
1214 | case CollectorPhase::Concurrent: |
1215 | result = runConcurrentPhase(conn); |
1216 | break; |
1217 | |
1218 | case CollectorPhase::Reloop: |
1219 | result = runReloopPhase(conn); |
1220 | break; |
1221 | |
1222 | case CollectorPhase::End: |
1223 | result = runEndPhase(conn); |
1224 | break; |
1225 | } |
1226 | |
1227 | return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished; |
1228 | } |
1229 | |
1230 | NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn) |
1231 | { |
1232 | // Check m_requests since the mutator calls this to poll what's going on. |
1233 | { |
1234 | auto locker = holdLock(*m_threadLock); |
1235 | if (m_requests.isEmpty()) |
1236 | return false; |
1237 | } |
1238 | |
1239 | return changePhase(conn, CollectorPhase::Begin); |
1240 | } |
1241 | |
1242 | NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn) |
1243 | { |
1244 | m_currentGCStartTime = MonotonicTime::now(); |
1245 | |
1246 | { |
1247 | LockHolder locker(*m_threadLock); |
1248 | RELEASE_ASSERT(!m_requests.isEmpty()); |
1249 | m_currentRequest = m_requests.first(); |
1250 | } |
1251 | |
1252 | if (Options::logGC()) |
1253 | dataLog("[GC<" , RawPointer(this), ">: START " , gcConductorShortName(conn), " " , capacity() / 1024, "kb " ); |
1254 | |
1255 | m_beforeGC = MonotonicTime::now(); |
1256 | |
1257 | if (m_collectionScope) { |
1258 | dataLog("Collection scope already set during GC: " , *m_collectionScope, "\n" ); |
1259 | RELEASE_ASSERT_NOT_REACHED(); |
1260 | } |
1261 | |
1262 | willStartCollection(); |
1263 | |
1264 | if (UNLIKELY(m_verifier)) { |
1265 | // Verify that live objects from the last GC cycle haven't been corrupted by |
1266 | // mutators before we begin this new GC cycle. |
1267 | m_verifier->verify(HeapVerifier::Phase::BeforeGC); |
1268 | |
1269 | m_verifier->startGC(); |
1270 | m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking); |
1271 | } |
1272 | |
1273 | prepareForMarking(); |
1274 | |
1275 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
1276 | m_opaqueRoots.clear(); |
1277 | m_collectorSlotVisitor->clearMarkStacks(); |
1278 | m_mutatorMarkStack->clear(); |
1279 | } |
1280 | |
1281 | RELEASE_ASSERT(m_raceMarkStack->isEmpty()); |
1282 | |
1283 | beginMarking(); |
1284 | |
1285 | forEachSlotVisitor( |
1286 | [&] (SlotVisitor& visitor) { |
1287 | visitor.didStartMarking(); |
1288 | }); |
1289 | |
1290 | m_parallelMarkersShouldExit = false; |
1291 | |
1292 | m_helperClient.setFunction( |
1293 | [this] () { |
1294 | SlotVisitor* slotVisitor; |
1295 | { |
1296 | LockHolder locker(m_parallelSlotVisitorLock); |
1297 | RELEASE_ASSERT_WITH_MESSAGE(!m_availableParallelSlotVisitors.isEmpty(), "Parallel SlotVisitors are allocated apriori" ); |
1298 | slotVisitor = m_availableParallelSlotVisitors.takeLast(); |
1299 | } |
1300 | |
1301 | Thread::registerGCThread(GCThreadType::Helper); |
1302 | |
1303 | { |
1304 | ParallelModeEnabler parallelModeEnabler(*slotVisitor); |
1305 | slotVisitor->drainFromShared(SlotVisitor::SlaveDrain); |
1306 | } |
1307 | |
1308 | { |
1309 | LockHolder locker(m_parallelSlotVisitorLock); |
1310 | m_availableParallelSlotVisitors.append(slotVisitor); |
1311 | } |
1312 | }); |
1313 | |
1314 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1315 | |
1316 | m_constraintSet->didStartMarking(); |
1317 | |
1318 | m_scheduler->beginCollection(); |
1319 | if (Options::logGC()) |
1320 | m_scheduler->log(); |
1321 | |
1322 | // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()" |
1323 | // checks because bootstrap would have put things into the visitor. So, we should fall |
1324 | // through to draining. |
1325 | |
1326 | if (!slotVisitor.didReachTermination()) { |
1327 | dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n" ); |
1328 | dataLog("slotVisitor.isEmpty(): " , slotVisitor.isEmpty(), "\n" ); |
1329 | dataLog("slotVisitor.collectorMarkStack().isEmpty(): " , slotVisitor.collectorMarkStack().isEmpty(), "\n" ); |
1330 | dataLog("slotVisitor.mutatorMarkStack().isEmpty(): " , slotVisitor.mutatorMarkStack().isEmpty(), "\n" ); |
1331 | dataLog("m_numberOfActiveParallelMarkers: " , m_numberOfActiveParallelMarkers, "\n" ); |
1332 | dataLog("m_sharedCollectorMarkStack->isEmpty(): " , m_sharedCollectorMarkStack->isEmpty(), "\n" ); |
1333 | dataLog("m_sharedMutatorMarkStack->isEmpty(): " , m_sharedMutatorMarkStack->isEmpty(), "\n" ); |
1334 | dataLog("slotVisitor.didReachTermination(): " , slotVisitor.didReachTermination(), "\n" ); |
1335 | RELEASE_ASSERT_NOT_REACHED(); |
1336 | } |
1337 | |
1338 | return changePhase(conn, CollectorPhase::Fixpoint); |
1339 | } |
1340 | |
1341 | NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn) |
1342 | { |
1343 | RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState); |
1344 | |
1345 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1346 | |
1347 | if (Options::logGC()) { |
1348 | HashMap<const char*, size_t> visitMap; |
1349 | forEachSlotVisitor( |
1350 | [&] (SlotVisitor& slotVisitor) { |
1351 | visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024); |
1352 | }); |
1353 | |
1354 | auto perVisitorDump = sortedMapDump( |
1355 | visitMap, |
1356 | [] (const char* a, const char* b) -> bool { |
1357 | return strcmp(a, b) < 0; |
1358 | }, |
1359 | ":" , " " ); |
1360 | |
1361 | dataLog("v=" , bytesVisited() / 1024, "kb (" , perVisitorDump, ") o=" , m_opaqueRoots.size(), " b=" , m_barriersExecuted, " " ); |
1362 | } |
1363 | |
1364 | if (slotVisitor.didReachTermination()) { |
1365 | m_opaqueRoots.deleteOldTables(); |
1366 | |
1367 | m_scheduler->didReachTermination(); |
1368 | |
1369 | assertMarkStacksEmpty(); |
1370 | |
1371 | // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely, |
1372 | // we don't have to execute root constraints again unless the mutator did run. At a |
1373 | // minimum, we could use this for work estimates - but it's probably more than just an |
1374 | // estimate. |
1375 | // https://bugs.webkit.org/show_bug.cgi?id=166828 |
1376 | |
1377 | // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also |
1378 | // add their own using Heap::addMarkingConstraint(). |
1379 | bool converged = m_constraintSet->executeConvergence(slotVisitor); |
1380 | |
1381 | // FIXME: The slotVisitor.isEmpty() check is most likely not needed. |
1382 | // https://bugs.webkit.org/show_bug.cgi?id=180310 |
1383 | if (converged && slotVisitor.isEmpty()) { |
1384 | assertMarkStacksEmpty(); |
1385 | return changePhase(conn, CollectorPhase::End); |
1386 | } |
1387 | |
1388 | m_scheduler->didExecuteConstraints(); |
1389 | } |
1390 | |
1391 | if (Options::logGC()) |
1392 | dataLog(slotVisitor.collectorMarkStack().size(), "+" , m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " " ); |
1393 | |
1394 | { |
1395 | ParallelModeEnabler enabler(slotVisitor); |
1396 | slotVisitor.drainInParallel(m_scheduler->timeToResume()); |
1397 | } |
1398 | |
1399 | m_scheduler->synchronousDrainingDidStall(); |
1400 | |
1401 | // This is kinda tricky. The termination check looks at: |
1402 | // |
1403 | // - Whether the marking threads are active. If they are not, this means that the marking threads' |
1404 | // SlotVisitors are empty. |
1405 | // - Whether the collector's slot visitor is empty. |
1406 | // - Whether the shared mark stacks are empty. |
1407 | // |
1408 | // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC |
1409 | // work increment, so it must be empty now. |
1410 | if (slotVisitor.didReachTermination()) |
1411 | return true; // This is like relooping to the top of runFixpointPhase(). |
1412 | |
1413 | if (!m_scheduler->shouldResume()) |
1414 | return true; |
1415 | |
1416 | m_scheduler->willResume(); |
1417 | |
1418 | if (Options::logGC()) { |
1419 | double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds(); |
1420 | dataLog("p=" , thisPauseMS, "ms (max " , maxPauseMS(thisPauseMS), ")...]\n" ); |
1421 | } |
1422 | |
1423 | // Forgive the mutator for its past failures to keep up. |
1424 | // FIXME: Figure out if moving this to different places results in perf changes. |
1425 | m_incrementBalance = 0; |
1426 | |
1427 | return changePhase(conn, CollectorPhase::Concurrent); |
1428 | } |
1429 | |
1430 | NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn) |
1431 | { |
1432 | SlotVisitor& slotVisitor = *m_collectorSlotVisitor; |
1433 | |
1434 | switch (conn) { |
1435 | case GCConductor::Mutator: { |
1436 | // When the mutator has the conn, runConcurrentPhase() gets polled every time someone calls |
1437 | // stopIfNecessary(), i.e. on every allocation slow path. When that happens we check whether |
1438 | // it's time to stop and do some work. |
1439 | if (slotVisitor.didReachTermination() |
1440 | || m_scheduler->shouldStop()) |
1441 | return changePhase(conn, CollectorPhase::Reloop); |
1442 | |
1443 | // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate |
1444 | // everything. This is super cheap if the SlotVisitor is already empty. |
1445 | slotVisitor.donateAll(); |
1446 | return false; |
1447 | } |
1448 | case GCConductor::Collector: { |
1449 | { |
1450 | ParallelModeEnabler enabler(slotVisitor); |
1451 | slotVisitor.drainInParallelPassively(m_scheduler->timeToStop()); |
1452 | } |
1453 | return changePhase(conn, CollectorPhase::Reloop); |
1454 | } } |
1455 | |
1456 | RELEASE_ASSERT_NOT_REACHED(); |
1457 | return false; |
1458 | } |
1459 | |
1460 | NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn) |
1461 | { |
1462 | if (Options::logGC()) |
1463 | dataLog("[GC<" , RawPointer(this), ">: " , gcConductorShortName(conn), " " ); |
1464 | |
1465 | m_scheduler->didStop(); |
1466 | |
1467 | if (Options::logGC()) |
1468 | m_scheduler->log(); |
1469 | |
1470 | return changePhase(conn, CollectorPhase::Fixpoint); |
1471 | } |
1472 | |
1473 | NEVER_INLINE bool Heap::runEndPhase(GCConductor conn) |
1474 | { |
1475 | m_scheduler->endCollection(); |
1476 | |
1477 | { |
1478 | auto locker = holdLock(m_markingMutex); |
1479 | m_parallelMarkersShouldExit = true; |
1480 | m_markingConditionVariable.notifyAll(); |
1481 | } |
1482 | m_helperClient.finish(); |
1483 | |
1484 | iterateExecutingAndCompilingCodeBlocks( |
1485 | [&] (CodeBlock* codeBlock) { |
1486 | writeBarrier(codeBlock); |
1487 | }); |
1488 | |
1489 | updateObjectCounts(); |
1490 | endMarking(); |
1491 | |
1492 | if (UNLIKELY(m_verifier)) { |
1493 | m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking); |
1494 | m_verifier->verify(HeapVerifier::Phase::AfterMarking); |
1495 | } |
1496 | |
1497 | if (vm()->typeProfiler()) |
1498 | vm()->typeProfiler()->invalidateTypeSetCache(*vm()); |
1499 | |
1500 | reapWeakHandles(); |
1501 | pruneStaleEntriesFromWeakGCMaps(); |
1502 | sweepArrayBuffers(); |
1503 | snapshotUnswept(); |
1504 | finalizeUnconditionalFinalizers(); |
1505 | removeDeadCompilerWorklistEntries(); |
1506 | notifyIncrementalSweeper(); |
1507 | |
1508 | m_codeBlocks->iterateCurrentlyExecuting( |
1509 | [&] (CodeBlock* codeBlock) { |
1510 | writeBarrier(codeBlock); |
1511 | }); |
1512 | m_codeBlocks->clearCurrentlyExecuting(); |
1513 | |
1514 | m_objectSpace.prepareForAllocation(); |
1515 | updateAllocationLimits(); |
1516 | |
1517 | if (UNLIKELY(m_verifier)) { |
1518 | m_verifier->trimDeadCells(); |
1519 | m_verifier->verify(HeapVerifier::Phase::AfterGC); |
1520 | } |
1521 | |
1522 | didFinishCollection(); |
1523 | |
1524 | if (m_currentRequest.didFinishEndPhase) |
1525 | m_currentRequest.didFinishEndPhase->run(); |
1526 | |
1527 | if (false) { |
1528 | dataLog("Heap state after GC:\n" ); |
1529 | m_objectSpace.dumpBits(); |
1530 | } |
1531 | |
1532 | if (Options::logGC()) { |
1533 | double thisPauseMS = (m_afterGC - m_stopTime).milliseconds(); |
1534 | dataLog("p=" , thisPauseMS, "ms (max " , maxPauseMS(thisPauseMS), "), cycle " , (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n" ); |
1535 | } |
1536 | |
1537 | { |
1538 | auto locker = holdLock(*m_threadLock); |
1539 | m_requests.removeFirst(); |
1540 | m_lastServedTicket++; |
1541 | clearMutatorWaiting(); |
1542 | } |
1543 | ParkingLot::unparkAll(&m_worldState); |
1544 | |
1545 | if (false) |
1546 | dataLog("GC END!\n" ); |
1547 | |
1548 | setNeedFinalize(); |
1549 | |
1550 | m_lastGCStartTime = m_currentGCStartTime; |
1551 | m_lastGCEndTime = MonotonicTime::now(); |
1552 | m_totalGCTime += m_lastGCEndTime - m_lastGCStartTime; |
1553 | |
1554 | return changePhase(conn, CollectorPhase::NotRunning); |
1555 | } |
1556 | |
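// Requests a transition to nextPhase and defers the real work to finishChangingPhase(), which stops or resumes the world as the transition requires.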
1557 | bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase) |
1558 | { |
1559 | checkConn(conn); |
1560 | |
1561 | m_lastPhase = m_currentPhase; |
1562 | m_nextPhase = nextPhase; |
1563 | |
1564 | return finishChangingPhase(conn); |
1565 | } |
1566 | |
1567 | NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn) |
1568 | { |
1569 | checkConn(conn); |
1570 | |
1571 | if (m_nextPhase == m_currentPhase) |
1572 | return true; |
1573 | |
1574 | if (false) |
1575 | dataLog(conn, ": Going to phase: " , m_nextPhase, " (from " , m_currentPhase, ")\n" ); |
1576 | |
1577 | m_phaseVersion++; |
1578 | |
1579 | bool suspendedBefore = worldShouldBeSuspended(m_currentPhase); |
1580 | bool suspendedAfter = worldShouldBeSuspended(m_nextPhase); |
1581 | |
1582 | if (suspendedBefore != suspendedAfter) { |
1583 | if (suspendedBefore) { |
1584 | RELEASE_ASSERT(!suspendedAfter); |
1585 | |
1586 | resumeThePeriphery(); |
1587 | if (conn == GCConductor::Collector) |
1588 | resumeTheMutator(); |
1589 | else |
1590 | handleNeedFinalize(); |
1591 | } else { |
1592 | RELEASE_ASSERT(!suspendedBefore); |
1593 | RELEASE_ASSERT(suspendedAfter); |
1594 | |
1595 | if (conn == GCConductor::Collector) { |
1596 | waitWhileNeedFinalize(); |
1597 | if (!stopTheMutator()) { |
1598 | if (false) |
1599 | dataLog("Returning false.\n" ); |
1600 | return false; |
1601 | } |
1602 | } else { |
1603 | sanitizeStackForVM(m_vm); |
1604 | handleNeedFinalize(); |
1605 | } |
1606 | stopThePeriphery(conn); |
1607 | } |
1608 | } |
1609 | |
1610 | m_currentPhase = m_nextPhase; |
1611 | return true; |
1612 | } |
1613 | |
1614 | void Heap::stopThePeriphery(GCConductor conn) |
1615 | { |
1616 | if (m_worldIsStopped) { |
1617 | dataLog("FATAL: world already stopped.\n" ); |
1618 | RELEASE_ASSERT_NOT_REACHED(); |
1619 | } |
1620 | |
1621 | if (m_mutatorDidRun) |
1622 | m_mutatorExecutionVersion++; |
1623 | |
1624 | m_mutatorDidRun = false; |
1625 | |
1626 | suspendCompilerThreads(); |
1627 | m_worldIsStopped = true; |
1628 | |
1629 | forEachSlotVisitor( |
1630 | [&] (SlotVisitor& slotVisitor) { |
1631 | slotVisitor.updateMutatorIsStopped(NoLockingNecessary); |
1632 | }); |
1633 | |
1634 | #if ENABLE(JIT) |
1635 | if (VM::canUseJIT()) { |
1636 | DeferGCForAWhile awhile(*this); |
1637 | if (JITWorklist::ensureGlobalWorklist().completeAllForVM(*m_vm) |
1638 | && conn == GCConductor::Collector) |
1639 | setGCDidJIT(); |
1640 | } |
1641 | #endif // ENABLE(JIT) |
1642 | UNUSED_PARAM(conn); |
1643 | |
1644 | if (auto* shadowChicken = vm()->shadowChicken()) |
1645 | shadowChicken->update(*vm(), vm()->topCallFrame); |
1646 | |
1647 | m_structureIDTable.flushOldTables(); |
1648 | m_objectSpace.stopAllocating(); |
1649 | |
1650 | m_stopTime = MonotonicTime::now(); |
1651 | } |
1652 | |
1653 | NEVER_INLINE void Heap::resumeThePeriphery() |
1654 | { |
1655 | // Calling resumeAllocating does the Right Thing depending on whether this is the end of a |
1656 | // collection cycle or this is just a concurrent phase within a collection cycle: |
1657 | // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the |
1658 | // last active block. |
1659 | // - During collection cycle: it reinstates the last active block. |
1660 | m_objectSpace.resumeAllocating(); |
1661 | |
1662 | m_barriersExecuted = 0; |
1663 | |
1664 | if (!m_worldIsStopped) { |
1665 | dataLog("Fatal: collector does not believe that the world is stopped.\n" ); |
1666 | RELEASE_ASSERT_NOT_REACHED(); |
1667 | } |
1668 | m_worldIsStopped = false; |
1669 | |
1670 | // FIXME: This could be vastly improved: we want to grab the locks in the order in which they |
1671 | // become available. We basically want a lockAny() method that will lock whatever lock is available |
1672 | // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple |
1673 | // queues at once, which is totally achievable - it would just require memory allocation, which is |
1674 | // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock |
1675 | // with a DLG-style handshake mechanism, but that seems not as general. |
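//
// For now we do something simpler: gather the visitors, then make a bounded number of passes over
// them. On each pass a visitor is dropped (swap-removed) once it has either acknowledged the
// resume on its own or we managed to briefly take its rightToRun lock and update it, yielding
// between passes; after roughly 40 passes we give up and fall through to the unconditional update
// below.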
1676 | Vector<SlotVisitor*, 8> slotVisitorsToUpdate; |
1677 | |
1678 | forEachSlotVisitor( |
1679 | [&] (SlotVisitor& slotVisitor) { |
1680 | slotVisitorsToUpdate.append(&slotVisitor); |
1681 | }); |
1682 | |
1683 | for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) { |
1684 | for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) { |
1685 | SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index]; |
1686 | bool remove = false; |
1687 | if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed()) |
1688 | remove = true; |
1689 | else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) { |
1690 | slotVisitor.updateMutatorIsStopped(locker); |
1691 | remove = true; |
1692 | } |
1693 | if (remove) { |
1694 | slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last(); |
1695 | slotVisitorsToUpdate.takeLast(); |
1696 | } |
1697 | } |
1698 | Thread::yield(); |
1699 | } |
1700 | |
1701 | for (SlotVisitor* slotVisitor : slotVisitorsToUpdate) |
1702 | slotVisitor->updateMutatorIsStopped(); |
1703 | |
1704 | resumeCompilerThreads(); |
1705 | } |
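
// m_worldState packs the mutator/collector handshake into a single atomic word. Rough summary,
// inferred from the transitions in this file (the authoritative bit definitions live in Heap.h):
// - hasAccessBit: the mutator currently has heap access.
// - stoppedBit: the mutator is stopped and must not reacquire access until it is resumed.
// - mutatorHasConnBit: the mutator, not the collector thread, is driving the current collection.
// - mutatorWaitingBit: the mutator is parked (or about to park) waiting for the collector.
// - gcDidJITBit: the collector touched executable memory; the mutator must run a
//   cross-modifying-code fence before executing JIT code again.
// - needFinalizeBit: finalization is pending and should run on the mutator at its next safepoint.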
1706 | |
1707 | bool Heap::stopTheMutator() |
1708 | { |
1709 | for (;;) { |
1710 | unsigned oldState = m_worldState.load(); |
1711 | if (oldState & stoppedBit) { |
1712 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1713 | RELEASE_ASSERT(!(oldState & mutatorWaitingBit)); |
1714 | RELEASE_ASSERT(!(oldState & mutatorHasConnBit)); |
1715 | return true; |
1716 | } |
1717 | |
1718 | if (oldState & mutatorHasConnBit) { |
1719 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1720 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1721 | return false; |
1722 | } |
1723 | |
1724 | if (!(oldState & hasAccessBit)) { |
1725 | RELEASE_ASSERT(!(oldState & mutatorHasConnBit)); |
1726 | RELEASE_ASSERT(!(oldState & mutatorWaitingBit)); |
1727 | // We can stop the world instantly. |
1728 | if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit)) |
1729 | return true; |
1730 | continue; |
1731 | } |
1732 | |
1733 | // Transfer the conn to the mutator and bail. |
1734 | RELEASE_ASSERT(oldState & hasAccessBit); |
1735 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1736 | unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit; |
1737 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1738 | if (false) |
1739 | dataLog("Handed off the conn.\n" ); |
1740 | m_stopIfNecessaryTimer->scheduleSoon(); |
1741 | ParkingLot::unparkAll(&m_worldState); |
1742 | return false; |
1743 | } |
1744 | } |
1745 | } |
1746 | |
1747 | NEVER_INLINE void Heap::resumeTheMutator() |
1748 | { |
1749 | if (false) |
1750 | dataLog("Resuming the mutator.\n" ); |
1751 | for (;;) { |
1752 | unsigned oldState = m_worldState.load(); |
1753 | if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) { |
1754 | dataLog("Fatal: hasAccess = " , !!(oldState & hasAccessBit), ", stopped = " , !!(oldState & stoppedBit), "\n" ); |
1755 | RELEASE_ASSERT_NOT_REACHED(); |
1756 | } |
1757 | if (oldState & mutatorHasConnBit) { |
1758 | dataLog("Fatal: mutator has the conn.\n" ); |
1759 | RELEASE_ASSERT_NOT_REACHED(); |
1760 | } |
1761 | |
1762 | if (!(oldState & stoppedBit)) { |
1763 | if (false) |
1764 | dataLog("Returning because not stopped.\n" ); |
1765 | return; |
1766 | } |
1767 | |
1768 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) { |
1769 | if (false) |
1770 | dataLog("CASing and returning.\n" ); |
1771 | ParkingLot::unparkAll(&m_worldState); |
1772 | return; |
1773 | } |
1774 | } |
1775 | } |
1776 | |
1777 | void Heap::stopIfNecessarySlow() |
1778 | { |
1779 | if (validateDFGDoesGC) |
1780 | RELEASE_ASSERT(expectDoesGC()); |
1781 | |
1782 | while (stopIfNecessarySlow(m_worldState.load())) { } |
1783 | |
1784 | RELEASE_ASSERT(m_worldState.load() & hasAccessBit); |
1785 | RELEASE_ASSERT(!(m_worldState.load() & stoppedBit)); |
1786 | |
1787 | handleGCDidJIT(); |
1788 | handleNeedFinalize(); |
1789 | m_mutatorDidRun = true; |
1790 | } |
1791 | |
1792 | bool Heap::stopIfNecessarySlow(unsigned oldState) |
1793 | { |
1794 | if (validateDFGDoesGC) |
1795 | RELEASE_ASSERT(expectDoesGC()); |
1796 | |
1797 | RELEASE_ASSERT(oldState & hasAccessBit); |
1798 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1799 | |
1800 | // It's possible for us to wake up with finalization already requested but the world not yet |
1801 | // resumed. If that happens, we can't run finalization yet. |
1802 | if (handleNeedFinalize(oldState)) |
1803 | return true; |
1804 | |
1805 | // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then |
1806 | // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would |
1807 | // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit |
1808 | // and there would be some other bit indicating whether we were in some GC phase other than the |
1809 | // NotRunning or Concurrent ones. |
1810 | if (oldState & mutatorHasConnBit) |
1811 | collectInMutatorThread(); |
1812 | |
1813 | return false; |
1814 | } |
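
// When the mutator holds the conn, it drives the collector's phase machine itself. runCurrentPhase
// can report that it needs the current thread's state (registers and stack), presumably so the
// conservative scan can see them; in that case we sanitize the stack and re-enter the phase loop
// under callWithCurrentThreadState so that state is available.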
1815 | |
1816 | NEVER_INLINE void Heap::collectInMutatorThread() |
1817 | { |
1818 | CollectingScope collectingScope(*this); |
1819 | for (;;) { |
1820 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr); |
1821 | switch (result) { |
1822 | case RunCurrentPhaseResult::Finished: |
1823 | return; |
1824 | case RunCurrentPhaseResult::Continue: |
1825 | break; |
1826 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1827 | sanitizeStackForVM(m_vm); |
1828 | auto lambda = [&] (CurrentThreadState& state) { |
1829 | for (;;) { |
1830 | RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state); |
1831 | switch (result) { |
1832 | case RunCurrentPhaseResult::Finished: |
1833 | return; |
1834 | case RunCurrentPhaseResult::Continue: |
1835 | break; |
1836 | case RunCurrentPhaseResult::NeedCurrentThreadState: |
1837 | RELEASE_ASSERT_NOT_REACHED(); |
1838 | break; |
1839 | } |
1840 | } |
1841 | }; |
1842 | callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda))); |
1843 | return; |
1844 | } |
1845 | } |
1846 | } |
1847 | |
1848 | template<typename Func> |
1849 | void Heap::waitForCollector(const Func& func) |
1850 | { |
1851 | for (;;) { |
1852 | bool done; |
1853 | { |
1854 | LockHolder locker(*m_threadLock); |
1855 | done = func(locker); |
1856 | if (!done) { |
1857 | setMutatorWaiting(); |
1858 | |
1859 | // At this point, the collector knows that we intend to wait, and he will clear the |
1860 | // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit |
1861 | // prevents us from parking except if there is also stop-the-world. Unparking after |
1862 | // clearing means that if the clearing happens after we park, then we will unpark. |
1863 | } |
1864 | } |
1865 | |
1866 | // If we're in a stop-the-world scenario, we need to wait for that even if done is true. |
1867 | unsigned oldState = m_worldState.load(); |
1868 | if (stopIfNecessarySlow(oldState)) |
1869 | continue; |
1870 | |
1871 | // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just |
1872 | // do the collection. |
1873 | relinquishConn(); |
1874 | |
1875 | if (done) { |
1876 | clearMutatorWaiting(); // Clean up just in case. |
1877 | return; |
1878 | } |
1879 | |
1880 | // If mutatorWaitingBit is still set then we want to wait. |
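// ParkingLot::compareAndPark only puts us to sleep if m_worldState still holds the expected value
// with mutatorWaitingBit set; if the collector already cleared the bit (and unparked), the
// comparison fails and we loop around instead of sleeping through the wake-up.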
1881 | ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit); |
1882 | } |
1883 | } |
1884 | |
1885 | void Heap::acquireAccessSlow() |
1886 | { |
1887 | for (;;) { |
1888 | unsigned oldState = m_worldState.load(); |
1889 | RELEASE_ASSERT(!(oldState & hasAccessBit)); |
1890 | |
1891 | if (oldState & stoppedBit) { |
1892 | if (verboseStop) { |
1893 | dataLog("Stopping in acquireAccess!\n" ); |
1894 | WTFReportBacktrace(); |
1895 | } |
1896 | // Wait until we're not stopped anymore. |
1897 | ParkingLot::compareAndPark(&m_worldState, oldState); |
1898 | continue; |
1899 | } |
1900 | |
1901 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1902 | unsigned newState = oldState | hasAccessBit; |
1903 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1904 | handleGCDidJIT(); |
1905 | handleNeedFinalize(); |
1906 | m_mutatorDidRun = true; |
1907 | stopIfNecessary(); |
1908 | return; |
1909 | } |
1910 | } |
1911 | } |
1912 | |
1913 | void Heap::releaseAccessSlow() |
1914 | { |
1915 | for (;;) { |
1916 | unsigned oldState = m_worldState.load(); |
1917 | if (!(oldState & hasAccessBit)) { |
1918 | dataLog("FATAL: Attempting to release access but the mutator does not have access.\n" ); |
1919 | RELEASE_ASSERT_NOT_REACHED(); |
1920 | } |
1921 | if (oldState & stoppedBit) { |
1922 | dataLog("FATAL: Attempting to release access but the mutator is stopped.\n" ); |
1923 | RELEASE_ASSERT_NOT_REACHED(); |
1924 | } |
1925 | |
1926 | if (handleNeedFinalize(oldState)) |
1927 | continue; |
1928 | |
1929 | unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit); |
1930 | |
1931 | if ((oldState & mutatorHasConnBit) |
1932 | && m_nextPhase != m_currentPhase) { |
1933 | // This means that the collector thread had given us the conn so that we would do something |
1934 | // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In |
1935 | // the meantime, since we're handing the conn over, the collector will be awoken and it is |
1936 | // sure to have work to do. |
1937 | newState |= stoppedBit; |
1938 | } |
1939 | |
1940 | if (m_worldState.compareExchangeWeak(oldState, newState)) { |
1941 | if (oldState & mutatorHasConnBit) |
1942 | finishRelinquishingConn(); |
1943 | return; |
1944 | } |
1945 | } |
1946 | } |
1947 | |
1948 | bool Heap::relinquishConn(unsigned oldState) |
1949 | { |
1950 | RELEASE_ASSERT(oldState & hasAccessBit); |
1951 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
1952 | |
1953 | if (!(oldState & mutatorHasConnBit)) |
1954 | return false; // Done. |
1955 | |
1956 | if (m_threadShouldStop) |
1957 | return false; |
1958 | |
1959 | if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit)) |
1960 | return true; // Loop around. |
1961 | |
1962 | finishRelinquishingConn(); |
1963 | return true; |
1964 | } |
1965 | |
1966 | void Heap::finishRelinquishingConn() |
1967 | { |
1968 | if (false) |
1969 | dataLog("Relinquished the conn.\n" ); |
1970 | |
1971 | sanitizeStackForVM(m_vm); |
1972 | |
1973 | auto locker = holdLock(*m_threadLock); |
1974 | if (!m_requests.isEmpty()) |
1975 | m_threadCondition->notifyOne(locker); |
1976 | ParkingLot::unparkAll(&m_worldState); |
1977 | } |
1978 | |
1979 | void Heap::relinquishConn() |
1980 | { |
1981 | while (relinquishConn(m_worldState.load())) { } |
1982 | } |
1983 | |
1984 | bool Heap::handleGCDidJIT(unsigned oldState) |
1985 | { |
1986 | RELEASE_ASSERT(oldState & hasAccessBit); |
1987 | if (!(oldState & gcDidJITBit)) |
1988 | return false; |
1989 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) { |
1990 | WTF::crossModifyingCodeFence(); |
1991 | return true; |
1992 | } |
1993 | return true; |
1994 | } |
1995 | |
1996 | NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState) |
1997 | { |
1998 | RELEASE_ASSERT(oldState & hasAccessBit); |
1999 | RELEASE_ASSERT(!(oldState & stoppedBit)); |
2000 | |
2001 | if (!(oldState & needFinalizeBit)) |
2002 | return false; |
2003 | if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) { |
2004 | finalize(); |
2005 | // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in |
2006 | // which case they would be waiting for us to release heap access. |
2007 | ParkingLot::unparkAll(&m_worldState); |
2008 | return true; |
2009 | } |
2010 | return true; |
2011 | } |
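
// In the two state-snapshot helpers above, returning true means "the state may have changed,
// re-read m_worldState and try again": both the successful CAS (which did the work) and a failed
// CAS return true, so the wrappers below simply spin until the relevant bit is observed clear.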
2012 | |
2013 | void Heap::handleGCDidJIT() |
2014 | { |
2015 | while (handleGCDidJIT(m_worldState.load())) { } |
2016 | } |
2017 | |
2018 | void Heap::handleNeedFinalize() |
2019 | { |
2020 | while (handleNeedFinalize(m_worldState.load())) { } |
2021 | } |
2022 | |
2023 | void Heap::setGCDidJIT() |
2024 | { |
2025 | m_worldState.transaction( |
2026 | [&] (unsigned& state) -> bool { |
2027 | RELEASE_ASSERT(state & stoppedBit); |
2028 | state |= gcDidJITBit; |
2029 | return true; |
2030 | }); |
2031 | } |
2032 | |
2033 | void Heap::setNeedFinalize() |
2034 | { |
2035 | m_worldState.exchangeOr(needFinalizeBit); |
2036 | ParkingLot::unparkAll(&m_worldState); |
2037 | m_stopIfNecessaryTimer->scheduleSoon(); |
2038 | } |
2039 | |
2040 | void Heap::waitWhileNeedFinalize() |
2041 | { |
2042 | for (;;) { |
2043 | unsigned oldState = m_worldState.load(); |
2044 | if (!(oldState & needFinalizeBit)) { |
2045 | // This means that either there was no finalize request or the main thread will finalize |
2046 | // with heap access, so a subsequent call to stopTheWorld() will return only when |
2047 | // finalize finishes. |
2048 | return; |
2049 | } |
2050 | ParkingLot::compareAndPark(&m_worldState, oldState); |
2051 | } |
2052 | } |
2053 | |
2054 | void Heap::setMutatorWaiting() |
2055 | { |
2056 | m_worldState.exchangeOr(mutatorWaitingBit); |
2057 | } |
2058 | |
2059 | void Heap::clearMutatorWaiting() |
2060 | { |
2061 | m_worldState.exchangeAnd(~mutatorWaitingBit); |
2062 | } |
2063 | |
2064 | void Heap::notifyThreadStopping(const AbstractLocker&) |
2065 | { |
2066 | m_threadIsStopping = true; |
2067 | clearMutatorWaiting(); |
2068 | ParkingLot::unparkAll(&m_worldState); |
2069 | } |
2070 | |
2071 | void Heap::finalize() |
2072 | { |
2073 | MonotonicTime before; |
2074 | if (Options::logGC()) { |
2075 | before = MonotonicTime::now(); |
2076 | dataLog("[GC<" , RawPointer(this), ">: finalize " ); |
2077 | } |
2078 | |
2079 | { |
2080 | SweepingScope sweepingScope(*this); |
2081 | deleteUnmarkedCompiledCode(); |
2082 | deleteSourceProviderCaches(); |
2083 | sweepInFinalize(); |
2084 | } |
2085 | |
2086 | if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache()) |
2087 | cache->clear(); |
2088 | |
2089 | immutableButterflyToStringCache.clear(); |
2090 | |
2091 | for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks) |
2092 | callback.run(*vm()); |
2093 | |
2094 | if (shouldSweepSynchronously()) |
2095 | sweepSynchronously(); |
2096 | |
2097 | if (Options::logGC()) { |
2098 | MonotonicTime after = MonotonicTime::now(); |
2099 | dataLog((after - before).milliseconds(), "ms]\n" ); |
2100 | } |
2101 | } |
2102 | |
2103 | Heap::Ticket Heap::requestCollection(GCRequest request) |
2104 | { |
2105 | stopIfNecessary(); |
2106 | |
2107 | ASSERT(vm()->currentThreadIsHoldingAPILock()); |
2108 | RELEASE_ASSERT(vm()->atomStringTable() == Thread::current().atomStringTable()); |
2109 | |
2110 | LockHolder locker(*m_threadLock); |
2111 | // We may be able to steal the conn. That only works if the collector is definitely not running |
2112 | // right now. This is an optimization that prevents the collector thread from ever starting in most |
2113 | // cases. |
2114 | ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
2115 | if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) { |
2116 | if (false) |
2117 | dataLog("Taking the conn.\n" ); |
2118 | m_worldState.exchangeOr(mutatorHasConnBit); |
2119 | } |
2120 | |
2121 | m_requests.append(request); |
2122 | m_lastGrantedTicket++; |
2123 | if (!(m_worldState.load() & mutatorHasConnBit)) |
2124 | m_threadCondition->notifyOne(locker); |
2125 | return m_lastGrantedTicket; |
2126 | } |
2127 | |
2128 | void Heap::waitForCollection(Ticket ticket) |
2129 | { |
2130 | waitForCollector( |
2131 | [&] (const AbstractLocker&) -> bool { |
2132 | return m_lastServedTicket >= ticket; |
2133 | }); |
2134 | } |
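
// A synchronous collection driven from a mutator thread is therefore, roughly (illustrative
// sketch):
//
//     Ticket ticket = requestCollection(request);
//     waitForCollection(ticket);
//
// requestCollection may hand this thread the conn, in which case stopIfNecessary() ends up running
// the collection inline instead of waking the collector thread.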
2135 | |
2136 | void Heap::sweepInFinalize() |
2137 | { |
2138 | m_objectSpace.sweepLargeAllocations(); |
2139 | vm()->eagerlySweptDestructibleObjectSpace.sweep(); |
2140 | } |
2141 | |
2142 | void Heap::suspendCompilerThreads() |
2143 | { |
2144 | #if ENABLE(DFG_JIT) |
2145 | // We ensure the worklists so that it's not possible for the mutator to start a new worklist |
2146 | // after we have suspended the ones that he had started before. That's not very expensive since |
2147 | // the worklists use AutomaticThreads anyway. |
2148 | if (!VM::canUseJIT()) |
2149 | return; |
2150 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2151 | DFG::ensureWorklistForIndex(i).suspendAllThreads(); |
2152 | #endif |
2153 | } |
2154 | |
2155 | void Heap::willStartCollection() |
2156 | { |
2157 | if (Options::logGC()) |
2158 | dataLog("=> " ); |
2159 | |
2160 | if (shouldDoFullCollection()) { |
2161 | m_collectionScope = CollectionScope::Full; |
2162 | m_shouldDoFullCollection = false; |
2163 | if (Options::logGC()) |
2164 | dataLog("FullCollection, " ); |
2165 | if (false) |
2166 | dataLog("Full collection!\n" ); |
2167 | } else { |
2168 | m_collectionScope = CollectionScope::Eden; |
2169 | if (Options::logGC()) |
2170 | dataLog("EdenCollection, " ); |
2171 | if (false) |
2172 | dataLog("Eden collection!\n" ); |
2173 | } |
2174 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2175 | m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; |
2176 | m_extraMemorySize = 0; |
2177 | m_deprecatedExtraMemorySize = 0; |
2178 | #if ENABLE(RESOURCE_USAGE) |
2179 | m_externalMemorySize = 0; |
2180 | #endif |
2181 | |
2182 | if (m_fullActivityCallback) |
2183 | m_fullActivityCallback->willCollect(); |
2184 | } else { |
2185 | ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Eden); |
2186 | m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle; |
2187 | } |
2188 | |
2189 | if (m_edenActivityCallback) |
2190 | m_edenActivityCallback->willCollect(); |
2191 | |
2192 | for (auto* observer : m_observers) |
2193 | observer->willGarbageCollect(); |
2194 | } |
2195 | |
2196 | void Heap::prepareForMarking() |
2197 | { |
2198 | m_objectSpace.prepareForMarking(); |
2199 | } |
2200 | |
2201 | void Heap::reapWeakHandles() |
2202 | { |
2203 | m_objectSpace.reapWeakSets(); |
2204 | } |
2205 | |
2206 | void Heap::pruneStaleEntriesFromWeakGCMaps() |
2207 | { |
2208 | if (!m_collectionScope || m_collectionScope.value() != CollectionScope::Full) |
2209 | return; |
2210 | for (WeakGCMapBase* weakGCMap : m_weakGCMaps) |
2211 | weakGCMap->pruneStaleEntries(); |
2212 | } |
2213 | |
2214 | void Heap::sweepArrayBuffers() |
2215 | { |
2216 | m_arrayBuffers.sweep(*vm()); |
2217 | } |
2218 | |
2219 | void Heap::snapshotUnswept() |
2220 | { |
2221 | TimingScope timingScope(*this, "Heap::snapshotUnswept" ); |
2222 | m_objectSpace.snapshotUnswept(); |
2223 | } |
2224 | |
2225 | void Heap::deleteSourceProviderCaches() |
2226 | { |
2227 | if (m_lastCollectionScope && m_lastCollectionScope.value() == CollectionScope::Full) |
2228 | m_vm->clearSourceProviderCaches(); |
2229 | } |
2230 | |
2231 | void Heap::notifyIncrementalSweeper() |
2232 | { |
2233 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2234 | if (!m_logicallyEmptyWeakBlocks.isEmpty()) |
2235 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; |
2236 | } |
2237 | |
2238 | m_sweeper->startSweeping(*this); |
2239 | } |
2240 | |
2241 | void Heap::updateAllocationLimits() |
2242 | { |
2243 | static const bool verbose = false; |
2244 | |
2245 | if (verbose) { |
2246 | dataLog("\n" ); |
2247 | dataLog("bytesAllocatedThisCycle = " , m_bytesAllocatedThisCycle, "\n" ); |
2248 | } |
2249 | |
2250 | // Calculate our current heap size threshold for the purpose of figuring out when we should |
2251 | // run another collection. This isn't the same as either size() or capacity(), though it should |
// be somewhere between the two. The key is to match the size calculations involved in calls to
2253 | // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of |
2254 | // fragmentation, we may have size() much smaller than capacity(). |
2255 | size_t currentHeapSize = 0; |
2256 | |
2257 | // For marked space, we use the total number of bytes visited. This matches the logic for |
2258 | // BlockDirectory's calls to didAllocate(), which effectively accounts for the total size of |
2259 | // objects allocated rather than blocks used. This will underestimate capacity(), and in case |
2260 | // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because |
2261 | // cells usually have a narrow range of sizes. So, the underestimation is probably OK. |
2262 | currentHeapSize += m_totalBytesVisited; |
2263 | if (verbose) |
2264 | dataLog("totalBytesVisited = " , m_totalBytesVisited, ", currentHeapSize = " , currentHeapSize, "\n" ); |
2265 | |
2266 | // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time |
2267 | // extra memory reporting. |
2268 | currentHeapSize += extraMemorySize(); |
2269 | if (!ASSERT_DISABLED) { |
2270 | Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited; |
2271 | checkedCurrentHeapSize += extraMemorySize(); |
2272 | ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize); |
2273 | } |
2274 | |
2275 | if (verbose) |
2276 | dataLog("extraMemorySize() = " , extraMemorySize(), ", currentHeapSize = " , currentHeapSize, "\n" ); |
2277 | |
2278 | if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) { |
2279 | // To avoid pathological GC churn in very small and very large heaps, we set |
2280 | // the new allocation limit based on the current size of the heap, with a |
2281 | // fixed minimum. |
2282 | m_maxHeapSize = std::max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize)); |
2283 | if (verbose) |
2284 | dataLog("Full: maxHeapSize = " , m_maxHeapSize, "\n" ); |
2285 | m_maxEdenSize = m_maxHeapSize - currentHeapSize; |
2286 | if (verbose) |
2287 | dataLog("Full: maxEdenSize = " , m_maxEdenSize, "\n" ); |
2288 | m_sizeAfterLastFullCollect = currentHeapSize; |
2289 | if (verbose) |
2290 | dataLog("Full: sizeAfterLastFullCollect = " , currentHeapSize, "\n" ); |
2291 | m_bytesAbandonedSinceLastFullCollect = 0; |
2292 | if (verbose) |
2293 | dataLog("Full: bytesAbandonedSinceLastFullCollect = " , 0, "\n" ); |
2294 | } else { |
2295 | ASSERT(currentHeapSize >= m_sizeAfterLastCollect); |
2296 | // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have. |
2297 | // But we are sloppy, so we have to defend against the overflow. |
2298 | m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize; |
2299 | if (verbose) |
2300 | dataLog("Eden: maxEdenSize = " , m_maxEdenSize, "\n" ); |
2301 | m_sizeAfterLastEdenCollect = currentHeapSize; |
2302 | if (verbose) |
2303 | dataLog("Eden: sizeAfterLastEdenCollect = " , currentHeapSize, "\n" ); |
2304 | double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize; |
2305 | double minEdenToOldGenerationRatio = 1.0 / 3.0; |
2306 | if (edenToOldGenerationRatio < minEdenToOldGenerationRatio) |
2307 | m_shouldDoFullCollection = true; |
2308 | // This seems suspect at first, but what it does is ensure that the nursery size is fixed. |
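// Illustrative numbers (assuming proportionalHeapSize() roughly doubled the heap at the last full
// collection): if that full collection left m_sizeAfterLastCollect = 100MB and m_maxHeapSize =
// 200MB, the eden budget was 100MB. If this eden collection ends at currentHeapSize = 150MB,
// m_maxHeapSize grows by 50MB to 250MB and m_maxEdenSize becomes 250MB - 150MB = 100MB again:
// survivors accumulate in the old generation while the nursery budget stays put.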
2309 | m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect; |
2310 | if (verbose) |
2311 | dataLog("Eden: maxHeapSize = " , m_maxHeapSize, "\n" ); |
2312 | m_maxEdenSize = m_maxHeapSize - currentHeapSize; |
2313 | if (verbose) |
2314 | dataLog("Eden: maxEdenSize = " , m_maxEdenSize, "\n" ); |
2315 | if (m_fullActivityCallback) { |
2316 | ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect); |
2317 | m_fullActivityCallback->didAllocate(*this, currentHeapSize - m_sizeAfterLastFullCollect); |
2318 | } |
2319 | } |
2320 | |
2321 | #if PLATFORM(IOS_FAMILY) |
2322 | // Get critical memory threshold for next cycle. |
2323 | overCriticalMemoryThreshold(MemoryThresholdCallType::Direct); |
2324 | #endif |
2325 | |
2326 | m_sizeAfterLastCollect = currentHeapSize; |
2327 | if (verbose) |
2328 | dataLog("sizeAfterLastCollect = " , m_sizeAfterLastCollect, "\n" ); |
2329 | m_bytesAllocatedThisCycle = 0; |
2330 | |
2331 | if (Options::logGC()) |
2332 | dataLog("=> " , currentHeapSize / 1024, "kb, " ); |
2333 | } |
2334 | |
2335 | void Heap::didFinishCollection() |
2336 | { |
2337 | m_afterGC = MonotonicTime::now(); |
2338 | CollectionScope scope = *m_collectionScope; |
2339 | if (scope == CollectionScope::Full) |
2340 | m_lastFullGCLength = m_afterGC - m_beforeGC; |
2341 | else |
2342 | m_lastEdenGCLength = m_afterGC - m_beforeGC; |
2343 | |
2344 | #if ENABLE(RESOURCE_USAGE) |
2345 | ASSERT(externalMemorySize() <= extraMemorySize()); |
2346 | #endif |
2347 | |
2348 | if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) { |
2349 | gatherExtraHeapSnapshotData(*heapProfiler); |
2350 | removeDeadHeapSnapshotNodes(*heapProfiler); |
2351 | } |
2352 | |
2353 | if (UNLIKELY(m_verifier)) |
2354 | m_verifier->endGC(); |
2355 | |
2356 | RELEASE_ASSERT(m_collectionScope); |
2357 | m_lastCollectionScope = m_collectionScope; |
2358 | m_collectionScope = WTF::nullopt; |
2359 | |
2360 | for (auto* observer : m_observers) |
2361 | observer->didGarbageCollect(scope); |
2362 | } |
2363 | |
2364 | void Heap::resumeCompilerThreads() |
2365 | { |
2366 | #if ENABLE(DFG_JIT) |
2367 | if (!VM::canUseJIT()) |
2368 | return; |
2369 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2370 | DFG::existingWorklistForIndex(i).resumeAllThreads(); |
2371 | #endif |
2372 | } |
2373 | |
2374 | GCActivityCallback* Heap::fullActivityCallback() |
2375 | { |
2376 | return m_fullActivityCallback.get(); |
2377 | } |
2378 | |
2379 | GCActivityCallback* Heap::edenActivityCallback() |
2380 | { |
2381 | return m_edenActivityCallback.get(); |
2382 | } |
2383 | |
2384 | IncrementalSweeper& Heap::sweeper() |
2385 | { |
2386 | return m_sweeper.get(); |
2387 | } |
2388 | |
2389 | void Heap::setGarbageCollectionTimerEnabled(bool enable) |
2390 | { |
2391 | if (m_fullActivityCallback) |
2392 | m_fullActivityCallback->setEnabled(enable); |
2393 | if (m_edenActivityCallback) |
2394 | m_edenActivityCallback->setEnabled(enable); |
2395 | } |
2396 | |
2397 | void Heap::didAllocate(size_t bytes) |
2398 | { |
2399 | if (m_edenActivityCallback) |
2400 | m_edenActivityCallback->didAllocate(*this, m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect); |
2401 | m_bytesAllocatedThisCycle += bytes; |
2402 | performIncrement(bytes); |
2403 | } |
2404 | |
2405 | bool Heap::isValidAllocation(size_t) |
2406 | { |
2407 | if (!isValidThreadState(m_vm)) |
2408 | return false; |
2409 | |
2410 | if (isCurrentThreadBusy()) |
2411 | return false; |
2412 | |
2413 | return true; |
2414 | } |
2415 | |
2416 | void Heap::addFinalizer(JSCell* cell, Finalizer finalizer) |
2417 | { |
2418 | WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize(). |
2419 | } |
2420 | |
2421 | void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context) |
2422 | { |
2423 | HandleSlot slot = handle.slot(); |
2424 | Finalizer finalizer = reinterpret_cast<Finalizer>(context); |
2425 | finalizer(slot->asCell()); |
2426 | WeakSet::deallocate(WeakImpl::asWeakImpl(slot)); |
2427 | } |
2428 | |
2429 | void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness) |
2430 | { |
2431 | if (!m_fullActivityCallback) { |
2432 | collectNow(synchronousness, CollectionScope::Full); |
2433 | return; |
2434 | } |
2435 | |
2436 | if (m_fullActivityCallback->didGCRecently()) { |
// A synchronous GC was already requested recently, so we merely accelerate the next collection.
2438 | reportAbandonedObjectGraph(); |
2439 | return; |
2440 | } |
2441 | |
2442 | m_fullActivityCallback->setDidGCRecently(); |
2443 | collectNow(synchronousness, CollectionScope::Full); |
2444 | } |
2445 | |
2446 | bool Heap::useGenerationalGC() |
2447 | { |
2448 | return Options::useGenerationalGC() && !VM::isInMiniMode(); |
2449 | } |
2450 | |
2451 | bool Heap::shouldSweepSynchronously() |
2452 | { |
2453 | return Options::sweepSynchronously() || VM::isInMiniMode(); |
2454 | } |
2455 | |
2456 | bool Heap::shouldDoFullCollection() |
2457 | { |
2458 | if (!useGenerationalGC()) |
2459 | return true; |
2460 | |
2461 | if (!m_currentRequest.scope) |
2462 | return m_shouldDoFullCollection || overCriticalMemoryThreshold(); |
2463 | return *m_currentRequest.scope == CollectionScope::Full; |
2464 | } |
2465 | |
2466 | void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block) |
2467 | { |
2468 | m_logicallyEmptyWeakBlocks.append(block); |
2469 | } |
2470 | |
2471 | void Heap::sweepAllLogicallyEmptyWeakBlocks() |
2472 | { |
2473 | if (m_logicallyEmptyWeakBlocks.isEmpty()) |
2474 | return; |
2475 | |
2476 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0; |
2477 | while (sweepNextLogicallyEmptyWeakBlock()) { } |
2478 | } |
2479 | |
2480 | bool Heap::sweepNextLogicallyEmptyWeakBlock() |
2481 | { |
2482 | if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound) |
2483 | return false; |
2484 | |
2485 | WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep]; |
2486 | |
2487 | block->sweep(); |
2488 | if (block->isEmpty()) { |
2489 | std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last()); |
2490 | m_logicallyEmptyWeakBlocks.removeLast(); |
2491 | WeakBlock::destroy(*this, block); |
2492 | } else |
2493 | m_indexOfNextLogicallyEmptyWeakBlockToSweep++; |
2494 | |
2495 | if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) { |
2496 | m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound; |
2497 | return false; |
2498 | } |
2499 | |
2500 | return true; |
2501 | } |
2502 | |
2503 | size_t Heap::visitCount() |
2504 | { |
2505 | size_t result = 0; |
2506 | forEachSlotVisitor( |
2507 | [&] (SlotVisitor& visitor) { |
2508 | result += visitor.visitCount(); |
2509 | }); |
2510 | return result; |
2511 | } |
2512 | |
2513 | size_t Heap::bytesVisited() |
2514 | { |
2515 | size_t result = 0; |
2516 | forEachSlotVisitor( |
2517 | [&] (SlotVisitor& visitor) { |
2518 | result += visitor.bytesVisited(); |
2519 | }); |
2520 | return result; |
2521 | } |
2522 | |
2523 | void Heap::forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>& func) |
2524 | { |
2525 | // We don't know the full set of CodeBlocks until compilation has terminated. |
2526 | completeAllJITPlans(); |
2527 | |
2528 | return m_codeBlocks->iterate(func); |
2529 | } |
2530 | |
2531 | void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<void(CodeBlock*)>& func) |
2532 | { |
2533 | return m_codeBlocks->iterate(locker, func); |
2534 | } |
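
// When the mutator must be fenced (concurrent marking), m_barrierThreshold is the tautological
// threshold (see setMutatorShouldBeFenced below), so this slow path fires for every barriered
// store; the storeLoadFence then lets us read an up-to-date cellState for 'from' before deciding
// whether it is genuinely black and must be re-scanned via the remembered set.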
2535 | |
2536 | void Heap::writeBarrierSlowPath(const JSCell* from) |
2537 | { |
2538 | if (UNLIKELY(mutatorShouldBeFenced())) { |
2539 | // In this case, the barrierThreshold is the tautological threshold, so from could still be |
2540 | // not black. But we can't know for sure until we fire off a fence. |
2541 | WTF::storeLoadFence(); |
2542 | if (from->cellState() != CellState::PossiblyBlack) |
2543 | return; |
2544 | } |
2545 | |
2546 | addToRememberedSet(from); |
2547 | } |
2548 | |
2549 | bool Heap::isCurrentThreadBusy() |
2550 | { |
2551 | return Thread::mayBeGCThread() || mutatorState() != MutatorState::Running; |
2552 | } |
2553 | |
void Heap::reportExtraMemoryVisited(size_t size)
2555 | { |
2556 | size_t* counter = &m_extraMemorySize; |
2557 | |
2558 | for (;;) { |
2559 | size_t oldSize = *counter; |
2560 | // FIXME: Change this to use SaturatedArithmetic when available. |
2561 | // https://bugs.webkit.org/show_bug.cgi?id=170411 |
2562 | Checked<size_t, RecordOverflow> checkedNewSize = oldSize; |
2563 | checkedNewSize += size; |
2564 | size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); |
2565 | if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize)) |
2566 | return; |
2567 | } |
2568 | } |
2569 | |
2570 | #if ENABLE(RESOURCE_USAGE) |
2571 | void Heap::reportExternalMemoryVisited(size_t size) |
2572 | { |
2573 | size_t* counter = &m_externalMemorySize; |
2574 | |
2575 | for (;;) { |
2576 | size_t oldSize = *counter; |
2577 | if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size)) |
2578 | return; |
2579 | } |
2580 | } |
2581 | #endif |
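
// Decide whether this allocation should trigger a collection. First, if the collector may want us
// to stop, either record that in the deferral context, remember it for when the deferral ends, or
// call stopIfNecessary() right away. Then, if the bytes allocated this cycle exceed the budget -
// the explicit Options::gcMaxHeapSize() cap, or otherwise the computed eden budget (tightened on
// iOS when we are over the critical memory threshold) - request an asynchronous collection through
// the same defer-or-act logic.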
2582 | |
2583 | void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext) |
2584 | { |
2585 | ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread()); |
2586 | if (validateDFGDoesGC) |
2587 | RELEASE_ASSERT(expectDoesGC()); |
2588 | |
2589 | if (!m_isSafeToCollect) |
2590 | return; |
2591 | |
2592 | switch (mutatorState()) { |
2593 | case MutatorState::Running: |
2594 | case MutatorState::Allocating: |
2595 | break; |
2596 | case MutatorState::Sweeping: |
2597 | case MutatorState::Collecting: |
2598 | return; |
2599 | } |
2600 | if (!Options::useGC()) |
2601 | return; |
2602 | |
2603 | if (mayNeedToStop()) { |
2604 | if (deferralContext) |
2605 | deferralContext->m_shouldGC = true; |
2606 | else if (isDeferred()) |
2607 | m_didDeferGCWork = true; |
2608 | else |
2609 | stopIfNecessary(); |
2610 | } |
2611 | |
2612 | if (UNLIKELY(Options::gcMaxHeapSize())) { |
2613 | if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize()) |
2614 | return; |
2615 | } else { |
2616 | size_t bytesAllowedThisCycle = m_maxEdenSize; |
2617 | |
2618 | #if PLATFORM(IOS_FAMILY) |
2619 | if (overCriticalMemoryThreshold()) |
2620 | bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle); |
2621 | #endif |
2622 | |
2623 | if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle) |
2624 | return; |
2625 | } |
2626 | |
2627 | if (deferralContext) |
2628 | deferralContext->m_shouldGC = true; |
2629 | else if (isDeferred()) |
2630 | m_didDeferGCWork = true; |
2631 | else { |
2632 | collectAsync(); |
2633 | stopIfNecessary(); // This will immediately start the collection if we have the conn. |
2634 | } |
2635 | } |
2636 | |
2637 | void Heap::decrementDeferralDepthAndGCIfNeededSlow() |
2638 | { |
2639 | // Can't do anything if we're still deferred. |
2640 | if (m_deferralDepth) |
2641 | return; |
2642 | |
2643 | ASSERT(!isDeferred()); |
2644 | |
2645 | m_didDeferGCWork = false; |
2646 | // FIXME: Bring back something like the DeferGCProbability mode. |
2647 | // https://bugs.webkit.org/show_bug.cgi?id=166627 |
2648 | collectIfNecessaryOrDefer(); |
2649 | } |
2650 | |
2651 | void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap) |
2652 | { |
2653 | m_weakGCMaps.add(weakGCMap); |
2654 | } |
2655 | |
2656 | void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap) |
2657 | { |
2658 | m_weakGCMaps.remove(weakGCMap); |
2659 | } |
2660 | |
2661 | void Heap::didAllocateBlock(size_t capacity) |
2662 | { |
2663 | #if ENABLE(RESOURCE_USAGE) |
2664 | m_blockBytesAllocated += capacity; |
2665 | #else |
2666 | UNUSED_PARAM(capacity); |
2667 | #endif |
2668 | } |
2669 | |
2670 | void Heap::didFreeBlock(size_t capacity) |
2671 | { |
2672 | #if ENABLE(RESOURCE_USAGE) |
2673 | m_blockBytesAllocated -= capacity; |
2674 | #else |
2675 | UNUSED_PARAM(capacity); |
2676 | #endif |
2677 | } |
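
// Each constraint below is registered with a volatility hint. Roughly (the precise meanings are in
// ConstraintVolatility.h): GreyedByExecution constraints may need re-evaluation whenever the
// mutator has run, so they act like classical GC roots; GreyedByMarking constraints can produce
// new work whenever marking itself greys objects; SeldomGreyed constraints rarely produce anything
// new and are mostly worth revisiting late in the marking fixpoint.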
2678 | |
2679 | void Heap::addCoreConstraints() |
2680 | { |
2681 | m_constraintSet->add( |
2682 | "Cs" , "Conservative Scan" , |
2683 | [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable { |
2684 | bool shouldNotProduceWork = lastVersion == m_phaseVersion; |
2685 | if (shouldNotProduceWork) |
2686 | return; |
2687 | |
2688 | TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan" ); |
2689 | m_objectSpace.prepareForConservativeScan(); |
2690 | m_jitStubRoutines->prepareForConservativeScan(); |
2691 | |
2692 | { |
2693 | ConservativeRoots conservativeRoots(*this); |
2694 | SuperSamplerScope superSamplerScope(false); |
2695 | |
2696 | gatherStackRoots(conservativeRoots); |
2697 | gatherJSStackRoots(conservativeRoots); |
2698 | gatherScratchBufferRoots(conservativeRoots); |
2699 | |
2700 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan); |
2701 | slotVisitor.append(conservativeRoots); |
2702 | } |
2703 | if (VM::canUseJIT()) { |
2704 | // JITStubRoutines must be visited after scanning ConservativeRoots since JITStubRoutines depend on the hook executed during gathering ConservativeRoots. |
2705 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::JITStubRoutines); |
2706 | m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor); |
2707 | } |
2708 | |
2709 | lastVersion = m_phaseVersion; |
2710 | }, |
2711 | ConstraintVolatility::GreyedByExecution); |
2712 | |
2713 | m_constraintSet->add( |
2714 | "Msr" , "Misc Small Roots" , |
2715 | [this] (SlotVisitor& slotVisitor) { |
2716 | |
2717 | #if JSC_OBJC_API_ENABLED |
2718 | scanExternalRememberedSet(*m_vm, slotVisitor); |
2719 | #endif |
2720 | if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope)) { |
2721 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongReferences); |
2722 | m_vm->smallStrings.visitStrongReferences(slotVisitor); |
2723 | } |
2724 | |
2725 | { |
2726 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ProtectedValues); |
2727 | for (auto& pair : m_protectedValues) |
2728 | slotVisitor.appendUnbarriered(pair.key); |
2729 | } |
2730 | |
2731 | if (m_markListSet && m_markListSet->size()) { |
2732 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan); |
2733 | MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet); |
2734 | } |
2735 | |
2736 | { |
2737 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::VMExceptions); |
2738 | slotVisitor.appendUnbarriered(m_vm->exception()); |
2739 | slotVisitor.appendUnbarriered(m_vm->lastException()); |
2740 | } |
2741 | }, |
2742 | ConstraintVolatility::GreyedByExecution); |
2743 | |
2744 | m_constraintSet->add( |
2745 | "Sh" , "Strong Handles" , |
2746 | [this] (SlotVisitor& slotVisitor) { |
2747 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongHandles); |
2748 | m_handleSet.visitStrongHandles(slotVisitor); |
2749 | }, |
2750 | ConstraintVolatility::GreyedByExecution); |
2751 | |
2752 | m_constraintSet->add( |
2753 | "D" , "Debugger" , |
2754 | [this] (SlotVisitor& slotVisitor) { |
2755 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Debugger); |
2756 | |
2757 | #if ENABLE(SAMPLING_PROFILER) |
2758 | if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) { |
2759 | LockHolder locker(samplingProfiler->getLock()); |
2760 | samplingProfiler->processUnverifiedStackTraces(); |
2761 | samplingProfiler->visit(slotVisitor); |
2762 | if (Options::logGC() == GCLogging::Verbose) |
2763 | dataLog("Sampling Profiler data:\n" , slotVisitor); |
2764 | } |
2765 | #endif // ENABLE(SAMPLING_PROFILER) |
2766 | |
2767 | if (m_vm->typeProfiler()) |
2768 | m_vm->typeProfilerLog()->visit(slotVisitor); |
2769 | |
2770 | if (auto* shadowChicken = m_vm->shadowChicken()) |
2771 | shadowChicken->visitChildren(slotVisitor); |
2772 | }, |
2773 | ConstraintVolatility::GreyedByExecution); |
2774 | |
2775 | m_constraintSet->add( |
2776 | "Ws" , "Weak Sets" , |
2777 | [this] (SlotVisitor& slotVisitor) { |
2778 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::WeakSets); |
2779 | m_objectSpace.visitWeakSets(slotVisitor); |
2780 | }, |
2781 | ConstraintVolatility::GreyedByMarking); |
2782 | |
2783 | m_constraintSet->add( |
2784 | "O" , "Output" , |
2785 | [] (SlotVisitor& slotVisitor) { |
2786 | VM& vm = slotVisitor.vm(); |
2787 | |
2788 | auto callOutputConstraint = [] (SlotVisitor& slotVisitor, HeapCell* heapCell, HeapCell::Kind) { |
2789 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Output); |
2790 | VM& vm = slotVisitor.vm(); |
2791 | JSCell* cell = static_cast<JSCell*>(heapCell); |
2792 | cell->methodTable(vm)->visitOutputConstraints(cell, slotVisitor); |
2793 | }; |
2794 | |
2795 | auto add = [&] (auto& set) { |
2796 | slotVisitor.addParallelConstraintTask(set.forEachMarkedCellInParallel(callOutputConstraint)); |
2797 | }; |
2798 | |
2799 | add(vm.executableToCodeBlockEdgesWithConstraints); |
2800 | if (vm.m_weakMapSpace) |
2801 | add(*vm.m_weakMapSpace); |
2802 | }, |
2803 | ConstraintVolatility::GreyedByMarking, |
2804 | ConstraintParallelism::Parallel); |
2805 | |
2806 | #if ENABLE(DFG_JIT) |
2807 | if (VM::canUseJIT()) { |
2808 | m_constraintSet->add( |
2809 | "Dw" , "DFG Worklists" , |
2810 | [this] (SlotVisitor& slotVisitor) { |
2811 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::DFGWorkLists); |
2812 | |
2813 | for (unsigned i = DFG::numberOfWorklists(); i--;) |
2814 | DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor); |
2815 | |
2816 | // FIXME: This is almost certainly unnecessary. |
2817 | // https://bugs.webkit.org/show_bug.cgi?id=166829 |
2818 | DFG::iterateCodeBlocksForGC( |
2819 | *m_vm, |
2820 | [&] (CodeBlock* codeBlock) { |
2821 | slotVisitor.appendUnbarriered(codeBlock); |
2822 | }); |
2823 | |
2824 | if (Options::logGC() == GCLogging::Verbose) |
2825 | dataLog("DFG Worklists:\n" , slotVisitor); |
2826 | }, |
2827 | ConstraintVolatility::GreyedByMarking); |
2828 | } |
2829 | #endif |
2830 | |
2831 | m_constraintSet->add( |
2832 | "Cb" , "CodeBlocks" , |
2833 | [this] (SlotVisitor& slotVisitor) { |
2834 | SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::CodeBlocks); |
2835 | iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks( |
2836 | [&] (CodeBlock* codeBlock) { |
2837 | // Visit the CodeBlock as a constraint only if it's black. |
2838 | if (isMarked(codeBlock) |
2839 | && codeBlock->cellState() == CellState::PossiblyBlack) |
2840 | slotVisitor.visitAsConstraint(codeBlock); |
2841 | }); |
2842 | }, |
2843 | ConstraintVolatility::SeldomGreyed); |
2844 | |
2845 | m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this)); |
2846 | } |
2847 | |
2848 | void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint) |
2849 | { |
2850 | PreventCollectionScope preventCollectionScope(*this); |
2851 | m_constraintSet->add(WTFMove(constraint)); |
2852 | } |
2853 | |
2854 | void Heap::notifyIsSafeToCollect() |
2855 | { |
2856 | MonotonicTime before; |
2857 | if (Options::logGC()) { |
2858 | before = MonotonicTime::now(); |
2859 | dataLog("[GC<" , RawPointer(this), ">: starting " ); |
2860 | } |
2861 | |
2862 | addCoreConstraints(); |
2863 | |
2864 | m_isSafeToCollect = true; |
2865 | |
2866 | if (Options::collectContinuously()) { |
2867 | m_collectContinuouslyThread = Thread::create( |
2868 | "JSC DEBUG Continuous GC" , |
2869 | [this] () { |
2870 | MonotonicTime initialTime = MonotonicTime::now(); |
2871 | Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS()); |
2872 | while (!m_shouldStopCollectingContinuously) { |
2873 | { |
2874 | LockHolder locker(*m_threadLock); |
2875 | if (m_requests.isEmpty()) { |
2876 | m_requests.append(WTF::nullopt); |
2877 | m_lastGrantedTicket++; |
2878 | m_threadCondition->notifyOne(locker); |
2879 | } |
2880 | } |
2881 | |
2882 | { |
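// Sleep until the next multiple of the collection period, measured from initialTime, so that a
// slow collection does not shift later wake-ups.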
2883 | LockHolder locker(m_collectContinuouslyLock); |
2884 | Seconds elapsed = MonotonicTime::now() - initialTime; |
2885 | Seconds elapsedInPeriod = elapsed % period; |
2886 | MonotonicTime timeToWakeUp = |
2887 | initialTime + elapsed - elapsedInPeriod + period; |
2888 | while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) { |
2889 | m_collectContinuouslyCondition.waitUntil( |
2890 | m_collectContinuouslyLock, timeToWakeUp); |
2891 | } |
2892 | } |
2893 | } |
2894 | }); |
2895 | } |
2896 | |
2897 | if (Options::logGC()) |
2898 | dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n" ); |
2899 | } |
2900 | |
2901 | void Heap::preventCollection() |
2902 | { |
2903 | if (!m_isSafeToCollect) |
2904 | return; |
2905 | |
2906 | // This prevents the collectContinuously thread from starting a collection. |
2907 | m_collectContinuouslyLock.lock(); |
2908 | |
2909 | // Wait for all collections to finish. |
2910 | waitForCollector( |
2911 | [&] (const AbstractLocker&) -> bool { |
2912 | ASSERT(m_lastServedTicket <= m_lastGrantedTicket); |
2913 | return m_lastServedTicket == m_lastGrantedTicket; |
2914 | }); |
2915 | |
2916 | // Now a collection can only start if this thread starts it. |
2917 | RELEASE_ASSERT(!m_collectionScope); |
2918 | } |
2919 | |
2920 | void Heap::allowCollection() |
2921 | { |
2922 | if (!m_isSafeToCollect) |
2923 | return; |
2924 | |
2925 | m_collectContinuouslyLock.unlock(); |
2926 | } |
2927 | |
2928 | void Heap::setMutatorShouldBeFenced(bool value) |
2929 | { |
2930 | m_mutatorShouldBeFenced = value; |
2931 | m_barrierThreshold = value ? tautologicalThreshold : blackThreshold; |
2932 | } |
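
// Allocation-paced incremental marking: each didAllocate() call accrues marking "credit" scaled by
// Options::gcIncrementScale(). Once the balance reaches Options::gcIncrementBytes(), we drain up to
// Options::gcIncrementMaxBytes() of marking work on the mutator's SlotVisitor and subtract what was
// actually visited; overshooting leaves a negative balance that offsets future increments.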
2933 | |
2934 | void Heap::performIncrement(size_t bytes) |
2935 | { |
2936 | if (!m_objectSpace.isMarking()) |
2937 | return; |
2938 | |
2939 | if (isDeferred()) |
2940 | return; |
2941 | |
2942 | m_incrementBalance += bytes * Options::gcIncrementScale(); |
2943 | |
2944 | // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent |
2945 | // state when the double goes wild. |
2946 | if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance)) |
2947 | m_incrementBalance = 0; |
2948 | |
2949 | if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes())) |
2950 | return; |
2951 | |
2952 | double targetBytes = m_incrementBalance; |
2953 | if (targetBytes <= 0) |
2954 | return; |
2955 | targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes()); |
2956 | |
2957 | SlotVisitor& slotVisitor = *m_mutatorSlotVisitor; |
2958 | ParallelModeEnabler parallelModeEnabler(slotVisitor); |
2959 | size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes)); |
2960 | // incrementBalance may go negative here because it'll remember how many bytes we overshot. |
2961 | m_incrementBalance -= bytesVisited; |
2962 | } |
2963 | |
2964 | void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback) |
2965 | { |
2966 | m_heapFinalizerCallbacks.append(callback); |
2967 | } |
2968 | |
2969 | void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback) |
2970 | { |
2971 | m_heapFinalizerCallbacks.removeFirst(callback); |
2972 | } |
2973 | |
2974 | void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task) |
2975 | { |
2976 | auto locker = holdLock(m_markingMutex); |
2977 | m_bonusVisitorTask = task; |
2978 | m_markingConditionVariable.notifyAll(); |
2979 | } |
2980 | |
2981 | void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task) |
2982 | { |
2983 | unsigned initialRefCount = task->refCount(); |
2984 | setBonusVisitorTask(task); |
2985 | task->run(*m_collectorSlotVisitor); |
2986 | setBonusVisitorTask(nullptr); |
2987 | // The constraint solver expects return of this function to imply termination of the task in all |
2988 | // threads. This ensures that property. |
2989 | { |
2990 | auto locker = holdLock(m_markingMutex); |
2991 | while (task->refCount() > initialRefCount) |
2992 | m_markingConditionVariable.wait(m_markingMutex); |
2993 | } |
2994 | } |
2995 | |
2996 | } // namespace JSC |
2997 | |