/*
 * Copyright (C) 2012-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "BlockDirectoryInlines.h"
#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapAnalyzer.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "IntegrityInlines.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include "MarkingConstraintSolver.h"
#include "SlotVisitorInlines.h"
#include "StopIfNecessaryTimer.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/ListDump.h>
#include <wtf/Lock.h>
#include <wtf/StdLibExtras.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    VM& vm = cell->vm();
    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
        if (cell->structure()->JSCell::classInfo(vm))
            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain.
    const ClassInfo* info = cell->classInfo(vm);
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
    , m_codeName(codeName)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

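// Called when a marking phase begins. Eden collections reset the per-visitor counters via
// reset(); full collections only clear the accumulated extra memory size. This also picks up
// the active heap analyzer (if any) and caches the current marking version.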
void SlotVisitor::didStartMarking()
{
    auto scope = heap()->collectionScope();
    if (scope) {
        switch (*scope) {
        case CollectionScope::Eden:
            reset();
            break;
        case CollectionScope::Full:
            m_extraMemorySize = 0;
            break;
        }
    }

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapAnalyzer = heapProfiler->activeHeapAnalyzer();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapAnalyzer = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.clear();
            return IterationStatus::Continue;
        });
}

void SlotVisitor::append(const ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

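// Conservative roots may point at either proper JSCells or auxiliary (butterfly / raw storage)
// allocations. JSCells found here are validated before being marked and pushed onto the mark
// stack (in debug builds they are validated even earlier, before the mark bit is set);
// auxiliaries are simply noted as live.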
void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;

    ASSERT(!m_isCheckingForDefaultMarkViolation);

    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();

        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };

        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");

        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");

#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (!heap()->structureIDTable().isValid(structureID))
            die("GC scan found corrupt object: structureID is invalid!\n");
#endif
    };

    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && isJSCellKind(heapCell->cellKind()))
        validateCell(static_cast<JSCell*>(heapCell));

    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;

    switch (heapCell->cellKind()) {
    case HeapCell::JSCell:
    case HeapCell::JSCellWithInteriorPointers: {
        // We have ample budget to perform validation here.

        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);
        Integrity::auditCell(vm(), jsCell);

        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }

    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

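// appendSlow() reports the visited edge to the active heap analyzer (if any) before marking;
// appendHiddenSlow() skips that report. Both funnel into appendHiddenSlowImpl(), which
// dispatches on whether the cell lives in a PreciseAllocation or a MarkedBlock.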
void SlotVisitor::appendSlow(JSCell* cell, Dependency dependency)
{
    if (UNLIKELY(m_heapAnalyzer))
        m_heapAnalyzer->analyzeEdge(m_currentCell, cell, m_rootMarkReason);

    appendHiddenSlowImpl(cell, dependency);
}

void SlotVisitor::appendHiddenSlow(JSCell* cell, Dependency dependency)
{
    appendHiddenSlowImpl(cell, dependency);
}

ALWAYS_INLINE void SlotVisitor::appendHiddenSlowImpl(JSCell* cell, Dependency dependency)
{
    ASSERT(!m_isCheckingForDefaultMarkViolation);

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif

    if (cell->isPreciseAllocation())
        setMarkedAndAppendToMarkStack(cell->preciseAllocation(), cell, dependency);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell, dependency);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell, Dependency dependency)
{
    if (container.testAndSetMarked(cell, dependency))
        return;

    ASSERT(cell->structure());

    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);

    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isPreciseAllocation())
        appendToMarkStack(cell->preciseAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(m_heap.isMarked(cell));
#if CPU(X86_64)
    if (Options::dumpZappedCellCrashData()) {
        if (UNLIKELY(cell->isZapped()))
            reportZappedCellAndCrash(cell);
    }
#endif
    ASSERT(!cell->isZapped());

    container.noteMarked();

    m_visitCount++;
    m_bytesVisited += container.cellSize();

    m_collectorStack.append(cell);
}

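// Marks an auxiliary (non-JSCell) allocation, such as a butterfly, reached from some owning
// cell. Auxiliaries are not visited for children: a newly marked one is only accounted for via
// noteLiveAuxiliaryCell() and never pushed onto a mark stack.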
void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);

    ASSERT(cell->heap() == heap());

    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;

    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.

    CellContainer container = cell->cellContainer();

    container.assertValidCell(vm(), cell);
    container.noteMarked();

    m_visitCount++;

    size_t cellSize = container.cellSize();
    m_bytesVisited += cellSize;
    m_nonCellVisitCount += cellSize;
}

class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

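// The core visit routine: the cell is blackened, a store-load fence publishes that state
// change, and then the appropriate visitChildren() implementation is invoked, with inlined
// fast paths for strings, final objects, and arrays. On first visits the heap analyzer (if
// active) is told about the node.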
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(m_heap.isMarked(cell));

    SetCurrentCellScope currentCellScope(*this, cell);

    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }

    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.

    cell->setCellState(CellState::PossiblyBlack);

    WTF::storeLoadFence();

    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
#if CPU(X86_64)
        if (Options::dumpZappedCellCrashData()) {
            Structure* structure = cell->structure(vm());
            if (LIKELY(structure)) {
                const MethodTable* methodTable = &structure->classInfo()->methodTable;
                methodTable->visitChildren(const_cast<JSCell*>(cell), *this);
                break;
            }
            reportZappedCellAndCrash(const_cast<JSCell*>(cell));
        }
#endif
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }

    if (UNLIKELY(m_heapAnalyzer)) {
        if (m_isFirstVisit)
            m_heapAnalyzer->analyzeNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

inline void SlotVisitor::propagateExternalMemoryVisitedIfNecessary()
{
    if (m_isFirstVisit) {
        if (m_extraMemorySize.hasOverflowed())
            heap()->reportExtraMemoryVisited(std::numeric_limits<size_t>::max());
        else if (m_extraMemorySize)
            heap()->reportExtraMemoryVisited(m_extraMemorySize.unsafeGet());
        m_extraMemorySize = 0;
    }
}

void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            donateKnownParallel(stack, correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });
}

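// m_mutatorIsStopped caches whether this visitor may act as if the mutator is stopped: it is
// the conjunction of the heap's worldIsStopped() and m_canOptimizeForStoppedMutator. The
// cached value is only refreshed while holding m_rightToRun, so it cannot change in the
// middle of a drain.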
void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

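// Drains this visitor's local mark stacks until they are empty or the timeout elapses. Each
// pass visits up to Options::minimumNumberOfScansBetweenRebalance() cells from one non-empty
// stack, then hits the m_rightToRun safepoint and donates work to the shared stacks so idle
// markers can pick it up.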
NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    auto locker = holdLock(m_rightToRun);

    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        IterationStatus status = forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                if (stack.isEmpty())
                    return IterationStatus::Continue;

                stack.refill();

                m_isFirstVisit = (&stack == &m_collectorStack);

                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
                    visitChildren(stack.removeLast());
                return IterationStatus::Done;
            });
        propagateExternalMemoryVisitedIfNecessary();
        if (status == IterationStatus::Continue)
            break;

        m_rightToRun.safepoint();
        donateKnownParallel();
    }
}

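// Performs a bounded increment of marking: roughly bytesRequested worth of cells is pulled
// from the shared stacks (converting the byte budget to a cell count using
// MarkedBlock::atomSize), visited until the budget of cell bytes plus non-cell bytes is met,
// and anything left over is donated back to the shared stacks.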
size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
    RELEASE_ASSERT(m_isInParallelMode);

    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
    {
        auto locker = holdLock(m_heap.m_markingMutex);
        forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
            });
    }

    size_t cellBytesVisited = 0;
    m_nonCellVisitCount = 0;

    auto bytesVisited = [&] () -> size_t {
        return cellBytesVisited + m_nonCellVisitCount;
    };

    auto isDone = [&] () -> bool {
        return bytesVisited() >= bytesRequested;
    };

    {
        auto locker = holdLock(m_rightToRun);

        while (!isDone()) {
            updateMutatorIsStopped(locker);
            IterationStatus status = forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    if (stack.isEmpty() || isDone())
                        return IterationStatus::Continue;

                    stack.refill();

                    m_isFirstVisit = (&stack == &m_collectorStack);

                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
                    while (countdown && stack.canRemoveLast() && !isDone()) {
                        const JSCell* cell = stack.removeLast();
                        cellBytesVisited += cell->cellSize();
                        visitChildren(cell);
                        countdown--;
                    }
                    return IterationStatus::Done;
                });
            propagateExternalMemoryVisitedIfNecessary();
            if (status == IterationStatus::Continue)
                break;
            m_rightToRun.safepoint();
            donateKnownParallel();
        }
    }

    donateAll();

    return bytesVisited();
}

bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const AbstractLocker& locker)
{
    return !m_heap.m_numberOfActiveParallelMarkers
        && !hasWork(locker);
}

bool SlotVisitor::hasWork(const AbstractLocker&)
{
    return !isEmpty()
        || !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

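// The parallel drain loop. In MasterDrain mode this thread waits for work or for marking to
// terminate, and returns once termination is reached or the timeout elapses. In SlaveDrain
// mode it additionally schedules the stop-if-necessary timer on termination and may pick up a
// bonus visitor task. In either mode, a worker that finds shared work steals a share of cells
// from the shared stacks (the number of waiting markers is taken into account) and drains
// them locally.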
NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool isActive = false;
    while (true) {
        RefPtr<SharedTask<void(SlotVisitor&)>> bonusTask;

        {
            auto locker = holdLock(m_heap.m_markingMutex);
            if (isActive)
                m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;

                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }

                    if (hasWork(locker))
                        break;

                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;

                if (didReachTermination(locker)) {
                    m_heap.m_markingConditionVariable.notifyAll();

                    // If we're in concurrent mode, then we know that the mutator will eventually do
                    // the right thing because:
                    // - It's possible that the collector has the conn. In that case, the collector will
                    //   wake up from the notification above. This will happen if the app released heap
                    //   access. Native apps can spend a lot of time with heap access released.
                    // - It's possible that the mutator will allocate soon. Then it will check if we
                    //   reached termination. This is the most likely outcome in programs that allocate
                    //   a lot.
                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
                    //   if we reached termination.
                    // So, this tells the runloop that it's got things to do.
                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
                }

                auto isReady = [&] () -> bool {
                    return hasWork(locker)
                        || m_heap.m_bonusVisitorTask
                        || m_heap.m_parallelMarkersShouldExit;
                };

                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);

                if (!hasWork(locker)
                    && m_heap.m_bonusVisitorTask)
                    bonusTask = m_heap.m_bonusVisitorTask;

                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            if (!bonusTask && isEmpty()) {
                forEachMarkStack(
                    [&] (MarkStackArray& stack) -> IterationStatus {
                        stack.stealSomeCellsFrom(
                            correspondingGlobalStack(stack),
                            m_heap.m_numberOfWaitingParallelMarkers);
                        return IterationStatus::Continue;
                    });
            }

            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }

        if (bonusTask) {
            bonusTask->run(*this);

            // The main thread could still be running, and may run for a while. Unless we clear the task
            // ourselves, we will keep looping around trying to run the task.
            {
                auto locker = holdLock(m_heap.m_markingMutex);
                if (m_heap.m_bonusVisitorTask == bonusTask)
                    m_heap.m_bonusVisitorTask = nullptr;
                bonusTask = nullptr;
                m_heap.m_markingConditionVariable.notifyAll();
            }
        } else {
            RELEASE_ASSERT(!isEmpty());
            drain(timeout);
        }

        isActive = true;
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

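// Passive variant of drainInParallel(): instead of draining on this thread, all local work is
// donated to the shared stacks and this thread merely waits for the parallel markers to reach
// termination. When that is not profitable (a single GC marker, a waiting mutator, no heap
// access, or a stopped world), it falls back to drainInParallel().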
SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    if (Options::numberOfGCMarkers() == 1
        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
        || !m_heap.hasHeapAccess()
        || m_heap.worldIsStopped()) {
        // This is an optimization over drainInParallel() when we have a concurrent mutator but
        // otherwise it is not profitable.
        return drainInParallel(timeout);
    }

    donateAll(holdLock(m_heap.m_markingMutex));
    return waitForTermination(timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::waitForTermination(MonotonicTime timeout)
{
    auto locker = holdLock(m_heap.m_markingMutex);
    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;

        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }

        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

void SlotVisitor::donateAll()
{
    if (isEmpty())
        return;

    donateAll(holdLock(m_heap.m_markingMutex));
}

void SlotVisitor::donateAll(const AbstractLocker&)
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.transferTo(correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donate()
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (Options::numberOfGCMarkers() == 1)
        return;

    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

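// Called when a visit race is detected (see VisitRaceKey). The raced cell is set back to
// PossiblyGrey and pushed onto the Heap's race mark stack so it will be revisited later.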
void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));

    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
    if (&stack == &m_collectorStack)
        return *m_heap.m_sharedCollectorMarkStack;
    RELEASE_ASSERT(&stack == &m_mutatorStack);
    return *m_heap.m_sharedMutatorMarkStack;
}

void SlotVisitor::addParallelConstraintTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
{
    RELEASE_ASSERT(m_currentSolver);
    RELEASE_ASSERT(m_currentConstraint);
    RELEASE_ASSERT(task);

    m_currentSolver->addParallelTask(task, *m_currentConstraint);
}

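// Crash handler used (x86_64 only, when Options::dumpZappedCellCrashData() is set) when a
// zapped cell is about to be visited. It records the cell address, the cell's first two
// words, a hash of the owning subspace's name, the cell size, the owning MarkedBlock, and a
// small bitfield of block/cell state in the crash registers:
//   bit 0: block is free-listed     bit 3: block needs destruction
//   bit 1: block is allocated       bit 4: cell is newly allocated
//   bit 2: block is empty           bit 5: cell address is cell-size aligned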
#if CPU(X86_64)
NEVER_INLINE NO_RETURN_DUE_TO_CRASH NOT_TAIL_CALLED void SlotVisitor::reportZappedCellAndCrash(JSCell* cell)
{
    MarkedBlock::Handle* foundBlockHandle = nullptr;
    uint64_t* cellWords = reinterpret_cast_ptr<uint64_t*>(cell);

    uintptr_t cellAddress = bitwise_cast<uintptr_t>(cell);
    uint64_t headerWord = cellWords[0];
    uint64_t zapReasonAndMore = cellWords[1];
    unsigned subspaceHash = 0;
    size_t cellSize = 0;

    m_heap.objectSpace().forEachBlock([&] (MarkedBlock::Handle* blockHandle) {
        if (blockHandle->contains(cell)) {
            foundBlockHandle = blockHandle;
            return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    });

    uint64_t variousState = 0;
    MarkedBlock* foundBlock = nullptr;
    if (foundBlockHandle) {
        foundBlock = &foundBlockHandle->block();
        subspaceHash = StringHasher::computeHash(foundBlockHandle->subspace()->name());
        cellSize = foundBlockHandle->cellSize();

        variousState |= static_cast<uint64_t>(foundBlockHandle->isFreeListed()) << 0;
        variousState |= static_cast<uint64_t>(foundBlockHandle->isAllocated()) << 1;
        variousState |= static_cast<uint64_t>(foundBlockHandle->isEmpty()) << 2;
        variousState |= static_cast<uint64_t>(foundBlockHandle->needsDestruction()) << 3;
        variousState |= static_cast<uint64_t>(foundBlock->isNewlyAllocated(cell)) << 4;

        ptrdiff_t cellOffset = cellAddress - reinterpret_cast<uint64_t>(foundBlockHandle->start());
        bool cellIsProperlyAligned = !(cellOffset % cellSize);
        variousState |= static_cast<uint64_t>(cellIsProperlyAligned) << 5;
    }

    CRASH_WITH_INFO(cellAddress, headerWord, zapReasonAndMore, subspaceHash, cellSize, foundBlock, variousState);
}
#endif // CPU(X86_64)

} // namespace JSC
