/*
 * Copyright (C) 2012-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "MarkingConstraintSolver.h"
#include "SlotVisitorInlines.h"
#include "StopIfNecessaryTimer.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/ListDump.h>
#include <wtf/Lock.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure and that structure's structure should be Structures, so the
    // two should report the same ClassInfo.
    VM& vm = *cell->vm();
    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
        if (cell->structure()->JSCell::classInfo(vm))
            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo(vm);
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
    , m_codeName(codeName)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

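// Called at the start of a marking phase. An eden collection resets the per-visitor counters,
// while a full collection only clears the accumulated extra-memory size. We also pick up the
// active heap snapshot builder (if a heap profiler is recording) and cache the current marking
// version so subsequent mark-bit tests use the right version.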
void SlotVisitor::didStartMarking()
{
    auto scope = heap()->collectionScope();
    if (scope) {
        switch (*scope) {
        case CollectionScope::Eden:
            reset();
            break;
        case CollectionScope::Full:
            m_extraMemorySize = 0;
            break;
        }
    }

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapSnapshotBuilder = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.clear();
            return IterationStatus::Continue;
        });
}

void SlotVisitor::append(const ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

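// Entry point for conservative roots: the pointer may refer either to a JSCell or to a raw
// auxiliary allocation (e.g. a butterfly or other backing store). JSCells are validated and
// pushed onto the collector mark stack; auxiliaries are only marked and accounted for here,
// since their contents are expected to be visited through the owning object.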
void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;

    ASSERT(!m_isCheckingForDefaultMarkViolation);

    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();

        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };

        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");

        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");

#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (!heap()->structureIDTable().isValid(structureID))
            die("GC scan found corrupt object: structureID is invalid!\n");
#endif
    };

    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && isJSCellKind(heapCell->cellKind()))
        validateCell(static_cast<JSCell*>(heapCell));

    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;

    switch (heapCell->cellKind()) {
    case HeapCell::JSCell:
    case HeapCell::JSCellWithInteriorPointers: {
        // We have ample budget to perform validation here.

        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);

        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }

    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

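// Out-of-line part of append(): records a heap-snapshot edge from the cell currently being
// visited (when a snapshot is being built) and then marks and queues the target cell.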
void SlotVisitor::appendSlow(JSCell* cell, Dependency dependency)
{
    if (UNLIKELY(m_heapSnapshotBuilder))
        m_heapSnapshotBuilder->appendEdge(m_currentCell, cell, m_rootMarkReason);

    appendHiddenSlowImpl(cell, dependency);
}

void SlotVisitor::appendHiddenSlow(JSCell* cell, Dependency dependency)
{
    appendHiddenSlowImpl(cell, dependency);
}

ALWAYS_INLINE void SlotVisitor::appendHiddenSlowImpl(JSCell* cell, Dependency dependency)
{
    ASSERT(!m_isCheckingForDefaultMarkViolation);

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif

    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell, dependency);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell, dependency);
}

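// Shared marking helper for both MarkedBlock and LargeAllocation containers. If we win the race
// to set the mark bit, the cell is turned grey and queued for visiting; if another visitor (or a
// previous pass) already marked it, there is nothing to do.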
template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell, Dependency dependency)
{
    if (container.testAndSetMarked(cell, dependency))
        return;

    ASSERT(cell->structure());

    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);

    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(m_heap.isMarked(cell));
    ASSERT(!cell->isZapped());

    container.noteMarked();

    m_visitCount++;
    m_bytesVisited += container.cellSize();

    m_collectorStack.append(cell);
}

void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
{
    m_mutatorStack.append(cell);
}

void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);

    ASSERT(cell->heap() == heap());

    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;

    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.

    CellContainer container = cell->cellContainer();

    container.assertValidCell(vm(), cell);
    container.noteMarked();

    m_visitCount++;

    size_t cellSize = container.cellSize();
    m_bytesVisited += cellSize;
    m_nonCellVisitCount += cellSize;
}

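// RAII helper that records which cell this visitor is currently working on, so that heap-snapshot
// edges (and assertions) can be attributed to the in-flight cell.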
class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

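// Visits a single cell: the cell is flipped to PossiblyBlack, and the store-load fence below is
// presumably what keeps that state change ordered before we start loading the cell's fields on
// weakly ordered CPUs. Strings, final objects, and arrays are dispatched directly to avoid the
// method-table indirection; everything else goes through the method table.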
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(m_heap.isMarked(cell));

    SetCurrentCellScope currentCellScope(*this, cell);

    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }

    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.

    cell->setCellState(CellState::PossiblyBlack);

    WTF::storeLoadFence();

    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }

    if (UNLIKELY(m_heapSnapshotBuilder)) {
        if (m_isFirstVisit)
            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

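// Flushes the extra (non-GC-heap) memory discovered while visiting into the Heap's accounting.
// This only happens on first visits, and the checked counter saturates to size_t max if it
// overflowed.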
inline void SlotVisitor::propagateExternalMemoryVisitedIfNecessary()
{
    if (m_isFirstVisit) {
        if (m_extraMemorySize.hasOverflowed())
            heap()->reportExtraMemoryVisited(std::numeric_limits<size_t>::max());
        else if (m_extraMemorySize)
            heap()->reportExtraMemoryVisited(m_extraMemorySize.unsafeGet());
        m_extraMemorySize = 0;
    }
}

void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            donateKnownParallel(stack, correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });
}

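// Caches whether this visitor may treat the mutator as stopped: that requires both that the
// world is actually stopped and that this visitor has opted in via optimizeForStoppedMutator().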
void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

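// Drains this visitor's local mark stacks until they are empty or the timeout elapses. Work is
// taken in batches of Options::minimumNumberOfScansBetweenRebalance() cells; between batches we
// hit a safepoint on m_rightToRun and donate surplus cells to the shared stacks so that other
// markers can pick them up.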
NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    auto locker = holdLock(m_rightToRun);

    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        IterationStatus status = forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                if (stack.isEmpty())
                    return IterationStatus::Continue;

                stack.refill();

                m_isFirstVisit = (&stack == &m_collectorStack);

                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
                    visitChildren(stack.removeLast());
                return IterationStatus::Done;
            });
        propagateExternalMemoryVisitedIfNecessary();
        if (status == IterationStatus::Continue)
            break;

        m_rightToRun.safepoint();
        donateKnownParallel();
    }
}

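// Performs a bounded increment of marking, for when draining needs to be metered by bytes.
// Roughly bytesRequested worth of cells is first pulled from the shared stacks (estimated in
// MarkedBlock::atomSize units), visited until the byte budget is consumed, and whatever remains
// is donated back. Returns the number of bytes actually visited, including auxiliary bytes.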
size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
    RELEASE_ASSERT(m_isInParallelMode);

    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
    {
        auto locker = holdLock(m_heap.m_markingMutex);
        forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
            });
    }

    size_t cellBytesVisited = 0;
    m_nonCellVisitCount = 0;

    auto bytesVisited = [&] () -> size_t {
        return cellBytesVisited + m_nonCellVisitCount;
    };

    auto isDone = [&] () -> bool {
        return bytesVisited() >= bytesRequested;
    };

    {
        auto locker = holdLock(m_rightToRun);

        while (!isDone()) {
            updateMutatorIsStopped(locker);
            IterationStatus status = forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    if (stack.isEmpty() || isDone())
                        return IterationStatus::Continue;

                    stack.refill();

                    m_isFirstVisit = (&stack == &m_collectorStack);

                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
                    while (countdown && stack.canRemoveLast() && !isDone()) {
                        const JSCell* cell = stack.removeLast();
                        cellBytesVisited += cell->cellSize();
                        visitChildren(cell);
                        countdown--;
                    }
                    return IterationStatus::Done;
                });
            propagateExternalMemoryVisitedIfNecessary();
            if (status == IterationStatus::Continue)
                break;
            m_rightToRun.safepoint();
            donateKnownParallel();
        }
    }

    donateAll();

    return bytesVisited();
}

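// Marking terminates when no parallel marker is actively draining and neither this visitor's
// local stacks nor the shared collector/mutator stacks have any work left.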
bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const AbstractLocker& locker)
{
    return !m_heap.m_numberOfActiveParallelMarkers
        && !hasWork(locker);
}

bool SlotVisitor::hasWork(const AbstractLocker&)
{
    return !isEmpty()
        || !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

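// The shared drain loop for parallel marking. A MasterDrain caller waits for work or for global
// termination and returns once marking is done or the timeout elapses. A SlaveDrain caller
// additionally arms the stop-if-necessary timer when termination is reached (see the comment
// below) and may pick up a "bonus" visitor task. Each iteration steals a share of cells from the
// shared stacks and drains them locally.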
NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool isActive = false;
    while (true) {
        RefPtr<SharedTask<void(SlotVisitor&)>> bonusTask;

        {
            auto locker = holdLock(m_heap.m_markingMutex);
            if (isActive)
                m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;

                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }

                    if (hasWork(locker))
                        break;

                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;

                if (didReachTermination(locker)) {
                    m_heap.m_markingConditionVariable.notifyAll();

                    // If we're in concurrent mode, then we know that the mutator will eventually do
                    // the right thing because:
                    // - It's possible that the collector has the conn. In that case, the collector will
                    //   wake up from the notification above. This will happen if the app released heap
                    //   access. Native apps can spend a lot of time with heap access released.
                    // - It's possible that the mutator will allocate soon. Then it will check if we
                    //   reached termination. This is the most likely outcome in programs that allocate
                    //   a lot.
                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
                    //   if we reached termination.
                    // So, this tells the runloop that it's got things to do.
                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
                }

                auto isReady = [&] () -> bool {
                    return hasWork(locker)
                        || m_heap.m_bonusVisitorTask
                        || m_heap.m_parallelMarkersShouldExit;
                };

                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);

                if (!hasWork(locker)
                    && m_heap.m_bonusVisitorTask)
                    bonusTask = m_heap.m_bonusVisitorTask;

                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            if (!bonusTask && isEmpty()) {
                forEachMarkStack(
                    [&] (MarkStackArray& stack) -> IterationStatus {
                        stack.stealSomeCellsFrom(
                            correspondingGlobalStack(stack),
                            m_heap.m_numberOfWaitingParallelMarkers);
                        return IterationStatus::Continue;
                    });
            }

            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }

        if (bonusTask) {
            bonusTask->run(*this);

            // The main thread could still be running, and may run for a while. Unless we clear the task
            // ourselves, we will keep looping around trying to run the task.
            {
                auto locker = holdLock(m_heap.m_markingMutex);
                if (m_heap.m_bonusVisitorTask == bonusTask)
                    m_heap.m_bonusVisitorTask = nullptr;
                bonusTask = nullptr;
                m_heap.m_markingConditionVariable.notifyAll();
            }
        } else {
            RELEASE_ASSERT(!isEmpty());
            drain(timeout);
        }

        isActive = true;
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    if (Options::numberOfGCMarkers() == 1
        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
        || !m_heap.hasHeapAccess()
        || m_heap.worldIsStopped()) {
        // Draining passively is only an optimization over drainInParallel() when a concurrent
        // mutator is running; otherwise it is not profitable, so just drain actively.
        return drainInParallel(timeout);
    }

    donateAll(holdLock(m_heap.m_markingMutex));
    return waitForTermination(timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::waitForTermination(MonotonicTime timeout)
{
    auto locker = holdLock(m_heap.m_markingMutex);
    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;

        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }

        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

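// Transfers everything on the local mark stacks to the shared stacks and wakes up any waiting
// markers.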
void SlotVisitor::donateAll()
{
    if (isEmpty())
        return;

    donateAll(holdLock(m_heap.m_markingMutex));
}

void SlotVisitor::donateAll(const AbstractLocker&)
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.transferTo(correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donate()
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (Options::numberOfGCMarkers() == 1)
        return;

    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

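// Called when visiting a cell raced with a concurrent mutation (the race is detected and reported
// by the cell's visit function). The cell is re-greyed and pushed onto the heap's dedicated race
// mark stack so that it can be revisited later.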
void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));

    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
    if (&stack == &m_collectorStack)
        return *m_heap.m_sharedCollectorMarkStack;
    RELEASE_ASSERT(&stack == &m_mutatorStack);
    return *m_heap.m_sharedMutatorMarkStack;
}

void SlotVisitor::addParallelConstraintTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
{
    RELEASE_ASSERT(m_currentSolver);
    RELEASE_ASSERT(m_currentConstraint);
    RELEASE_ASSERT(task);

    m_currentSolver->addParallelTask(task, *m_currentConstraint);
}

} // namespace JSC