/*
 * Copyright (C) 1999-2000 Harri Porten ([email protected])
 * Copyright (C) 2001 Peter Kelly ([email protected])
 * Copyright (C) 2003-2019 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#pragma once

#include "ArrayBuffer.h"
#include "CellState.h"
#include "CollectionScope.h"
#include "CollectorPhase.h"
#include "DeleteAllCodeEffort.h"
#include "GCConductor.h"
#include "GCIncomingRefCountedSet.h"
#include "GCMemoryOperations.h"
#include "GCRequest.h"
#include "HandleSet.h"
#include "HeapFinalizerCallback.h"
#include "HeapObserver.h"
#include "MarkedBlock.h"
#include "MarkedSpace.h"
#include "MutatorState.h"
#include "Options.h"
#include "StructureIDTable.h"
#include "Synchronousness.h"
#include "WeakHandleOwner.h"
#include <wtf/AutomaticThread.h>
#include <wtf/ConcurrentPtrHashSet.h>
#include <wtf/Deque.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/Markable.h>
#include <wtf/ParallelHelperPool.h>
#include <wtf/Threading.h>

namespace JSC {

class CodeBlock;
class CodeBlockSet;
class CollectingScope;
class ConservativeRoots;
class GCDeferralContext;
class EdenGCActivityCallback;
class FullGCActivityCallback;
class GCActivityCallback;
class GCAwareJITStubRoutine;
class Heap;
class HeapProfiler;
class HeapVerifier;
class IncrementalSweeper;
class JITStubRoutine;
class JITStubRoutineSet;
class JSCell;
class JSImmutableButterfly;
class JSValue;
class LLIntOffsetsExtractor;
class MachineThreads;
class MarkStackArray;
class MarkStackMergingConstraint;
class BlockDirectory;
class MarkedArgumentBuffer;
class MarkingConstraint;
class MarkingConstraintSet;
class MutatorScheduler;
class RunningScope;
class SlotVisitor;
class SpaceTimeMutatorScheduler;
class StopIfNecessaryTimer;
class SweepingScope;
class VM;
class WeakGCMapBase;
struct CurrentThreadState;

#ifdef JSC_GLIB_API_ENABLED
class JSCGLibWrapperObject;
#endif

namespace DFG {
class SpeculativeJIT;
class Worklist;
}

#if !ASSERT_DISABLED
#define ENABLE_DFG_DOES_GC_VALIDATION 1
#else
#define ENABLE_DFG_DOES_GC_VALIDATION 0
#endif
constexpr bool validateDFGDoesGC = ENABLE_DFG_DOES_GC_VALIDATION;

typedef HashCountedSet<JSCell*> ProtectCountSet;
typedef HashCountedSet<const char*> TypeCountSet;

enum HeapType { SmallHeap, LargeHeap };

class HeapUtil;

class Heap {
    WTF_MAKE_NONCOPYABLE(Heap);
public:
    friend class JIT;
    friend class DFG::SpeculativeJIT;
    static Heap* heap(const JSValue); // 0 for immediate values
    static Heap* heap(const HeapCell*);

    // This constant determines how many blocks we iterate between checks of our
    // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
    // overstepping our deadline more quickly, while increasing it will cause
    // our scan to run faster.
    static constexpr unsigned s_timeCheckResolution = 16;
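
    // Illustrative sketch only (added commentary; not the actual implementation): a scan that
    // honors this resolution checks the clock once per batch of blocks rather than once per
    // block, roughly like so:
    //
    //     unsigned blocksSinceCheck = 0;
    //     for (MarkedBlock* block : candidateBlocks) {  // hypothetical iteration
    //         probe(block);                              // hypothetical per-block work
    //         if (++blocksSinceCheck >= Heap::s_timeCheckResolution) {
    //             if (MonotonicTime::now() > deadline)
    //                 return true;                       // over deadline; report paged out
    //             blocksSinceCheck = 0;
    //         }
    //     }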

    bool isMarked(const void*);
    static bool testAndSetMarked(HeapVersion, const void*);

    static size_t cellSize(const void*);

    void writeBarrier(const JSCell* from);
    void writeBarrier(const JSCell* from, JSValue to);
    void writeBarrier(const JSCell* from, JSCell* to);

    void writeBarrierWithoutFence(const JSCell* from);

    void mutatorFence();

    // Take this if you know that from->cellState() < barrierThreshold.
    JS_EXPORT_PRIVATE void writeBarrierSlowPath(const JSCell* from);
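
    // Usage sketch (added commentary; everything here other than writeBarrier() itself is
    // hypothetical): after storing a reference into an already-constructed cell, tell the GC
    // about the old->new edge so a concurrent or generational collection does not miss it:
    //
    //     object->m_field = newCell;             // hypothetical raw field store
    //     vm.heap.writeBarrier(object, newCell); // record the edge
    //
    // writeBarrierWithoutFence() is for callers that have already executed the required fence;
    // mutatorFence() issues that fence by itself.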

    Heap(VM&, HeapType);
    ~Heap();
    void lastChanceToFinalize();
    void releaseDelayedReleasedObjects();

    VM& vm() const;

    MarkedSpace& objectSpace() { return m_objectSpace; }
    MachineThreads& machineThreads() { return *m_machineThreads; }

    SlotVisitor& collectorSlotVisitor() { return *m_collectorSlotVisitor; }

    JS_EXPORT_PRIVATE GCActivityCallback* fullActivityCallback();
    JS_EXPORT_PRIVATE GCActivityCallback* edenActivityCallback();
    JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);

    JS_EXPORT_PRIVATE IncrementalSweeper& sweeper();

    void addObserver(HeapObserver* observer) { m_observers.append(observer); }
    void removeObserver(HeapObserver* observer) { m_observers.removeFirst(observer); }

    MutatorState mutatorState() const { return m_mutatorState; }
    Optional<CollectionScope> collectionScope() const { return m_collectionScope; }
    bool hasHeapAccess() const;
    bool worldIsStopped() const;
    bool worldIsRunning() const { return !worldIsStopped(); }

    // We're always busy on the collection threads. On the main thread, this returns true if we're
    // helping heap.
    JS_EXPORT_PRIVATE bool isCurrentThreadBusy();

    typedef void (*Finalizer)(JSCell*);
    JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);

    void notifyIsSafeToCollect();
    bool isSafeToCollect() const { return m_isSafeToCollect; }

    bool isShuttingDown() const { return m_isShuttingDown; }

    JS_EXPORT_PRIVATE bool isAnalyzingHeap() const;

    JS_EXPORT_PRIVATE void sweepSynchronously();

    bool shouldCollectHeuristic();

    // Queue up a collection. Returns immediately. This will not queue a collection if a collection
    // of equal or greater strength exists. Full collections are stronger than WTF::nullopt collections
    // and WTF::nullopt collections are stronger than Eden collections. WTF::nullopt means that the GC can
    // choose Eden or Full. This implies that if you request a GC while that GC is ongoing, nothing
    // will happen.
    JS_EXPORT_PRIVATE void collectAsync(GCRequest = GCRequest());

    // Queue up a collection and wait for it to complete. This won't return until you get your own
    // complete collection. For example, if there was an ongoing asynchronous collection at the time
    // you called this, then this would wait for that one to complete and then trigger your
    // collection and then return. In weird cases, there could be multiple GC requests in the backlog
    // and this will wait for that backlog before running its GC and returning.
    JS_EXPORT_PRIVATE void collectSync(GCRequest = GCRequest());

    JS_EXPORT_PRIVATE void collect(Synchronousness, GCRequest = GCRequest());

    // Like collect(), but in the case of Async this will stopIfNecessary() and in the case of
    // Sync this will sweep synchronously.
    JS_EXPORT_PRIVATE void collectNow(Synchronousness, GCRequest = GCRequest());
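
    // Usage sketch (added commentary, assuming GCRequest's implicit construction from a
    // CollectionScope): a host that needs a complete collection right now can do
    //
    //     vm.heap.collectSync(CollectionScope::Full);
    //
    // while a host that merely wants to hint that now is a good time can do
    //
    //     vm.heap.collectAsync();
    //
    // and let the collector pick the scope.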

    JS_EXPORT_PRIVATE void collectNowFullIfNotDoneRecently(Synchronousness);

    void collectIfNecessaryOrDefer(GCDeferralContext* = nullptr);

    void completeAllJITPlans();

    // Use this API to report non-GC memory referenced by GC objects. Be sure to
    // call both of these functions: Calling only one may trigger catastrophic
    // memory growth.
    void reportExtraMemoryAllocated(size_t);
    JS_EXPORT_PRIVATE void reportExtraMemoryVisited(size_t);
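
    // Illustrative sketch (added commentary; the owning cell and its buffer are hypothetical):
    // a cell that owns a malloc'd buffer reports it once when the buffer is allocated and again
    // each time the cell is visited, so both the allocation and survival heuristics account for it:
    //
    //     // when the buffer is created:
    //     vm.heap.reportExtraMemoryAllocated(bufferSize);
    //
    //     // in the cell's visitChildren(), typically forwarded through the SlotVisitor:
    //     vm.heap.reportExtraMemoryVisited(bufferSize);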

#if ENABLE(RESOURCE_USAGE)
    // Use this API to report the subset of extra memory that lives outside this process.
    JS_EXPORT_PRIVATE void reportExternalMemoryVisited(size_t);
    size_t externalMemorySize() { return m_externalMemorySize; }
#endif

    // Use this API to report non-GC memory if you can't use the better API above.
    void deprecatedReportExtraMemory(size_t);

    JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();

    JS_EXPORT_PRIVATE void protect(JSValue);
    JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
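
    // Usage sketch (added commentary): protect() pins a value as a GC root until a matching
    // unprotect() drops its count back to zero; the calls nest by counting:
    //
    //     vm.heap.protect(value);   // count 1: value is now a root
    //     vm.heap.protect(value);   // count 2
    //     vm.heap.unprotect(value); // count 1; returns false
    //     vm.heap.unprotect(value); // count 0; returns true, value is collectable again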

    JS_EXPORT_PRIVATE size_t extraMemorySize(); // Non-GC memory referenced by GC objects.
    JS_EXPORT_PRIVATE size_t size();
    JS_EXPORT_PRIVATE size_t capacity();
    JS_EXPORT_PRIVATE size_t objectCount();
    JS_EXPORT_PRIVATE size_t globalObjectCount();
    JS_EXPORT_PRIVATE size_t protectedObjectCount();
    JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
    JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> protectedObjectTypeCounts();
    JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> objectTypeCounts();

    HashSet<MarkedArgumentBuffer*>& markListSet();

    template<typename Functor> void forEachProtectedCell(const Functor&);
    template<typename Functor> void forEachCodeBlock(const Functor&);
    template<typename Functor> void forEachCodeBlockIgnoringJITPlans(const AbstractLocker& codeBlockSetLocker, const Functor&);

    HandleSet* handleSet() { return &m_handleSet; }

    void willStartIterating();
    void didFinishIterating();

    Seconds lastFullGCLength() const { return m_lastFullGCLength; }
    Seconds lastEdenGCLength() const { return m_lastEdenGCLength; }
    void increaseLastFullGCLength(Seconds amount) { m_lastFullGCLength += amount; }

    size_t sizeBeforeLastEdenCollection() const { return m_sizeBeforeLastEdenCollect; }
    size_t sizeAfterLastEdenCollection() const { return m_sizeAfterLastEdenCollect; }
    size_t sizeBeforeLastFullCollection() const { return m_sizeBeforeLastFullCollect; }
    size_t sizeAfterLastFullCollection() const { return m_sizeAfterLastFullCollect; }

    void deleteAllCodeBlocks(DeleteAllCodeEffort);
    void deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort);

    void didAllocate(size_t);
    bool isPagedOut(MonotonicTime deadline);

    const JITStubRoutineSet& jitStubRoutines() { return *m_jitStubRoutines; }

    void addReference(JSCell*, ArrayBuffer*);

    bool isDeferred() const { return !!m_deferralDepth; }

    StructureIDTable& structureIDTable() { return m_structureIDTable; }

    CodeBlockSet& codeBlockSet() { return *m_codeBlocks; }

#if USE(FOUNDATION)
    template<typename T> void releaseSoon(RetainPtr<T>&&);
#endif
#ifdef JSC_GLIB_API_ENABLED
    void releaseSoon(std::unique_ptr<JSCGLibWrapperObject>&&);
#endif

    JS_EXPORT_PRIVATE void registerWeakGCMap(WeakGCMapBase* weakGCMap);
    JS_EXPORT_PRIVATE void unregisterWeakGCMap(WeakGCMapBase* weakGCMap);

    void addLogicallyEmptyWeakBlock(WeakBlock*);

#if ENABLE(RESOURCE_USAGE)
    size_t blockBytesAllocated() const { return m_blockBytesAllocated; }
#endif

    void didAllocateBlock(size_t capacity);
    void didFreeBlock(size_t capacity);

    bool mutatorShouldBeFenced() const { return m_mutatorShouldBeFenced; }
    const bool* addressOfMutatorShouldBeFenced() const { return &m_mutatorShouldBeFenced; }

    unsigned barrierThreshold() const { return m_barrierThreshold; }
    const unsigned* addressOfBarrierThreshold() const { return &m_barrierThreshold; }

#if ENABLE(DFG_DOES_GC_VALIDATION)
    bool expectDoesGC() const { return m_expectDoesGC; }
    void setExpectDoesGC(bool value) { m_expectDoesGC = value; }
    bool* addressOfExpectDoesGC() { return &m_expectDoesGC; }
#else
    bool expectDoesGC() const { UNREACHABLE_FOR_PLATFORM(); return true; }
    void setExpectDoesGC(bool) { UNREACHABLE_FOR_PLATFORM(); }
    bool* addressOfExpectDoesGC() { UNREACHABLE_FOR_PLATFORM(); return nullptr; }
#endif

    // If true, the GC believes that the mutator is currently messing with the heap. We call this
    // "having heap access". The GC may block if the mutator is in this state. If false, the GC may
    // currently be doing things to the heap that make the heap unsafe to access for the mutator.
    bool hasAccess() const;

    // If the mutator does not currently have heap access, this function will acquire it. If the GC
    // is currently using the lack of heap access to do dangerous things to the heap then this
    // function will block, waiting for the GC to finish. It's not valid to call this if the mutator
    // already has heap access. The mutator is required to precisely track whether or not it has
    // heap access.
    //
    // It's totally fine to acquireAccess() upon VM instantiation and keep it that way. This is how
    // WebCore uses us. For most other clients, JSLock does acquireAccess()/releaseAccess() for you.
    void acquireAccess();

    // Releases heap access. If the GC is blocking waiting to do bad things to the heap, it will be
    // allowed to run now.
    //
    // Ordinarily, you should use the ReleaseHeapAccessScope to release and then reacquire heap
    // access. You should do this any time you're about to perform a blocking operation, like waiting
    // on the ParkingLot.
    void releaseAccess();
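
    // Usage sketch (added commentary; the blocking call is hypothetical): a thread about to
    // block on something unrelated to JS should drop heap access around the wait so the
    // collector can run. ReleaseHeapAccessScope wraps this pattern; by hand it is:
    //
    //     vm.heap.releaseAccess();
    //     waitForSomethingThatIsNotJS(); // hypothetical blocking operation
    //     vm.heap.acquireAccess();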

    // This is like a super optimized way of saying:
    //
    //     releaseAccess()
    //     acquireAccess()
    //
    // The fast path is an inlined relaxed load and branch. The slow path will block the mutator if
    // the GC wants to do bad things to the heap.
    //
    // All allocations logically call this. As an optimization to improve GC progress, you can call
    // this anywhere that you can afford a load-branch and where an object allocation would have been
    // safe.
    //
    // The GC will also push a stopIfNecessary() event onto the runloop of the thread that
    // instantiated the VM whenever it wants the mutator to stop. This means that if you never block
    // but instead use the runloop to wait for events, then you could safely run in a mode where the
    // mutator has permanent heap access (like the DOM does). If you have good event handling
    // discipline (i.e. you don't block the runloop) then you can be sure that stopIfNecessary() will
    // already be called for you at the right times.
    void stopIfNecessary();
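
    // Usage sketch (added commentary; the loop and its work are hypothetical): long-running
    // mutator code that does not allocate can still let the collector make progress by polling:
    //
    //     for (auto& item : hugeWorkList) {
    //         process(item);
    //         vm.heap.stopIfNecessary(); // cheap load-and-branch unless the GC needs us to stop
    //     }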

    // This gives the conn to the collector.
    void relinquishConn();

    bool mayNeedToStop();

    void performIncrement(size_t bytes);

    // This is a much stronger kind of stopping of the collector, and it may require waiting for a
    // while. This is meant to be a legacy API for clients of collectAllGarbage that expect that there
    // is no GC before or after that function call. After calling this, you are free to start GCs
    // yourself but you can be sure that none are running.
    //
    // This both prevents new collections from being started asynchronously and waits for any
    // outstanding collections to complete.
    void preventCollection();
    void allowCollection();
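
    // Usage sketch (added commentary; the inspection step is hypothetical): a client that must
    // observe a quiescent heap brackets its work with these calls:
    //
    //     vm.heap.preventCollection(); // waits for any in-flight collection to finish
    //     inspectHeapInvariants();     // hypothetical work that must not race a GC
    //     vm.heap.allowCollection();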

    uint64_t mutatorExecutionVersion() const { return m_mutatorExecutionVersion; }
    uint64_t phaseVersion() const { return m_phaseVersion; }

    JS_EXPORT_PRIVATE void addMarkingConstraint(std::unique_ptr<MarkingConstraint>);

    size_t numOpaqueRoots() const { return m_opaqueRoots.size(); }

    HeapVerifier* verifier() const { return m_verifier.get(); }

    void addHeapFinalizerCallback(const HeapFinalizerCallback&);
    void removeHeapFinalizerCallback(const HeapFinalizerCallback&);

    void runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>>);

    template<typename Func>
    void runFunctionInParallel(const Func& func)
    {
        runTaskInParallel(createSharedTask<void(SlotVisitor&)>(func));
    }
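
    // Usage sketch (added commentary; the shared work is hypothetical): the functor runs on the
    // calling thread and on the parallel helper threads, each with its own SlotVisitor:
    //
    //     heap.runFunctionInParallel(
    //         [&] (SlotVisitor& visitor) {
    //             drainSomeSharedWork(visitor); // hypothetical per-thread work
    //         });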

    template<typename Func>
    void forEachSlotVisitor(const Func&);

    Seconds totalGCTime() const { return m_totalGCTime; }

    HashMap<JSImmutableButterfly*, JSString*> immutableButterflyToStringCache;

private:
    friend class AllocatingScope;
    friend class CodeBlock;
    friend class CollectingScope;
    friend class DeferGC;
    friend class DeferGCForAWhile;
    friend class GCAwareJITStubRoutine;
    friend class GCLogging;
    friend class GCThread;
    friend class HandleSet;
    friend class HeapUtil;
    friend class HeapVerifier;
    friend class JITStubRoutine;
    friend class LLIntOffsetsExtractor;
    friend class MarkStackMergingConstraint;
    friend class MarkedSpace;
    friend class BlockDirectory;
    friend class MarkedBlock;
    friend class RunningScope;
    friend class SlotVisitor;
    friend class SpaceTimeMutatorScheduler;
    friend class StochasticSpaceTimeMutatorScheduler;
    friend class SweepingScope;
    friend class IncrementalSweeper;
    friend class VM;
    friend class WeakSet;

    class HeapThread;
    friend class HeapThread;

    static constexpr size_t minExtraMemory = 256;

    class FinalizerOwner : public WeakHandleOwner {
        void finalize(Handle<Unknown>, void* context) override;
    };

    JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
    JS_EXPORT_PRIVATE void reportExtraMemoryAllocatedSlowCase(size_t);
    JS_EXPORT_PRIVATE void deprecatedReportExtraMemorySlowCase(size_t);

    bool shouldCollectInCollectorThread(const AbstractLocker&);
    void collectInCollectorThread();

    void checkConn(GCConductor);

    enum class RunCurrentPhaseResult {
        Finished,
        Continue,
        NeedCurrentThreadState
    };
    RunCurrentPhaseResult runCurrentPhase(GCConductor, CurrentThreadState*);

    // Returns true if we should keep doing things.
    bool runNotRunningPhase(GCConductor);
    bool runBeginPhase(GCConductor);
    bool runFixpointPhase(GCConductor);
    bool runConcurrentPhase(GCConductor);
    bool runReloopPhase(GCConductor);
    bool runEndPhase(GCConductor);
    bool changePhase(GCConductor, CollectorPhase);
    bool finishChangingPhase(GCConductor);

    void collectInMutatorThread();

    void stopThePeriphery(GCConductor);
    void resumeThePeriphery();

    // Returns true if the mutator is stopped, false if the mutator has the conn now.
    bool stopTheMutator();
    void resumeTheMutator();

    JS_EXPORT_PRIVATE void stopIfNecessarySlow();
    bool stopIfNecessarySlow(unsigned extraStateBits);

    template<typename Func>
    void waitForCollector(const Func&);

    JS_EXPORT_PRIVATE void acquireAccessSlow();
    JS_EXPORT_PRIVATE void releaseAccessSlow();

    bool handleGCDidJIT(unsigned);
    void handleGCDidJIT();

    bool handleNeedFinalize(unsigned);
    void handleNeedFinalize();

    bool relinquishConn(unsigned);
    void finishRelinquishingConn();

    void setGCDidJIT();
    void setNeedFinalize();
    void waitWhileNeedFinalize();

    void setMutatorWaiting();
    void clearMutatorWaiting();
    void notifyThreadStopping(const AbstractLocker&);

    typedef uint64_t Ticket;
    Ticket requestCollection(GCRequest);
    void waitForCollection(Ticket);

    void suspendCompilerThreads();
    void willStartCollection();
    void prepareForMarking();

    void gatherStackRoots(ConservativeRoots&);
    void gatherJSStackRoots(ConservativeRoots&);
    void gatherScratchBufferRoots(ConservativeRoots&);
    void beginMarking();
    void visitCompilerWorklistWeakReferences();
    void removeDeadCompilerWorklistEntries();
    void updateObjectCounts();
    void endMarking();

    void reapWeakHandles();
    void pruneStaleEntriesFromWeakGCMaps();
    void sweepArrayBuffers();
    void snapshotUnswept();
    void deleteSourceProviderCaches();
    void notifyIncrementalSweeper();
    void harvestWeakReferences();

    template<typename CellType, typename CellSet>
    void finalizeMarkedUnconditionalFinalizers(CellSet&);

    void finalizeUnconditionalFinalizers();

    void deleteUnmarkedCompiledCode();
    JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
    void updateAllocationLimits();
    void didFinishCollection();
    void resumeCompilerThreads();
    void gatherExtraHeapData(HeapProfiler&);
    void removeDeadHeapSnapshotNodes(HeapProfiler&);
    void finalize();
    void sweepInFinalize();

    void sweepAllLogicallyEmptyWeakBlocks();
    bool sweepNextLogicallyEmptyWeakBlock();

    bool shouldDoFullCollection();

    void incrementDeferralDepth();
    void decrementDeferralDepth();
    void decrementDeferralDepthAndGCIfNeeded();
    JS_EXPORT_PRIVATE void decrementDeferralDepthAndGCIfNeededSlow();

    size_t visitCount();
    size_t bytesVisited();

    void forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>&);
    void forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& codeBlockSetLocker, const ScopedLambda<void(CodeBlock*)>&);

    void setMutatorShouldBeFenced(bool value);

    void addCoreConstraints();

    enum class MemoryThresholdCallType {
        Cached,
        Direct
    };

    bool overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType = MemoryThresholdCallType::Cached);

    template<typename Func>
    void iterateExecutingAndCompilingCodeBlocks(const Func&);

    template<typename Func>
    void iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func&);

    void assertMarkStacksEmpty();

    void setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>>);

    void dumpHeapStatisticsAtVMDestruction();

    static bool useGenerationalGC();
    static bool shouldSweepSynchronously();

    const HeapType m_heapType;
    MutatorState m_mutatorState { MutatorState::Running };
    const size_t m_ramSize;
    const size_t m_minBytesPerCycle;
    size_t m_sizeAfterLastCollect { 0 };
    size_t m_sizeAfterLastFullCollect { 0 };
    size_t m_sizeBeforeLastFullCollect { 0 };
    size_t m_sizeAfterLastEdenCollect { 0 };
    size_t m_sizeBeforeLastEdenCollect { 0 };

    size_t m_bytesAllocatedThisCycle { 0 };
    size_t m_bytesAbandonedSinceLastFullCollect { 0 };
    size_t m_maxEdenSize;
    size_t m_maxEdenSizeWhenCritical;
    size_t m_maxHeapSize;
    size_t m_totalBytesVisited { 0 };
    size_t m_totalBytesVisitedThisCycle { 0 };
    double m_incrementBalance { 0 };

    bool m_shouldDoFullCollection { false };
    Markable<CollectionScope, EnumMarkableTraits<CollectionScope>> m_collectionScope;
    Markable<CollectionScope, EnumMarkableTraits<CollectionScope>> m_lastCollectionScope;
    Lock m_raceMarkStackLock;
#if ENABLE(DFG_DOES_GC_VALIDATION)
    bool m_expectDoesGC { true };
#endif

    StructureIDTable m_structureIDTable;
    MarkedSpace m_objectSpace;
    GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
    size_t m_extraMemorySize { 0 };
    size_t m_deprecatedExtraMemorySize { 0 };

    HashSet<const JSCell*> m_copyingRememberedSet;

    ProtectCountSet m_protectedValues;
    std::unique_ptr<HashSet<MarkedArgumentBuffer*>> m_markListSet;

    std::unique_ptr<MachineThreads> m_machineThreads;

    std::unique_ptr<SlotVisitor> m_collectorSlotVisitor;
    std::unique_ptr<SlotVisitor> m_mutatorSlotVisitor;
    std::unique_ptr<MarkStackArray> m_mutatorMarkStack;
    std::unique_ptr<MarkStackArray> m_raceMarkStack;
    std::unique_ptr<MarkingConstraintSet> m_constraintSet;

    // We pool the slot visitors used by parallel marking threads. It's useful to be able to
    // enumerate over them, and it's useful to have them cache some small amount of memory from
    // one GC to the next. GC marking threads claim these at the start of marking, and return
    // them at the end.
    Vector<std::unique_ptr<SlotVisitor>> m_parallelSlotVisitors;
    Vector<SlotVisitor*> m_availableParallelSlotVisitors;

    HandleSet m_handleSet;
    std::unique_ptr<CodeBlockSet> m_codeBlocks;
    std::unique_ptr<JITStubRoutineSet> m_jitStubRoutines;
    FinalizerOwner m_finalizerOwner;

    Lock m_parallelSlotVisitorLock;
    bool m_isSafeToCollect { false };
    bool m_isShuttingDown { false };
    bool m_mutatorShouldBeFenced { Options::forceFencedBarrier() };

    unsigned m_barrierThreshold { Options::forceFencedBarrier() ? tautologicalThreshold : blackThreshold };

    VM& m_vm;
    Seconds m_lastFullGCLength { 10_ms };
    Seconds m_lastEdenGCLength { 10_ms };

    Vector<WeakBlock*> m_logicallyEmptyWeakBlocks;
    size_t m_indexOfNextLogicallyEmptyWeakBlockToSweep { WTF::notFound };

    RefPtr<FullGCActivityCallback> m_fullActivityCallback;
    RefPtr<GCActivityCallback> m_edenActivityCallback;
    Ref<IncrementalSweeper> m_sweeper;
    Ref<StopIfNecessaryTimer> m_stopIfNecessaryTimer;

    Vector<HeapObserver*> m_observers;

    Vector<HeapFinalizerCallback> m_heapFinalizerCallbacks;

    std::unique_ptr<HeapVerifier> m_verifier;

#if USE(FOUNDATION)
    Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
    unsigned m_delayedReleaseRecursionCount { 0 };
#endif
#ifdef JSC_GLIB_API_ENABLED
    Vector<std::unique_ptr<JSCGLibWrapperObject>> m_delayedReleaseObjects;
    unsigned m_delayedReleaseRecursionCount { 0 };
#endif
    unsigned m_deferralDepth { 0 };

    HashSet<WeakGCMapBase*> m_weakGCMaps;

    std::unique_ptr<MarkStackArray> m_sharedCollectorMarkStack;
    std::unique_ptr<MarkStackArray> m_sharedMutatorMarkStack;
    unsigned m_numberOfActiveParallelMarkers { 0 };
    unsigned m_numberOfWaitingParallelMarkers { 0 };

    ConcurrentPtrHashSet m_opaqueRoots;
    static constexpr size_t s_blockFragmentLength = 32;

    ParallelHelperClient m_helperClient;
    RefPtr<SharedTask<void(SlotVisitor&)>> m_bonusVisitorTask;

#if ENABLE(RESOURCE_USAGE)
    size_t m_blockBytesAllocated { 0 };
    size_t m_externalMemorySize { 0 };
#endif

    std::unique_ptr<MutatorScheduler> m_scheduler;

    static constexpr unsigned mutatorHasConnBit = 1u << 0u; // Must also be protected by threadLock.
    static constexpr unsigned stoppedBit = 1u << 1u; // Only set when !hasAccessBit
    static constexpr unsigned hasAccessBit = 1u << 2u;
    static constexpr unsigned gcDidJITBit = 1u << 3u; // Set when the GC did some JITing, so on resume we need to cpuid.
    static constexpr unsigned needFinalizeBit = 1u << 4u;
    static constexpr unsigned mutatorWaitingBit = 1u << 5u; // Allows the mutator to use this as a condition variable.
    Atomic<unsigned> m_worldState;
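
    // Added note: because the bits above share this single atomic word, compound questions
    // about the world are answered by one load, e.g.
    //
    //     unsigned state = m_worldState.load();
    //     bool mutatorRunningWithConn = (state & hasAccessBit) && (state & mutatorHasConnBit);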
    bool m_worldIsStopped { false };
    Lock m_visitRaceLock;
    Lock m_markingMutex;
    Condition m_markingConditionVariable;

    MonotonicTime m_beforeGC;
    MonotonicTime m_afterGC;
    MonotonicTime m_stopTime;

    Deque<GCRequest> m_requests;
    GCRequest m_currentRequest;
    Ticket m_lastServedTicket { 0 };
    Ticket m_lastGrantedTicket { 0 };

    CollectorPhase m_lastPhase { CollectorPhase::NotRunning };
    CollectorPhase m_currentPhase { CollectorPhase::NotRunning };
    CollectorPhase m_nextPhase { CollectorPhase::NotRunning };
    bool m_collectorThreadIsRunning { false };
    bool m_threadShouldStop { false };
    bool m_threadIsStopping { false };
    bool m_mutatorDidRun { true };
    bool m_didDeferGCWork { false };
    bool m_shouldStopCollectingContinuously { false };

    uint64_t m_mutatorExecutionVersion { 0 };
    uint64_t m_phaseVersion { 0 };
    Box<Lock> m_threadLock;
    Ref<AutomaticThreadCondition> m_threadCondition; // The mutator must not wait on this. It would cause a deadlock.
    RefPtr<AutomaticThread> m_thread;

    RefPtr<Thread> m_collectContinuouslyThread { nullptr };

    MonotonicTime m_lastGCStartTime;
    MonotonicTime m_lastGCEndTime;
    MonotonicTime m_currentGCStartTime;
    Seconds m_totalGCTime;

    uintptr_t m_barriersExecuted { 0 };

    CurrentThreadState* m_currentThreadState { nullptr };
    Thread* m_currentThread { nullptr }; // It's OK if this becomes a dangling pointer.

#if PLATFORM(IOS_FAMILY)
    unsigned m_precentAvailableMemoryCachedCallCount;
    bool m_overCriticalMemoryThreshold;
#endif

    bool m_parallelMarkersShouldExit { false };
    Lock m_collectContinuouslyLock;
    Condition m_collectContinuouslyCondition;
};

} // namespace JSC