/*
 * Copyright (C) 1999-2000 Harri Porten ([email protected])
 * Copyright (C) 2001 Peter Kelly ([email protected])
 * Copyright (C) 2003-2019 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#pragma once

#include "ArrayBuffer.h"
#include "CellState.h"
#include "CollectionScope.h"
#include "CollectorPhase.h"
#include "DeleteAllCodeEffort.h"
#include "GCConductor.h"
#include "GCIncomingRefCountedSet.h"
#include "GCRequest.h"
#include "HandleSet.h"
#include "HeapFinalizerCallback.h"
#include "HeapObserver.h"
#include "MarkedBlock.h"
#include "MarkedSpace.h"
#include "MutatorState.h"
#include "Options.h"
#include "StructureIDTable.h"
#include "Synchronousness.h"
#include "WeakHandleOwner.h"
#include <wtf/AutomaticThread.h>
#include <wtf/ConcurrentPtrHashSet.h>
#include <wtf/Deque.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/Markable.h>
#include <wtf/ParallelHelperPool.h>
#include <wtf/Threading.h>

namespace JSC {

class CodeBlock;
class CodeBlockSet;
class CollectingScope;
class ConservativeRoots;
class GCDeferralContext;
class EdenGCActivityCallback;
class FullGCActivityCallback;
class GCActivityCallback;
class GCAwareJITStubRoutine;
class Heap;
class HeapProfiler;
class HeapVerifier;
class IncrementalSweeper;
class JITStubRoutine;
class JITStubRoutineSet;
class JSCell;
class JSImmutableButterfly;
class JSValue;
class LLIntOffsetsExtractor;
class MachineThreads;
class MarkStackArray;
class MarkStackMergingConstraint;
class BlockDirectory;
class MarkedArgumentBuffer;
class MarkingConstraint;
class MarkingConstraintSet;
class MutatorScheduler;
class RunningScope;
class SlotVisitor;
class SpaceTimeMutatorScheduler;
class StopIfNecessaryTimer;
class SweepingScope;
class VM;
class WeakGCMapBase;
struct CurrentThreadState;

#ifdef JSC_GLIB_API_ENABLED
class JSCGLibWrapperObject;
#endif

namespace DFG {
class SpeculativeJIT;
class Worklist;
}

#if !ASSERT_DISABLED
#define ENABLE_DFG_DOES_GC_VALIDATION 1
#else
#define ENABLE_DFG_DOES_GC_VALIDATION 0
#endif
constexpr bool validateDFGDoesGC = ENABLE_DFG_DOES_GC_VALIDATION;

typedef HashCountedSet<JSCell*> ProtectCountSet;
typedef HashCountedSet<const char*> TypeCountSet;

enum HeapType { SmallHeap, LargeHeap };

class HeapUtil;

class Heap {
    WTF_MAKE_NONCOPYABLE(Heap);
public:
    friend class JIT;
    friend class DFG::SpeculativeJIT;
    static Heap* heap(const JSValue); // 0 for immediate values
    static Heap* heap(const HeapCell*);

    // This constant determines how many blocks we iterate between checks of our
    // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
    // overstepping our deadline more quickly, while increasing it will cause
    // our scan to run faster.
    static const unsigned s_timeCheckResolution = 16;

    bool isMarked(const void*);
    static bool testAndSetMarked(HeapVersion, const void*);

    static size_t cellSize(const void*);

    void writeBarrier(const JSCell* from);
    void writeBarrier(const JSCell* from, JSValue to);
    void writeBarrier(const JSCell* from, JSCell* to);

    void writeBarrierWithoutFence(const JSCell* from);

    void mutatorFence();

    // Take this if you know that from->cellState() < barrierThreshold.
    JS_EXPORT_PRIVATE void writeBarrierSlowPath(const JSCell* from);
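
    // Illustrative usage sketch (not part of the upstream API contract): after storing a new
    // cell reference into an already-allocated object, the mutator records the new edge so
    // generational and concurrent marking stay sound. "owner", "slot", and "newValue" are
    // hypothetical names; a VM& named "vm" is assumed.
    //
    //     owner->slot.setWithoutWriteBarrier(newValue); // raw store into the owner
    //     vm.heap.writeBarrier(owner, newValue);        // tell the GC about the owner -> newValue edge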

    Heap(VM*, HeapType);
    ~Heap();
    void lastChanceToFinalize();
    void releaseDelayedReleasedObjects();

    VM* vm() const;

    MarkedSpace& objectSpace() { return m_objectSpace; }
    MachineThreads& machineThreads() { return *m_machineThreads; }

    SlotVisitor& collectorSlotVisitor() { return *m_collectorSlotVisitor; }

    JS_EXPORT_PRIVATE GCActivityCallback* fullActivityCallback();
    JS_EXPORT_PRIVATE GCActivityCallback* edenActivityCallback();
    JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);

    JS_EXPORT_PRIVATE IncrementalSweeper& sweeper();

    void addObserver(HeapObserver* observer) { m_observers.append(observer); }
    void removeObserver(HeapObserver* observer) { m_observers.removeFirst(observer); }

    MutatorState mutatorState() const { return m_mutatorState; }
    Optional<CollectionScope> collectionScope() const { return m_collectionScope; }
    bool hasHeapAccess() const;
    bool worldIsStopped() const;
    bool worldIsRunning() const { return !worldIsStopped(); }

    // We're always busy on the collection threads. On the main thread, this returns true if we're
    // helping heap.
    JS_EXPORT_PRIVATE bool isCurrentThreadBusy();

    typedef void (*Finalizer)(JSCell*);
    JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);
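
    // Illustrative usage sketch: a finalizer registered here runs once the GC has determined the
    // cell is dead, which is useful when a cell owns non-GC resources. "MyCell" and
    // "releaseExternalResource" are hypothetical names.
    //
    //     static void finalizeMyCell(JSCell* cell)
    //     {
    //         static_cast<MyCell*>(cell)->releaseExternalResource();
    //     }
    //     ...
    //     vm.heap.addFinalizer(myCell, finalizeMyCell);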

    void notifyIsSafeToCollect();
    bool isSafeToCollect() const { return m_isSafeToCollect; }

    bool isShuttingDown() const { return m_isShuttingDown; }

    JS_EXPORT_PRIVATE bool isHeapSnapshotting() const;

    JS_EXPORT_PRIVATE void sweepSynchronously();

    bool shouldCollectHeuristic();

    // Queue up a collection. Returns immediately. This will not queue a collection if a collection
    // of equal or greater strength exists. Full collections are stronger than WTF::nullopt collections
    // and WTF::nullopt collections are stronger than Eden collections. WTF::nullopt means that the GC can
    // choose Eden or Full. This implies that if you request a GC while that GC is ongoing, nothing
    // will happen.
    JS_EXPORT_PRIVATE void collectAsync(GCRequest = GCRequest());

    // Queue up a collection and wait for it to complete. This won't return until you get your own
    // complete collection. For example, if there was an ongoing asynchronous collection at the time
    // you called this, then this would wait for that one to complete and then trigger your
    // collection and then return. In weird cases, there could be multiple GC requests in the backlog
    // and this will wait for that backlog before running its GC and returning.
    JS_EXPORT_PRIVATE void collectSync(GCRequest = GCRequest());

    JS_EXPORT_PRIVATE void collect(Synchronousness, GCRequest = GCRequest());

    // Like collect(), but in the case of Async this will stopIfNecessary() and in the case of
    // Sync this will sweep synchronously.
    JS_EXPORT_PRIVATE void collectNow(Synchronousness, GCRequest = GCRequest());
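
    // Illustrative usage sketch (assumes a VM& named "vm" and that GCRequest converts implicitly
    // from CollectionScope, as the default arguments above suggest):
    //
    //     vm.heap.collectAsync();                                          // let the GC pick Eden or Full
    //     vm.heap.collectAsync(CollectionScope::Full);                     // request a Full collection
    //     vm.heap.collectSync();                                           // block until a fresh collection completes
    //     vm.heap.collectNow(Synchronousness::Sync, CollectionScope::Full); // collect and sweep synchronously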

    JS_EXPORT_PRIVATE void collectNowFullIfNotDoneRecently(Synchronousness);

    void collectIfNecessaryOrDefer(GCDeferralContext* = nullptr);

    void completeAllJITPlans();

    // Use this API to report non-GC memory referenced by GC objects. Be sure to
    // call both of these functions: Calling only one may trigger catastrophic
    // memory growth.
    void reportExtraMemoryAllocated(size_t);
    JS_EXPORT_PRIVATE void reportExtraMemoryVisited(size_t);
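
    // Illustrative usage sketch (not part of the upstream header; "externalByteCount" is a
    // hypothetical accessor): report the external memory when it comes into existence, and report
    // it again whenever the owning cell is visited, so the GC keeps accounting for it.
    //
    //     vm.heap.reportExtraMemoryAllocated(wrapper->externalByteCount()); // at allocation time
    //     heap.reportExtraMemoryVisited(wrapper->externalByteCount());      // while visiting the owner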

#if ENABLE(RESOURCE_USAGE)
    // Use this API to report the subset of extra memory that lives outside this process.
    JS_EXPORT_PRIVATE void reportExternalMemoryVisited(size_t);
    size_t externalMemorySize() { return m_externalMemorySize; }
#endif

    // Use this API to report non-GC memory if you can't use the better API above.
    void deprecatedReportExtraMemory(size_t);

    JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();

    JS_EXPORT_PRIVATE void protect(JSValue);
    JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
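
    // Illustrative usage sketch: protect() pins a value that is not otherwise reachable from the
    // heap (for example, a value held only by native code), and unprotect() releases that pin.
    // The counts nest, so each protect() needs a matching unprotect().
    //
    //     vm.heap.protect(value);
    //     // ... use value from native code without any GC-visible reference ...
    //     vm.heap.unprotect(value);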

    JS_EXPORT_PRIVATE size_t extraMemorySize(); // Non-GC memory referenced by GC objects.
    JS_EXPORT_PRIVATE size_t size();
    JS_EXPORT_PRIVATE size_t capacity();
    JS_EXPORT_PRIVATE size_t objectCount();
    JS_EXPORT_PRIVATE size_t globalObjectCount();
    JS_EXPORT_PRIVATE size_t protectedObjectCount();
    JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
    JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> protectedObjectTypeCounts();
    JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> objectTypeCounts();

    HashSet<MarkedArgumentBuffer*>& markListSet();

    template<typename Functor> void forEachProtectedCell(const Functor&);
    template<typename Functor> void forEachCodeBlock(const Functor&);
    template<typename Functor> void forEachCodeBlockIgnoringJITPlans(const AbstractLocker& codeBlockSetLocker, const Functor&);

    HandleSet* handleSet() { return &m_handleSet; }

    void willStartIterating();
    void didFinishIterating();

    Seconds lastFullGCLength() const { return m_lastFullGCLength; }
    Seconds lastEdenGCLength() const { return m_lastEdenGCLength; }
    void increaseLastFullGCLength(Seconds amount) { m_lastFullGCLength += amount; }

    size_t sizeBeforeLastEdenCollection() const { return m_sizeBeforeLastEdenCollect; }
    size_t sizeAfterLastEdenCollection() const { return m_sizeAfterLastEdenCollect; }
    size_t sizeBeforeLastFullCollection() const { return m_sizeBeforeLastFullCollect; }
    size_t sizeAfterLastFullCollection() const { return m_sizeAfterLastFullCollect; }

    void deleteAllCodeBlocks(DeleteAllCodeEffort);
    void deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort);

    void didAllocate(size_t);
    bool isPagedOut(MonotonicTime deadline);

    const JITStubRoutineSet& jitStubRoutines() { return *m_jitStubRoutines; }

    void addReference(JSCell*, ArrayBuffer*);

    bool isDeferred() const { return !!m_deferralDepth; }

    StructureIDTable& structureIDTable() { return m_structureIDTable; }

    CodeBlockSet& codeBlockSet() { return *m_codeBlocks; }

#if USE(FOUNDATION)
    template<typename T> void releaseSoon(RetainPtr<T>&&);
#endif
#ifdef JSC_GLIB_API_ENABLED
    void releaseSoon(std::unique_ptr<JSCGLibWrapperObject>&&);
#endif

    JS_EXPORT_PRIVATE void registerWeakGCMap(WeakGCMapBase* weakGCMap);
    JS_EXPORT_PRIVATE void unregisterWeakGCMap(WeakGCMapBase* weakGCMap);

    void addLogicallyEmptyWeakBlock(WeakBlock*);

#if ENABLE(RESOURCE_USAGE)
    size_t blockBytesAllocated() const { return m_blockBytesAllocated; }
#endif

    void didAllocateBlock(size_t capacity);
    void didFreeBlock(size_t capacity);

    bool mutatorShouldBeFenced() const { return m_mutatorShouldBeFenced; }
    const bool* addressOfMutatorShouldBeFenced() const { return &m_mutatorShouldBeFenced; }

    unsigned barrierThreshold() const { return m_barrierThreshold; }
    const unsigned* addressOfBarrierThreshold() const { return &m_barrierThreshold; }

#if ENABLE(DFG_DOES_GC_VALIDATION)
    bool expectDoesGC() const { return m_expectDoesGC; }
    void setExpectDoesGC(bool value) { m_expectDoesGC = value; }
    bool* addressOfExpectDoesGC() { return &m_expectDoesGC; }
#else
    bool expectDoesGC() const { UNREACHABLE_FOR_PLATFORM(); return true; }
    void setExpectDoesGC(bool) { UNREACHABLE_FOR_PLATFORM(); }
    bool* addressOfExpectDoesGC() { UNREACHABLE_FOR_PLATFORM(); return nullptr; }
#endif

    // If true, the GC believes that the mutator is currently messing with the heap. We call this
    // "having heap access". The GC may block if the mutator is in this state. If false, the GC may
    // currently be doing things to the heap that make the heap unsafe to access for the mutator.
    bool hasAccess() const;

    // If the mutator does not currently have heap access, this function will acquire it. If the GC
    // is currently using the lack of heap access to do dangerous things to the heap then this
    // function will block, waiting for the GC to finish. It's not valid to call this if the mutator
    // already has heap access. The mutator is required to precisely track whether or not it has
    // heap access.
    //
    // It's totally fine to acquireAccess() upon VM instantiation and keep it that way. This is how
    // WebCore uses us. For most other clients, JSLock does acquireAccess()/releaseAccess() for you.
    void acquireAccess();

    // Releases heap access. If the GC is blocking waiting to do bad things to the heap, it will be
    // allowed to run now.
    //
    // Ordinarily, you should use the ReleaseHeapAccessScope to release and then reacquire heap
    // access. You should do this anytime you're about to perform a blocking operation, like waiting
    // on the ParkingLot.
    void releaseAccess();
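
    // Illustrative usage sketch: ReleaseHeapAccessScope (mentioned above, declared elsewhere in
    // JSC) drops heap access for the duration of a blocking wait and reacquires it on scope exit,
    // so the GC can run while this thread is parked. "condition", "lock", and the VM& named "vm"
    // are assumed context.
    //
    //     {
    //         ReleaseHeapAccessScope releaseAccess(vm.heap);
    //         condition.wait(lock); // safe to block here; the GC is free to do its work
    //     }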

    // This is like a super optimized way of saying:
    //
    //     releaseAccess()
    //     acquireAccess()
    //
    // The fast path is an inlined relaxed load and branch. The slow path will block the mutator if
    // the GC wants to do bad things to the heap.
    //
    // All allocations logically call this. As an optimization to improve GC progress, you can call
    // this anywhere that you can afford a load-branch and where an object allocation would have been
    // safe.
    //
    // The GC will also push a stopIfNecessary() event onto the runloop of the thread that
    // instantiated the VM whenever it wants the mutator to stop. This means that if you never block
    // but instead use the runloop to wait for events, then you could safely run in a mode where the
    // mutator has permanent heap access (like the DOM does). If you have good event handling
    // discipline (i.e. you don't block the runloop) then you can be sure that stopIfNecessary() will
    // already be called for you at the right times.
    void stopIfNecessary();
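
    // Illustrative usage sketch: a long-running native loop that never allocates can still poll
    // the GC at points where an allocation would have been safe. The loop body below is
    // hypothetical.
    //
    //     for (auto& item : workItems) {
    //         processWithoutAllocating(item);
    //         vm.heap.stopIfNecessary(); // cheap load-and-branch on the fast path
    //     }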

    // This gives the conn to the collector.
    void relinquishConn();

    bool mayNeedToStop();

    void performIncrement(size_t bytes);

    // This is a much stronger kind of stopping of the collector, and it may require waiting for a
    // while. This is meant to be a legacy API for clients of collectAllGarbage that expect that there
    // is no GC before or after that function call. After calling this, you are free to start GCs
    // yourself but you can be sure that none are running.
    //
    // This both prevents new collections from being started asynchronously and waits for any
    // outstanding collections to complete.
    void preventCollection();
    void allowCollection();
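
    // Illustrative usage sketch: bracket a region that must observe a completely quiescent GC.
    // Pairing the calls (for example via a RAII helper) is the caller's responsibility.
    //
    //     vm.heap.preventCollection();
    //     // ... inspect or serialize heap state with no collection running ...
    //     vm.heap.allowCollection();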

    uint64_t mutatorExecutionVersion() const { return m_mutatorExecutionVersion; }
    uint64_t phaseVersion() const { return m_phaseVersion; }

    JS_EXPORT_PRIVATE void addMarkingConstraint(std::unique_ptr<MarkingConstraint>);

    size_t numOpaqueRoots() const { return m_opaqueRoots.size(); }

    HeapVerifier* verifier() const { return m_verifier.get(); }

    void addHeapFinalizerCallback(const HeapFinalizerCallback&);
    void removeHeapFinalizerCallback(const HeapFinalizerCallback&);

    void runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>>);

    template<typename Func>
    void runFunctionInParallel(const Func& func)
    {
        runTaskInParallel(createSharedTask<void(SlotVisitor&)>(func));
    }
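
    // Illustrative usage sketch: the functor runs on each thread that the parallel helper pool
    // donates, each with its own SlotVisitor, so it should only touch shared state through
    // thread-safe structures. The lambda body below is hypothetical.
    //
    //     heap.runFunctionInParallel([&] (SlotVisitor& visitor) {
    //         scanMyShardOfWork(visitor); // hypothetical per-thread work
    //     });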

    template<typename Func>
    void forEachSlotVisitor(const Func&);

    Seconds totalGCTime() const { return m_totalGCTime; }

    HashMap<JSImmutableButterfly*, JSString*> immutableButterflyToStringCache;

private:
    friend class AllocatingScope;
    friend class CodeBlock;
    friend class CollectingScope;
    friend class DeferGC;
    friend class DeferGCForAWhile;
    friend class GCAwareJITStubRoutine;
    friend class GCLogging;
    friend class GCThread;
    friend class HandleSet;
    friend class HeapUtil;
    friend class HeapVerifier;
    friend class JITStubRoutine;
    friend class LLIntOffsetsExtractor;
    friend class MarkStackMergingConstraint;
    friend class MarkedSpace;
    friend class BlockDirectory;
    friend class MarkedBlock;
    friend class RunningScope;
    friend class SlotVisitor;
    friend class SpaceTimeMutatorScheduler;
    friend class StochasticSpaceTimeMutatorScheduler;
    friend class SweepingScope;
    friend class IncrementalSweeper;
    friend class VM;
    friend class WeakSet;

    class HeapThread;
    friend class HeapThread;

    static const size_t minExtraMemory = 256;

    class FinalizerOwner : public WeakHandleOwner {
        void finalize(Handle<Unknown>, void* context) override;
    };

    JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
    JS_EXPORT_PRIVATE void reportExtraMemoryAllocatedSlowCase(size_t);
    JS_EXPORT_PRIVATE void deprecatedReportExtraMemorySlowCase(size_t);

    bool shouldCollectInCollectorThread(const AbstractLocker&);
    void collectInCollectorThread();

    void checkConn(GCConductor);

    enum class RunCurrentPhaseResult {
        Finished,
        Continue,
        NeedCurrentThreadState
    };
    RunCurrentPhaseResult runCurrentPhase(GCConductor, CurrentThreadState*);

    // Returns true if we should keep doing things.
    bool runNotRunningPhase(GCConductor);
    bool runBeginPhase(GCConductor);
    bool runFixpointPhase(GCConductor);
    bool runConcurrentPhase(GCConductor);
    bool runReloopPhase(GCConductor);
    bool runEndPhase(GCConductor);
    bool changePhase(GCConductor, CollectorPhase);
    bool finishChangingPhase(GCConductor);

    void collectInMutatorThread();

    void stopThePeriphery(GCConductor);
    void resumeThePeriphery();

    // Returns true if the mutator is stopped, false if the mutator has the conn now.
    bool stopTheMutator();
    void resumeTheMutator();

    JS_EXPORT_PRIVATE void stopIfNecessarySlow();
    bool stopIfNecessarySlow(unsigned);

    template<typename Func>
    void waitForCollector(const Func&);

    JS_EXPORT_PRIVATE void acquireAccessSlow();
    JS_EXPORT_PRIVATE void releaseAccessSlow();

    bool handleGCDidJIT(unsigned);
    void handleGCDidJIT();

    bool handleNeedFinalize(unsigned);
    void handleNeedFinalize();

    bool relinquishConn(unsigned);
    void finishRelinquishingConn();

    void setGCDidJIT();
    void setNeedFinalize();
    void waitWhileNeedFinalize();

    void setMutatorWaiting();
    void clearMutatorWaiting();
    void notifyThreadStopping(const AbstractLocker&);

    typedef uint64_t Ticket;
    Ticket requestCollection(GCRequest);
    void waitForCollection(Ticket);

    void suspendCompilerThreads();
    void willStartCollection();
    void prepareForMarking();

    void gatherStackRoots(ConservativeRoots&);
    void gatherJSStackRoots(ConservativeRoots&);
    void gatherScratchBufferRoots(ConservativeRoots&);
    void beginMarking();
    void visitCompilerWorklistWeakReferences();
    void removeDeadCompilerWorklistEntries();
    void updateObjectCounts();
    void endMarking();

    void reapWeakHandles();
    void pruneStaleEntriesFromWeakGCMaps();
    void sweepArrayBuffers();
    void snapshotUnswept();
    void deleteSourceProviderCaches();
    void notifyIncrementalSweeper();
    void harvestWeakReferences();

    template<typename CellType, typename CellSet>
    void finalizeMarkedUnconditionalFinalizers(CellSet&);

    void finalizeUnconditionalFinalizers();

    void deleteUnmarkedCompiledCode();
    JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
    void updateAllocationLimits();
    void didFinishCollection();
    void resumeCompilerThreads();
    void gatherExtraHeapSnapshotData(HeapProfiler&);
    void removeDeadHeapSnapshotNodes(HeapProfiler&);
    void finalize();
    void sweepInFinalize();

    void sweepAllLogicallyEmptyWeakBlocks();
    bool sweepNextLogicallyEmptyWeakBlock();

    bool shouldDoFullCollection();

    void incrementDeferralDepth();
    void decrementDeferralDepth();
    void decrementDeferralDepthAndGCIfNeeded();
    JS_EXPORT_PRIVATE void decrementDeferralDepthAndGCIfNeededSlow();

    size_t visitCount();
    size_t bytesVisited();

    void forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>&);
    void forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& codeBlockSetLocker, const ScopedLambda<void(CodeBlock*)>&);

    void setMutatorShouldBeFenced(bool value);

    void addCoreConstraints();

    enum class MemoryThresholdCallType {
        Cached,
        Direct
    };

    bool overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType = MemoryThresholdCallType::Cached);

    template<typename Func>
    void iterateExecutingAndCompilingCodeBlocks(const Func&);

    template<typename Func>
    void iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func&);

    void assertMarkStacksEmpty();

    void setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>>);

    void dumpHeapStatisticsAtVMDestruction();

    static bool useGenerationalGC();
    static bool shouldSweepSynchronously();

    const HeapType m_heapType;
    MutatorState m_mutatorState { MutatorState::Running };
    const size_t m_ramSize;
    const size_t m_minBytesPerCycle;
    size_t m_sizeAfterLastCollect { 0 };
    size_t m_sizeAfterLastFullCollect { 0 };
    size_t m_sizeBeforeLastFullCollect { 0 };
    size_t m_sizeAfterLastEdenCollect { 0 };
    size_t m_sizeBeforeLastEdenCollect { 0 };

    size_t m_bytesAllocatedThisCycle { 0 };
    size_t m_bytesAbandonedSinceLastFullCollect { 0 };
    size_t m_maxEdenSize;
    size_t m_maxEdenSizeWhenCritical;
    size_t m_maxHeapSize;
    size_t m_totalBytesVisited { 0 };
    size_t m_totalBytesVisitedThisCycle { 0 };
    double m_incrementBalance { 0 };

    bool m_shouldDoFullCollection { false };
    Markable<CollectionScope, EnumMarkableTraits<CollectionScope>> m_collectionScope;
    Markable<CollectionScope, EnumMarkableTraits<CollectionScope>> m_lastCollectionScope;
    Lock m_raceMarkStackLock;
#if ENABLE(DFG_DOES_GC_VALIDATION)
    bool m_expectDoesGC { true };
#endif

    StructureIDTable m_structureIDTable;
    MarkedSpace m_objectSpace;
    GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
    size_t m_extraMemorySize { 0 };
    size_t m_deprecatedExtraMemorySize { 0 };

    HashSet<const JSCell*> m_copyingRememberedSet;

    ProtectCountSet m_protectedValues;
    std::unique_ptr<HashSet<MarkedArgumentBuffer*>> m_markListSet;

    std::unique_ptr<MachineThreads> m_machineThreads;

    std::unique_ptr<SlotVisitor> m_collectorSlotVisitor;
    std::unique_ptr<SlotVisitor> m_mutatorSlotVisitor;
    std::unique_ptr<MarkStackArray> m_mutatorMarkStack;
    std::unique_ptr<MarkStackArray> m_raceMarkStack;
    std::unique_ptr<MarkingConstraintSet> m_constraintSet;

    // We pool the slot visitors used by parallel marking threads. It's useful to be able to
    // enumerate over them, and it's useful to have them cache some small amount of memory from
    // one GC to the next. GC marking threads claim these at the start of marking, and return
    // them at the end.
    Vector<std::unique_ptr<SlotVisitor>> m_parallelSlotVisitors;
    Vector<SlotVisitor*> m_availableParallelSlotVisitors;

    HandleSet m_handleSet;
    std::unique_ptr<CodeBlockSet> m_codeBlocks;
    std::unique_ptr<JITStubRoutineSet> m_jitStubRoutines;
    FinalizerOwner m_finalizerOwner;

    Lock m_parallelSlotVisitorLock;
    bool m_isSafeToCollect { false };
    bool m_isShuttingDown { false };
    bool m_mutatorShouldBeFenced { Options::forceFencedBarrier() };

    unsigned m_barrierThreshold { Options::forceFencedBarrier() ? tautologicalThreshold : blackThreshold };

    VM* m_vm;
    Seconds m_lastFullGCLength { 10_ms };
    Seconds m_lastEdenGCLength { 10_ms };

    Vector<WeakBlock*> m_logicallyEmptyWeakBlocks;
    size_t m_indexOfNextLogicallyEmptyWeakBlockToSweep { WTF::notFound };

    RefPtr<FullGCActivityCallback> m_fullActivityCallback;
    RefPtr<GCActivityCallback> m_edenActivityCallback;
    Ref<IncrementalSweeper> m_sweeper;
    Ref<StopIfNecessaryTimer> m_stopIfNecessaryTimer;

    Vector<HeapObserver*> m_observers;

    Vector<HeapFinalizerCallback> m_heapFinalizerCallbacks;

    std::unique_ptr<HeapVerifier> m_verifier;

#if USE(FOUNDATION)
    Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
    unsigned m_delayedReleaseRecursionCount { 0 };
#endif
#ifdef JSC_GLIB_API_ENABLED
    Vector<std::unique_ptr<JSCGLibWrapperObject>> m_delayedReleaseObjects;
    unsigned m_delayedReleaseRecursionCount { 0 };
#endif
    unsigned m_deferralDepth { 0 };

    HashSet<WeakGCMapBase*> m_weakGCMaps;

    std::unique_ptr<MarkStackArray> m_sharedCollectorMarkStack;
    std::unique_ptr<MarkStackArray> m_sharedMutatorMarkStack;
    unsigned m_numberOfActiveParallelMarkers { 0 };
    unsigned m_numberOfWaitingParallelMarkers { 0 };

    ConcurrentPtrHashSet m_opaqueRoots;
    static const size_t s_blockFragmentLength = 32;

    ParallelHelperClient m_helperClient;
    RefPtr<SharedTask<void(SlotVisitor&)>> m_bonusVisitorTask;

#if ENABLE(RESOURCE_USAGE)
    size_t m_blockBytesAllocated { 0 };
    size_t m_externalMemorySize { 0 };
#endif

    std::unique_ptr<MutatorScheduler> m_scheduler;

    static const unsigned mutatorHasConnBit = 1u << 0u; // Must also be protected by threadLock.
    static const unsigned stoppedBit = 1u << 1u; // Only set when !hasAccessBit
    static const unsigned hasAccessBit = 1u << 2u;
    static const unsigned gcDidJITBit = 1u << 3u; // Set when the GC did some JITing, so on resume we need to cpuid.
    static const unsigned needFinalizeBit = 1u << 4u;
    static const unsigned mutatorWaitingBit = 1u << 5u; // Allows the mutator to use this as a condition variable.
    Atomic<unsigned> m_worldState;
    bool m_worldIsStopped { false };
    Lock m_visitRaceLock;
    Lock m_markingMutex;
    Condition m_markingConditionVariable;

    MonotonicTime m_beforeGC;
    MonotonicTime m_afterGC;
    MonotonicTime m_stopTime;

    Deque<GCRequest> m_requests;
    GCRequest m_currentRequest;
    Ticket m_lastServedTicket { 0 };
    Ticket m_lastGrantedTicket { 0 };

    CollectorPhase m_lastPhase { CollectorPhase::NotRunning };
    CollectorPhase m_currentPhase { CollectorPhase::NotRunning };
    CollectorPhase m_nextPhase { CollectorPhase::NotRunning };
    bool m_threadShouldStop { false };
    bool m_threadIsStopping { false };
    bool m_mutatorDidRun { true };
    bool m_didDeferGCWork { false };
    bool m_shouldStopCollectingContinuously { false };

    uint64_t m_mutatorExecutionVersion { 0 };
    uint64_t m_phaseVersion { 0 };
    Box<Lock> m_threadLock;
    Ref<AutomaticThreadCondition> m_threadCondition; // The mutator must not wait on this. It would cause a deadlock.
    RefPtr<AutomaticThread> m_thread;

    RefPtr<Thread> m_collectContinuouslyThread { nullptr };

    MonotonicTime m_lastGCStartTime;
    MonotonicTime m_lastGCEndTime;
    MonotonicTime m_currentGCStartTime;
    Seconds m_totalGCTime;

    uintptr_t m_barriersExecuted { 0 };

    CurrentThreadState* m_currentThreadState { nullptr };
    Thread* m_currentThread { nullptr }; // It's OK if this becomes a dangling pointer.

#if PLATFORM(IOS_FAMILY)
    unsigned m_precentAvailableMemoryCachedCallCount;
    bool m_overCriticalMemoryThreshold;
#endif

    bool m_parallelMarkersShouldExit { false };
    Lock m_collectContinuouslyLock;
    Condition m_collectContinuouslyCondition;
};

} // namespace JSC