/*
 * Copyright (C) 1999-2000 Harri Porten ([email protected])
 * Copyright (C) 2001 Peter Kelly ([email protected])
 * Copyright (C) 2003-2018 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#pragma once

#include "BlockDirectory.h"
#include "IterationStatus.h"
#include "LargeAllocation.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include <array>
#include <wtf/Bag.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
#include <wtf/RetainPtr.h>
#include <wtf/SentinelLinkedList.h>
#include <wtf/SinglyLinkedListWithTail.h>
#include <wtf/Vector.h>

namespace JSC {

class CompleteSubspace;
class Heap;
class HeapIterationScope;
class LLIntOffsetsExtractor;
class Subspace;
class WeakSet;

typedef uint32_t HeapVersion;

class MarkedSpace {
    WTF_MAKE_NONCOPYABLE(MarkedSpace);
public:
    // sizeStep is really a synonym for atomSize; it's no accident that they are the same.
    static constexpr size_t sizeStep = MarkedBlock::atomSize;

    // Sizes up to this amount get a size class for each size step.
    static constexpr size_t preciseCutoff = 80;

    // The amount of available payload in a block is the block's size minus the footer.
    static constexpr size_t blockPayload = MarkedBlock::payloadSize;

    // The largest cell we're willing to allocate in a MarkedBlock the "normal way" (i.e. using size
    // classes, rather than a large allocation) is half the size of the payload, rounded down. This
    // ensures that we only use the size class approach if it means being able to pack two things
    // into one block.
    static constexpr size_t largeCutoff = (blockPayload / 2) & ~(sizeStep - 1);

    // We have an extra size class for size zero.
    static constexpr size_t numSizeClasses = largeCutoff / sizeStep + 1;
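    // For illustration only, with hypothetical numbers (the real values come from
    // MarkedBlock::atomSize and MarkedBlock::payloadSize): if sizeStep were 16 bytes and
    // blockPayload were 16224 bytes, then largeCutoff would be (16224 / 2) & ~15 = 8112
    // and numSizeClasses would be 8112 / 16 + 1 = 508. A cell of at most 8112 bytes would
    // be served from a size class; anything bigger would become a LargeAllocation.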

    static constexpr HeapVersion nullVersion = 0; // The version of freshly allocated blocks.
    static constexpr HeapVersion initialVersion = 2; // The version that the heap starts out with. Set to make sure that nextVersion(nullVersion) != initialVersion.

    static HeapVersion nextVersion(HeapVersion version)
    {
        version++;
        if (version == nullVersion)
            version = initialVersion;
        return version;
    }

    static size_t sizeClassToIndex(size_t size)
    {
        return (size + sizeStep - 1) / sizeStep;
    }

    static size_t indexToSizeClass(size_t index)
    {
        size_t result = index * sizeStep;
        ASSERT(sizeClassToIndex(result) == index);
        return result;
    }
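    // Worked example, assuming (hypothetically) that sizeStep is 16: sizeClassToIndex(1)
    // through sizeClassToIndex(16) all return 1, sizeClassToIndex(17) returns 2, and
    // indexToSizeClass(2) returns 32. In general, index i covers sizes
    // (i - 1) * sizeStep + 1 through i * sizeStep, and index 0 is the size-zero class.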

    MarkedSpace(Heap*);
    ~MarkedSpace();

    Heap* heap() const { return m_heap; }

    void lastChanceToFinalize(); // Must call stopAllocatingForGood first.
    void freeMemory();

    static size_t optimalSizeFor(size_t);

    void prepareForAllocation();

    void visitWeakSets(SlotVisitor&);
    void reapWeakSets();

    MarkedBlockSet& blocks() { return m_blocks; }

    void willStartIterating();
    bool isIterating() const { return m_isIterating; }
    void didFinishIterating();

    void stopAllocating();
    void stopAllocatingForGood();
    void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.

    void prepareForMarking();

    void prepareForConservativeScan();

    typedef HashSet<MarkedBlock*>::iterator BlockIterator;

    template<typename Functor> void forEachLiveCell(HeapIterationScope&, const Functor&);
    template<typename Functor> void forEachDeadCell(HeapIterationScope&, const Functor&);
    template<typename Functor> void forEachBlock(const Functor&);

    void shrink();
    void freeBlock(MarkedBlock::Handle*);
    void freeOrShrinkBlock(MarkedBlock::Handle*);

    void didAddBlock(MarkedBlock::Handle*);
    void didConsumeFreeList(MarkedBlock::Handle*);
    void didAllocateInBlock(MarkedBlock::Handle*);

    void beginMarking();
    void endMarking();
    void snapshotUnswept();
    void clearNewlyAllocated();
    void sweep();
    void sweepLargeAllocations();
    void assertNoUnswept();
    size_t objectCount();
    size_t size();
    size_t capacity();

    bool isPagedOut(MonotonicTime deadline);

    HeapVersion markingVersion() const { return m_markingVersion; }
    HeapVersion newlyAllocatedVersion() const { return m_newlyAllocatedVersion; }

    const Vector<LargeAllocation*>& largeAllocations() const { return m_largeAllocations; }
    unsigned largeAllocationsNurseryOffset() const { return m_largeAllocationsNurseryOffset; }
    unsigned largeAllocationsOffsetForThisCollection() const { return m_largeAllocationsOffsetForThisCollection; }

    // These are cached pointers and offsets for quickly searching the large allocations that are
    // relevant to this collection.
    LargeAllocation** largeAllocationsForThisCollectionBegin() const { return m_largeAllocationsForThisCollectionBegin; }
    LargeAllocation** largeAllocationsForThisCollectionEnd() const { return m_largeAllocationsForThisCollectionEnd; }
    unsigned largeAllocationsForThisCollectionSize() const { return m_largeAllocationsForThisCollectionSize; }
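    // A typical consumer walks this cached window rather than the whole vector. Sketch
    // (illustrative only; visitLargeAllocation() is a hypothetical callback, not part of
    // this class):
    //
    //     for (LargeAllocation** ptr = largeAllocationsForThisCollectionBegin();
    //             ptr != largeAllocationsForThisCollectionEnd(); ++ptr)
    //         visitLargeAllocation(**ptr);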

    BlockDirectory* firstDirectory() const { return m_directories.first(); }

    Lock& directoryLock() { return m_directoryLock; }
    void addBlockDirectory(const AbstractLocker&, BlockDirectory*);

    // When this is true it means that we have flipped but the mark bits haven't converged yet.
    bool isMarking() const { return m_isMarking; }

    WeakSet* activeWeakSetsBegin() { return m_activeWeakSets.begin(); }
    WeakSet* activeWeakSetsEnd() { return m_activeWeakSets.end(); }
    WeakSet* newActiveWeakSetsBegin() { return m_newActiveWeakSets.begin(); }
    WeakSet* newActiveWeakSetsEnd() { return m_newActiveWeakSets.end(); }

    void dumpBits(PrintStream& = WTF::dataFile());

    JS_EXPORT_PRIVATE static std::array<size_t, numSizeClasses> s_sizeClassForSizeStep;

private:
    friend class CompleteSubspace;
    friend class LLIntOffsetsExtractor;
    friend class JIT;
    friend class WeakSet;
    friend class Subspace;

    // Use this version when calling from within the GC where we know that the directories
    // have already been stopped.
    template<typename Functor> void forEachLiveCell(const Functor&);

    static void initializeSizeClassForStepSize();

    void initializeSubspace(Subspace&);

    template<typename Functor> inline void forEachDirectory(const Functor&);

    void addActiveWeakSet(WeakSet*);

    Vector<Subspace*> m_subspaces;

    Vector<LargeAllocation*> m_largeAllocations;
    unsigned m_largeAllocationsNurseryOffset { 0 };
    unsigned m_largeAllocationsOffsetForThisCollection { 0 };
    unsigned m_largeAllocationsNurseryOffsetForSweep { 0 };
    unsigned m_largeAllocationsForThisCollectionSize { 0 };
    LargeAllocation** m_largeAllocationsForThisCollectionBegin { nullptr };
    LargeAllocation** m_largeAllocationsForThisCollectionEnd { nullptr };

    Heap* m_heap;
    size_t m_capacity { 0 };
    HeapVersion m_markingVersion { initialVersion };
    HeapVersion m_newlyAllocatedVersion { initialVersion };
    bool m_isIterating { false };
    bool m_isMarking { false };
    Lock m_directoryLock;
    MarkedBlockSet m_blocks;

    SentinelLinkedList<WeakSet, BasicRawSentinelNode<WeakSet>> m_activeWeakSets;
    SentinelLinkedList<WeakSet, BasicRawSentinelNode<WeakSet>> m_newActiveWeakSets;

    SinglyLinkedListWithTail<BlockDirectory> m_directories;

    friend class HeapVerifier;
};

template <typename Functor> inline void MarkedSpace::forEachBlock(const Functor& functor)
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.forEachBlock(functor);
            return IterationStatus::Continue;
        });
}

template <typename Functor>
void MarkedSpace::forEachDirectory(const Functor& functor)
{
    for (BlockDirectory* directory = m_directories.first(); directory; directory = directory->nextDirectory()) {
        if (functor(*directory) == IterationStatus::Done)
            return;
    }
}
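
// Illustrative use of the iteration helpers (a hypothetical caller; any functor with a
// compatible signature works). forEachBlock visits every MarkedBlock handle in every
// directory, while forEachDirectory stops as soon as the functor returns
// IterationStatus::Done.
//
//     markedSpace.forEachBlock([] (MarkedBlock::Handle* block) {
//         dataLogLn("block: ", RawPointer(block));
//     });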

ALWAYS_INLINE size_t MarkedSpace::optimalSizeFor(size_t bytes)
{
    ASSERT(bytes);
    if (bytes <= preciseCutoff)
        return WTF::roundUpToMultipleOf<sizeStep>(bytes);
    if (bytes <= largeCutoff)
        return s_sizeClassForSizeStep[sizeClassToIndex(bytes)];
    return bytes;
}
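
// Worked example, again assuming (hypothetically) that sizeStep is 16. With preciseCutoff = 80
// as defined above, optimalSizeFor(20) rounds up to 32. optimalSizeFor(100) is past the precise
// range, so it returns the size class recorded in s_sizeClassForSizeStep for index
// sizeClassToIndex(100) = 7, which is at least 112 (= 7 * 16) but may be larger if a coarser
// class packs a block better. Anything above largeCutoff is returned unchanged, because such
// requests bypass size classes and become LargeAllocations.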

} // namespace JSC