/*
 * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "AllocationFailureMode.h"
#include "CellAttributes.h"
#include "FreeList.h"
#include "LocalAllocator.h"
#include "MarkedBlock.h"
#include <wtf/DataLog.h>
#include <wtf/FastBitVector.h>
#include <wtf/MonotonicTime.h>
#include <wtf/SharedTask.h>
#include <wtf/Vector.h>

namespace JSC {

class GCDeferralContext;
class Heap;
class IsoCellSet;
class MarkedSpace;
class Subspace;

#define FOR_EACH_BLOCK_DIRECTORY_BIT(macro) \
    macro(live, Live) /* The set of block indices that have actual blocks. */ \
    macro(empty, Empty) /* The set of all blocks that have no live objects. */ \
    macro(allocated, Allocated) /* The set of all blocks that are full of live objects. */ \
    macro(canAllocateButNotEmpty, CanAllocateButNotEmpty) /* The set of all blocks that are neither empty nor retired (a block is retired once it is more than minMarkedBlockUtilization full). */ \
    macro(destructible, Destructible) /* The set of all blocks that may have destructors to run. */ \
    macro(eden, Eden) /* The set of all blocks that have new objects since the last GC. */ \
    macro(unswept, Unswept) /* The set of all blocks that could be swept by the incremental sweeper. */ \
    \
    /* These are computed during marking. */ \
    macro(markingNotEmpty, MarkingNotEmpty) /* The set of all blocks that are not empty. */ \
    macro(markingRetired, MarkingRetired) /* The set of all blocks that are retired. */
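
// For illustration, here is roughly what one client of FOR_EACH_BLOCK_DIRECTORY_BIT sees: the
// bitvector declarations inside BlockDirectory below expand to one FastBitVector per bit, i.e.
//
//     FastBitVector m_live;
//     FastBitVector m_empty;
//     FastBitVector m_allocated;
//     FastBitVector m_canAllocateButNotEmpty;
//     FastBitVector m_destructible;
//     FastBitVector m_eden;
//     FastBitVector m_unswept;
//     FastBitVector m_markingNotEmpty;
//     FastBitVector m_markingRetired;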

// FIXME: We defined canAllocateButNotEmpty and empty to be exclusive:
//
//     canAllocateButNotEmpty & empty == 0
//
// Instead of calling it canAllocate and making it inclusive:
//
//     canAllocate & empty == empty
//
// The latter is probably better. I'll leave it to a future bug to fix that, since breathing on
// this code leads to regressions for days, and it's not clear that making this change would
// improve perf since it would not change the collector's behavior, and either way the directory
// has to look at both bitvectors.
// https://bugs.webkit.org/show_bug.cgi?id=162121
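
// Spelled out element-wise, the current exclusive invariant is the following (a sketch only;
// FastBitVector's operator[] is the same indexing the accessors below use, but this exact
// assertion does not exist in the code):
//
//     for (size_t index = 0; index < m_blocks.size(); ++index)
//         ASSERT(!(m_canAllocateButNotEmpty[index] && m_empty[index]));
//
// Under the proposed inclusive scheme, every empty block would also have the bit set, i.e.
// m_canAllocate[index] (a hypothetical bitvector) would be true whenever m_empty[index] is.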

class BlockDirectory {
    WTF_MAKE_NONCOPYABLE(BlockDirectory);
    WTF_MAKE_FAST_ALLOCATED;

    friend class LLIntOffsetsExtractor;

public:
    BlockDirectory(Heap*, size_t cellSize);
    ~BlockDirectory();
    void setSubspace(Subspace*);
    void lastChanceToFinalize();
    void prepareForAllocation();
    void stopAllocating();
    void stopAllocatingForGood();
    void resumeAllocating();
    void beginMarkingForFullCollection();
    void endMarking();
    void snapshotUnsweptForEdenCollection();
    void snapshotUnsweptForFullCollection();
    void sweep();
    void shrink();
    void assertNoUnswept();
    size_t cellSize() const { return m_cellSize; }
    const CellAttributes& attributes() const { return m_attributes; }
    bool needsDestruction() const { return m_attributes.destruction == NeedsDestruction; }
    DestructionMode destruction() const { return m_attributes.destruction; }
    HeapCell::Kind cellKind() const { return m_attributes.cellKind; }
    Heap* heap() { return m_heap; }

    bool isFreeListedCell(const void* target);

    template<typename Functor> void forEachBlock(const Functor&);
    template<typename Functor> void forEachNotEmptyBlock(const Functor&);

    RefPtr<SharedTask<MarkedBlock::Handle*()>> parallelNotEmptyBlockSource();

    void addBlock(MarkedBlock::Handle*);
    void removeBlock(MarkedBlock::Handle*);

    bool isPagedOut(MonotonicTime deadline);

    Lock& bitvectorLock() { return m_bitvectorLock; }

#define BLOCK_DIRECTORY_BIT_ACCESSORS(lowerBitName, capitalBitName) \
    bool is ## capitalBitName(const AbstractLocker&, size_t index) const { return m_ ## lowerBitName[index]; } \
    bool is ## capitalBitName(const AbstractLocker& locker, MarkedBlock::Handle* block) const { return is ## capitalBitName(locker, block->index()); } \
    void setIs ## capitalBitName(const AbstractLocker&, size_t index, bool value) { m_ ## lowerBitName[index] = value; } \
    void setIs ## capitalBitName(const AbstractLocker& locker, MarkedBlock::Handle* block, bool value) { setIs ## capitalBitName(locker, block->index(), value); }
    FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_ACCESSORS)
#undef BLOCK_DIRECTORY_BIT_ACCESSORS
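
    // For example, instantiating the accessor macro for the "empty" bit generates:
    //
    //     bool isEmpty(const AbstractLocker&, size_t index) const { return m_empty[index]; }
    //     bool isEmpty(const AbstractLocker& locker, MarkedBlock::Handle* block) const { return isEmpty(locker, block->index()); }
    //     void setIsEmpty(const AbstractLocker&, size_t index, bool value) { m_empty[index] = value; }
    //     void setIsEmpty(const AbstractLocker& locker, MarkedBlock::Handle* block, bool value) { setIsEmpty(locker, block->index(), value); }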

    template<typename Func>
    void forEachBitVector(const AbstractLocker&, const Func& func)
    {
#define BLOCK_DIRECTORY_BIT_CALLBACK(lowerBitName, capitalBitName) \
        func(m_ ## lowerBitName);
        FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_CALLBACK);
#undef BLOCK_DIRECTORY_BIT_CALLBACK
    }
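
    // A hypothetical caller, showing the expected shape of the functor (it receives each
    // FastBitVector by reference); newSize is illustrative:
    //
    //     auto locker = holdLock(directory->bitvectorLock());
    //     directory->forEachBitVector(locker, [&](FastBitVector& vector) {
    //         vector.resize(newSize);
    //     });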

    template<typename Func>
    void forEachBitVectorWithName(const AbstractLocker&, const Func& func)
    {
#define BLOCK_DIRECTORY_BIT_CALLBACK(lowerBitName, capitalBitName) \
        func(m_ ## lowerBitName, #capitalBitName);
        FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_CALLBACK);
#undef BLOCK_DIRECTORY_BIT_CALLBACK
    }
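
    // A minimal sketch of how the named variant can drive diagnostics; dumpBits() below is
    // likely implemented along these lines:
    //
    //     forEachBitVectorWithName(locker, [&](FastBitVector& vector, const char* name) {
    //         dataLog(name, ": ", vector, "\n");
    //     });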

    BlockDirectory* nextDirectory() const { return m_nextDirectory; }
    BlockDirectory* nextDirectoryInSubspace() const { return m_nextDirectoryInSubspace; }
    BlockDirectory* nextDirectoryInAlignedMemoryAllocator() const { return m_nextDirectoryInAlignedMemoryAllocator; }

    void setNextDirectory(BlockDirectory* directory) { m_nextDirectory = directory; }
    void setNextDirectoryInSubspace(BlockDirectory* directory) { m_nextDirectoryInSubspace = directory; }
    void setNextDirectoryInAlignedMemoryAllocator(BlockDirectory* directory) { m_nextDirectoryInAlignedMemoryAllocator = directory; }

    MarkedBlock::Handle* findEmptyBlockToSteal();

    MarkedBlock::Handle* findBlockToSweep();

    Subspace* subspace() const { return m_subspace; }
    MarkedSpace& markedSpace() const;

    void dump(PrintStream&) const;
    void dumpBits(PrintStream& = WTF::dataFile());

private:
    friend class IsoCellSet;
    friend class LocalAllocator;
    friend class LocalSideAllocator;
    friend class MarkedBlock;

    MarkedBlock::Handle* findBlockForAllocation(LocalAllocator&);

    MarkedBlock::Handle* tryAllocateBlock();

    Vector<MarkedBlock::Handle*> m_blocks;
    Vector<unsigned> m_freeBlockIndices;
    // The mutator uses m_bitvectorLock (declared below) to guard resizing of these bitvectors.
    // GC work that may run concurrently with the mutator must hold the lock while accessing them.
#define BLOCK_DIRECTORY_BIT_DECLARATION(lowerBitName, capitalBitName) \
    FastBitVector m_ ## lowerBitName;
    FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_DECLARATION)
#undef BLOCK_DIRECTORY_BIT_DECLARATION
    Lock m_bitvectorLock;
    Lock m_localAllocatorsLock;
    CellAttributes m_attributes;

    unsigned m_cellSize;

    // After you do something to a block based on one of these cursors, you clear the bit in the
    // corresponding bitvector and leave the cursor where it was.
    size_t m_emptyCursor { 0 };
    size_t m_unsweptCursor { 0 }; // Points to the next block that is a candidate for incremental sweeping.
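
    // A sketch of the cursor pattern, assuming FastBitVector::findBit (with the locker held on
    // m_bitvectorLock); the real scans live in BlockDirectory.cpp:
    //
    //     m_emptyCursor = m_empty.findBit(m_emptyCursor, true); // Next empty block, if any.
    //     if (m_emptyCursor < m_blocks.size()) {
    //         MarkedBlock::Handle* block = m_blocks[m_emptyCursor];
    //         setIsEmpty(locker, m_emptyCursor, false); // Clear the bit; leave the cursor.
    //     }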

    // FIXME: All of these should probably be references.
    // https://bugs.webkit.org/show_bug.cgi?id=166988
    Heap* m_heap { nullptr };
    Subspace* m_subspace { nullptr };
    BlockDirectory* m_nextDirectory { nullptr };
    BlockDirectory* m_nextDirectoryInSubspace { nullptr };
    BlockDirectory* m_nextDirectoryInAlignedMemoryAllocator { nullptr };

    SentinelLinkedList<LocalAllocator, BasicRawSentinelNode<LocalAllocator>> m_localAllocators;
};

} // namespace JSC