/*
 * Copyright (C) 2012-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "BlockDirectory.h"

#include "BlockDirectoryInlines.h"
#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include "SubspaceInlines.h"
#include "SuperSampler.h"
#include "VM.h"

namespace JSC {

BlockDirectory::BlockDirectory(size_t cellSize)
    : m_cellSize(static_cast<unsigned>(cellSize))
{
}

BlockDirectory::~BlockDirectory()
{
    auto locker = holdLock(m_localAllocatorsLock);
    while (!m_localAllocators.isEmpty())
        m_localAllocators.begin()->remove();
}

void BlockDirectory::setSubspace(Subspace* subspace)
{
    m_attributes = subspace->attributes();
    m_subspace = subspace;
}

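// Answers whether this directory's blocks appear to have been paged out, by touching every
// block's page and giving up as soon as the deadline passes. The time check is amortized over
// Heap::s_timeCheckResolution iterations to keep MonotonicTime::now() off the hot path.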
bool BlockDirectory::isPagedOut(MonotonicTime deadline)
{
    unsigned itersSinceLastTimeCheck = 0;
    for (auto* block : m_blocks) {
        if (block)
            block->block().populatePage();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            MonotonicTime currentTime = MonotonicTime::now();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}

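// Finds the next empty block at or after m_emptyCursor, or null once the cursor runs off the end
// of the block list. The cursor is only reset in prepareForAllocation(), so successive calls walk
// the empty() bits monotonically.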
MarkedBlock::Handle* BlockDirectory::findEmptyBlockToSteal()
{
    m_emptyCursor = m_bits.empty().findBit(m_emptyCursor, true);
    if (m_emptyCursor >= m_blocks.size())
        return nullptr;
    return m_blocks[m_emptyCursor];
}

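// Returns the next block at or after the allocator's cursor that is either completely empty or
// known to still have free cells, clearing its canAllocateButNotEmpty bit as the block is handed
// out.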
MarkedBlock::Handle* BlockDirectory::findBlockForAllocation(LocalAllocator& allocator)
{
    for (;;) {
        allocator.m_allocationCursor = (m_bits.canAllocateButNotEmpty() | m_bits.empty()).findBit(allocator.m_allocationCursor, true);
        if (allocator.m_allocationCursor >= m_blocks.size())
            return nullptr;

        size_t blockIndex = allocator.m_allocationCursor++;
        MarkedBlock::Handle* result = m_blocks[blockIndex];
        setIsCanAllocateButNotEmpty(NoLockingNecessary, blockIndex, false);
        return result;
    }
}

MarkedBlock::Handle* BlockDirectory::tryAllocateBlock(Heap& heap)
{
    SuperSamplerScope superSamplerScope(false);

    MarkedBlock::Handle* handle = MarkedBlock::tryCreate(heap, subspace()->alignedMemoryAllocator());
    if (!handle)
        return nullptr;

    markedSpace().didAddBlock(handle);

    return handle;
}

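// Registers a newly created block with this directory, preferring to recycle a previously vacated
// index. Appending may grow m_blocks' capacity, in which case every per-block bitvector (ours and
// the subspace's) has to be resized under m_bitvectorLock.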
void BlockDirectory::addBlock(MarkedBlock::Handle* block)
{
    size_t index;
    if (m_freeBlockIndices.isEmpty()) {
        index = m_blocks.size();

        size_t oldCapacity = m_blocks.capacity();
        m_blocks.append(block);
        if (m_blocks.capacity() != oldCapacity) {
            ASSERT(m_bits.numBits() == oldCapacity);
            ASSERT(m_blocks.capacity() > oldCapacity);

            LockHolder locker(m_bitvectorLock);
            subspace()->didResizeBits(m_blocks.capacity());
            m_bits.resize(m_blocks.capacity());
        }
    } else {
        index = m_freeBlockIndices.takeLast();
        ASSERT(!m_blocks[index]);
        m_blocks[index] = block;
    }

    forEachBitVector(
        NoLockingNecessary,
        [&](auto vectorRef) {
            ASSERT_UNUSED(vectorRef, !vectorRef[index]);
        });

    // This is the point at which the block learns of its cellSize() and attributes().
    block->didAddToDirectory(this, index);

    setIsLive(NoLockingNecessary, index, true);
    setIsEmpty(NoLockingNecessary, index, true);
}

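// The inverse of addBlock(): clears the block's bits, vacates its index for reuse, and tells the
// block that it no longer belongs to a directory.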
void BlockDirectory::removeBlock(MarkedBlock::Handle* block)
{
    ASSERT(block->directory() == this);
    ASSERT(m_blocks[block->index()] == block);

    subspace()->didRemoveBlock(block->index());

    m_blocks[block->index()] = nullptr;
    m_freeBlockIndices.append(block->index());

    forEachBitVector(
        holdLock(m_bitvectorLock),
        [&](auto vectorRef) {
            vectorRef[block->index()] = false;
        });

    block->didRemoveFromDirectory();
}

void BlockDirectory::stopAllocating()
{
    if (false)
        dataLog(RawPointer(this), ": BlockDirectory::stopAllocating!\n");
    m_localAllocators.forEach(
        [&] (LocalAllocator* allocator) {
            allocator->stopAllocating();
        });
}

void BlockDirectory::prepareForAllocation()
{
    m_localAllocators.forEach(
        [&] (LocalAllocator* allocator) {
            allocator->prepareForAllocation();
        });

    m_unsweptCursor = 0;
    m_emptyCursor = 0;

    m_bits.eden().clearAll();

    if (UNLIKELY(Options::useImmortalObjects())) {
        // FIXME: Make this work again.
        // https://bugs.webkit.org/show_bug.cgi?id=162296
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void BlockDirectory::stopAllocatingForGood()
{
    if (false)
        dataLog(RawPointer(this), ": BlockDirectory::stopAllocatingForGood!\n");

    m_localAllocators.forEach(
        [&] (LocalAllocator* allocator) {
            allocator->stopAllocatingForGood();
        });

    auto locker = holdLock(m_localAllocatorsLock);
    while (!m_localAllocators.isEmpty())
        m_localAllocators.begin()->remove();
}

void BlockDirectory::lastChanceToFinalize()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            block->lastChanceToFinalize();
        });
}

void BlockDirectory::resumeAllocating()
{
    m_localAllocators.forEach(
        [&] (LocalAllocator* allocator) {
            allocator->resumeAllocating();
        });
}

void BlockDirectory::beginMarkingForFullCollection()
{
    // Mark bits are sticky and so is our summary of mark bits. We only clear these during full
    // collections, so if you survived the last collection you will survive the next one so long
    // as the next one is eden.
    m_bits.markingNotEmpty().clearAll();
    m_bits.markingRetired().clearAll();
}

void BlockDirectory::endMarking()
{
    m_bits.allocated().clearAll();

    // It's surprising and frustrating to comprehend, but the end-of-marking flip does not need to
    // know what kind of collection it is. That knowledge is already encoded in the m_markingXYZ
    // vectors.

    m_bits.empty() = m_bits.live() & ~m_bits.markingNotEmpty();
    m_bits.canAllocateButNotEmpty() = m_bits.live() & m_bits.markingNotEmpty() & ~m_bits.markingRetired();
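    // Concretely: a live block in which no surviving object was marked comes out of marking as
    // empty, while a live block that was marked but never retired still has holes to allocate
    // into, hence canAllocateButNotEmpty.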

    if (needsDestruction()) {
        // There are some blocks that we didn't allocate out of in the last cycle, but we swept them. This
        // will forget that we did that and we will end up sweeping them again and attempting to call their
        // destructors again. That's fine because of zapping. The only time when we cannot forget is when
        // we just allocate a block or when we move a block from one size class to another. That doesn't
        // happen here.
        m_bits.destructible() = m_bits.live();
    }

    if (false) {
        dataLog("Bits for ", m_cellSize, ", ", m_attributes, " after endMarking:\n");
        dumpBits(WTF::dataFile());
    }
}

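// The unswept bits feed findBlockToSweep() and sweep(). After an eden collection, only blocks in
// the eden set can have new garbage, so only those are added; after a full collection, every live
// block has to be swept.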
void BlockDirectory::snapshotUnsweptForEdenCollection()
{
    m_bits.unswept() |= m_bits.eden();
}

void BlockDirectory::snapshotUnsweptForFullCollection()
{
    m_bits.unswept() = m_bits.live();
}

MarkedBlock::Handle* BlockDirectory::findBlockToSweep()
{
    m_unsweptCursor = m_bits.unswept().findBit(m_unsweptCursor, true);
    if (m_unsweptCursor >= m_blocks.size())
        return nullptr;
    return m_blocks[m_unsweptCursor];
}

void BlockDirectory::sweep()
{
    m_bits.unswept().forEachSetBit(
        [&] (size_t index) {
            MarkedBlock::Handle* block = m_blocks[index];
            block->sweep(nullptr);
        });
}

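// Returns empty blocks to the MarkedSpace. Blocks whose dead cells may still need their
// destructors run (the destructible set) are deliberately skipped.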
void BlockDirectory::shrink()
{
    (m_bits.empty() & ~m_bits.destructible()).forEachSetBit(
        [&] (size_t index) {
            markedSpace().freeBlock(m_blocks[index]);
        });
}

void BlockDirectory::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;

    if (m_bits.unswept().isEmpty())
        return;

    dataLog("Assertion failed: unswept not empty in ", *this, ".\n");
    dumpBits();
    ASSERT_NOT_REACHED();
}

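// Vends markingNotEmpty blocks to parallel workers, one at a time. The unlocked read of m_done is
// a benign race: a stale false just means the caller takes the lock and rediscovers that the
// bitvector is exhausted.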
RefPtr<SharedTask<MarkedBlock::Handle*()>> BlockDirectory::parallelNotEmptyBlockSource()
{
    class Task : public SharedTask<MarkedBlock::Handle*()> {
    public:
        Task(BlockDirectory& directory)
            : m_directory(directory)
        {
        }

        MarkedBlock::Handle* run() override
        {
            if (m_done)
                return nullptr;
            auto locker = holdLock(m_lock);
            m_index = m_directory.m_bits.markingNotEmpty().findBit(m_index, true);
            if (m_index >= m_directory.m_blocks.size()) {
                m_done = true;
                return nullptr;
            }
            return m_directory.m_blocks[m_index++];
        }

    private:
        BlockDirectory& m_directory;
        size_t m_index { 0 };
        Lock m_lock;
        bool m_done { false };
    };

    return adoptRef(new Task(*this));
}

void BlockDirectory::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":", m_cellSize, "/", m_attributes);
}

void BlockDirectory::dumpBits(PrintStream& out)
{
    unsigned maxNameLength = 0;
    forEachBitVectorWithName(
        NoLockingNecessary,
        [&](auto vectorRef, const char* name) {
            UNUSED_PARAM(vectorRef);
            unsigned length = strlen(name);
            maxNameLength = std::max(maxNameLength, length);
        });

    forEachBitVectorWithName(
        NoLockingNecessary,
        [&](auto vectorRef, const char* name) {
            out.print("    ", name, ": ");
            for (unsigned i = maxNameLength - strlen(name); i--;)
                out.print(" ");
            out.print(vectorRef, "\n");
        });
}

MarkedSpace& BlockDirectory::markedSpace() const
{
    return m_subspace->space();
}

bool BlockDirectory::isFreeListedCell(const void* target)
{
    bool result = false;
    m_localAllocators.forEach(
        [&] (LocalAllocator* allocator) {
            result |= allocator->isFreeListedCell(target);
        });
    return result;
}

} // namespace JSC