/*
 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "AlignedMemoryAllocator.h"
#include "BlockDirectoryInlines.h"
#include "FreeListInlines.h"
#include "JSCast.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include "SuperSampler.h"
#include "SweepingScope.h"
#include <wtf/CommaPrinter.h>

namespace JSC {
namespace MarkedBlockInternal {
static constexpr bool verbose = false;
}

const size_t MarkedBlock::blockSize;

static const bool computeBalance = false;
static size_t balance;

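// Tries to allocate one blockSize-aligned, blockSize-sized chunk from the given aligned memory
// allocator and wraps it in a fresh Handle. Returns nullptr if the underlying allocation fails.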
MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap, AlignedMemoryAllocator* alignedMemoryAllocator)
{
    if (computeBalance) {
        balance++;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    void* blockSpace = alignedMemoryAllocator->tryAllocateAlignedMemory(blockSize, blockSize);
    if (!blockSpace)
        return nullptr;
    if (scribbleFreeCells())
        scribble(blockSpace, blockSize);
    return new Handle(heap, alignedMemoryAllocator, blockSpace);
}

MarkedBlock::Handle::Handle(Heap& heap, AlignedMemoryAllocator* alignedMemoryAllocator, void* blockSpace)
    : m_alignedMemoryAllocator(alignedMemoryAllocator)
    , m_weakSet(heap.vm(), CellContainer())
{
    m_block = new (NotNull, blockSpace) MarkedBlock(*heap.vm(), *this);

    m_weakSet.setContainer(*m_block);

    heap.didAllocateBlock(blockSize);
}

MarkedBlock::Handle::~Handle()
{
    Heap& heap = *this->heap();
    if (computeBalance) {
        balance--;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    removeFromDirectory();
    m_block->~MarkedBlock();
    m_alignedMemoryAllocator->freeAlignedMemory(m_block);
    heap.didFreeBlock(blockSize);
}

MarkedBlock::MarkedBlock(VM& vm, Handle& handle)
{
    new (&footer()) Footer(vm, handle);
    if (MarkedBlockInternal::verbose)
        dataLog(RawPointer(this), ": Allocated.\n");
}

MarkedBlock::~MarkedBlock()
{
    footer().~Footer();
}

MarkedBlock::Footer::Footer(VM& vm, Handle& handle)
    : m_handle(handle)
    , m_vm(&vm)
    , m_markingVersion(MarkedSpace::nullVersion)
    , m_newlyAllocatedVersion(MarkedSpace::nullVersion)
{
}

MarkedBlock::Footer::~Footer()
{
}

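// Drops the free-listed state without building any newlyAllocated bits, so the block simply goes
// back to looking unswept.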
void MarkedBlock::Handle::unsweepWithNoNewlyAllocated()
{
    RELEASE_ASSERT(m_isFreeListed);
    m_isFreeListed = false;
}

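// Records that this block is currently being used as an allocator's free list; a free-listed block
// is no longer considered empty by its directory.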
void MarkedBlock::Handle::setIsFreeListed()
{
    m_directory->setIsEmpty(NoLockingNecessary, this, false);
    m_isFreeListed = true;
}

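// Stops allocating out of this block. Cells that are not on the free list are recorded as
// newlyAllocated, so the heap can still tell live from dead without the (now discarded) free list.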
void MarkedBlock::Handle::stopAllocating(const FreeList& freeList)
{
    auto locker = holdLock(blockFooter().m_lock);

    if (MarkedBlockInternal::verbose)
        dataLog(RawPointer(this), ": MarkedBlock::Handle::stopAllocating!\n");
    ASSERT(!directory()->isAllocated(NoLockingNecessary, this));

    if (!isFreeListed()) {
        if (MarkedBlockInternal::verbose)
            dataLog("There ain't no newly allocated.\n");
        // This means that we either didn't use this block at all for allocation since last GC,
        // or someone had already done stopAllocating() before.
        ASSERT(freeList.allocationWillFail());
        return;
    }

    if (MarkedBlockInternal::verbose)
        dataLog("Free list: ", freeList, "\n");

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    blockFooter().m_newlyAllocated.clearAll();
    blockFooter().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();

    forEachCell(
        [&] (HeapCell* cell, HeapCell::Kind) -> IterationStatus {
            block().setNewlyAllocated(cell);
            return IterationStatus::Continue;
        });

    freeList.forEach(
        [&] (HeapCell* cell) {
            if (MarkedBlockInternal::verbose)
                dataLog("Free cell: ", RawPointer(cell), "\n");
            if (m_attributes.destruction == NeedsDestruction)
                cell->zap();
            block().clearNewlyAllocated(cell);
        });

    m_isFreeListed = false;
}

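// Clears all mark and newlyAllocated bits and then sweeps, so every cell in the block gets
// finalized regardless of whether it was live.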
void MarkedBlock::Handle::lastChanceToFinalize()
{
    directory()->setIsAllocated(NoLockingNecessary, this, false);
    directory()->setIsDestructible(NoLockingNecessary, this, true);
    blockFooter().m_marks.clearAll();
    block().clearHasAnyMarked();
    blockFooter().m_markingVersion = heap()->objectSpace().markingVersion();
    m_weakSet.lastChanceToFinalize();
    blockFooter().m_newlyAllocated.clearAll();
    blockFooter().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
    sweep(nullptr);
}

void MarkedBlock::Handle::resumeAllocating(FreeList& freeList)
{
    {
        auto locker = holdLock(blockFooter().m_lock);

        if (MarkedBlockInternal::verbose)
            dataLog(RawPointer(this), ": MarkedBlock::Handle::resumeAllocating!\n");
        ASSERT(!directory()->isAllocated(NoLockingNecessary, this));
        ASSERT(!isFreeListed());

        if (!block().hasAnyNewlyAllocated()) {
            if (MarkedBlockInternal::verbose)
                dataLog("There ain't no newly allocated.\n");
            // This means we had already exhausted the block when we stopped allocation.
            freeList.clear();
            return;
        }
    }

    // Re-create our free list from before stopping allocation. Note that this may return an empty
    // freelist, in which case the block will still be Marked!
    sweep(&freeList);
}

void MarkedBlock::Handle::zap(const FreeList& freeList)
{
    freeList.forEach(
        [&] (HeapCell* cell) {
            if (m_attributes.destruction == NeedsDestruction)
                cell->zap();
        });
}

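// Slow path of aboutToMark(): the block's mark bits are stale with respect to the current marking
// version, so they are either cleared outright or folded into the newlyAllocated bits before
// marking proceeds.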
void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion)
{
    ASSERT(vm()->heap.objectSpace().isMarking());
    auto locker = holdLock(footer().m_lock);

    if (!areMarksStale(markingVersion))
        return;

    BlockDirectory* directory = handle().directory();

    if (handle().directory()->isAllocated(holdLock(directory->bitvectorLock()), &handle())
        || !marksConveyLivenessDuringMarking(markingVersion)) {
        if (MarkedBlockInternal::verbose)
            dataLog(RawPointer(this), ": Clearing marks without doing anything else.\n");
        // We already know that the block is full and is already recognized as such, or that the
        // block did not survive the previous GC. So, we can clear mark bits the old fashioned
        // way. Note that it's possible for such a block to have newlyAllocated with an up-to-
        // date version! If it does, then we want to leave the newlyAllocated alone, since that
        // means that we had allocated in this previously empty block but did not fill it up, so
        // we created a newlyAllocated.
        footer().m_marks.clearAll();
    } else {
        if (MarkedBlockInternal::verbose)
            dataLog(RawPointer(this), ": Doing things.\n");
        HeapVersion newlyAllocatedVersion = space()->newlyAllocatedVersion();
        if (footer().m_newlyAllocatedVersion == newlyAllocatedVersion) {
            // When do we get here? The block could not have been filled up. The newlyAllocated bits would
            // have had to be created since the end of the last collection. The only things that create
            // them are aboutToMarkSlow, lastChanceToFinalize, and stopAllocating. If it had been
            // aboutToMarkSlow, then we shouldn't be here since the marks wouldn't be stale anymore. It
            // cannot be lastChanceToFinalize. So it must be stopAllocating. That means that we just
            // computed the newlyAllocated bits just before the start of an increment. When we are in that
            // mode, it seems as if newlyAllocated should subsume marks.
            ASSERT(footer().m_newlyAllocated.subsumes(footer().m_marks));
            footer().m_marks.clearAll();
        } else {
            footer().m_newlyAllocated.setAndClear(footer().m_marks);
            footer().m_newlyAllocatedVersion = newlyAllocatedVersion;
        }
    }
    clearHasAnyMarked();
    WTF::storeStoreFence();
    footer().m_markingVersion = markingVersion;

    // This means we're the first ones to mark any object in this block.
    directory->setIsMarkingNotEmpty(holdLock(directory->bitvectorLock()), &handle(), true);
}

void MarkedBlock::resetAllocated()
{
    footer().m_newlyAllocated.clearAll();
    footer().m_newlyAllocatedVersion = MarkedSpace::nullVersion;
}

void MarkedBlock::resetMarks()
{
    // We want aboutToMarkSlow() to see what the mark bits were after the last collection. It uses
    // the version number to distinguish between the marks having already been stale before
    // beginMarking(), or just stale now that beginMarking() bumped the version. If we have a version
    // wraparound, then we will call this method before resetting the version to null. When the
    // version is null, aboutToMarkSlow() will assume that the marks were not stale as of before
    // beginMarking(). Hence the need to whip the marks into shape.
    if (areMarksStale())
        footer().m_marks.clearAll();
    footer().m_markingVersion = MarkedSpace::nullVersion;
}

#if !ASSERT_DISABLED
void MarkedBlock::assertMarksNotStale()
{
    ASSERT(footer().m_markingVersion == vm()->heap.objectSpace().markingVersion());
}
#endif // !ASSERT_DISABLED

bool MarkedBlock::areMarksStale()
{
    return areMarksStale(vm()->heap.objectSpace().markingVersion());
}

bool MarkedBlock::Handle::areMarksStale()
{
    return m_block->areMarksStale();
}

bool MarkedBlock::isMarked(const void* p)
{
    return isMarked(vm()->heap.objectSpace().markingVersion(), p);
}

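// The allocator calls this once it has handed out every cell on the block's free list; the block
// then counts as fully allocated in its directory.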
void MarkedBlock::Handle::didConsumeFreeList()
{
    auto locker = holdLock(blockFooter().m_lock);
    if (MarkedBlockInternal::verbose)
        dataLog(RawPointer(this), ": MarkedBlock::Handle::didConsumeFreeList!\n");
    ASSERT(isFreeListed());
    m_isFreeListed = false;
    directory()->setIsAllocated(NoLockingNecessary, this, true);
}

size_t MarkedBlock::markCount()
{
    return areMarksStale() ? 0 : footer().m_marks.count();
}

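// Resets the biased mark count to the (negative) bias computed in didAddToDirectory(). The count
// is biased so that it reaches zero once the block has enough marked cells to hit the retirement
// threshold; see noteMarkedSlow() below.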
void MarkedBlock::clearHasAnyMarked()
{
    footer().m_biasedMarkCount = footer().m_markCountBias;
}

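// Slow path taken once the biased mark count indicates the block has enough marked cells to be
// retired by its directory.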
void MarkedBlock::noteMarkedSlow()
{
    BlockDirectory* directory = handle().directory();
    directory->setIsMarkingRetired(holdLock(directory->bitvectorLock()), &handle(), true);
}

void MarkedBlock::Handle::removeFromDirectory()
{
    if (!m_directory)
        return;

    m_directory->removeBlock(this);
}

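// Wires the block into a directory: records the directory's cell size and attributes and
// precomputes the per-cell atom count, the end atom, and the mark count bias.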
void MarkedBlock::Handle::didAddToDirectory(BlockDirectory* directory, size_t index)
{
    ASSERT(m_index == std::numeric_limits<size_t>::max());
    ASSERT(!m_directory);

    RELEASE_ASSERT(directory->subspace()->alignedMemoryAllocator() == m_alignedMemoryAllocator);

    m_index = index;
    m_directory = directory;
    blockFooter().m_subspace = directory->subspace();

    size_t cellSize = directory->cellSize();
    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
    m_endAtom = endAtom - m_atomsPerCell + 1;

    m_attributes = directory->attributes();

    if (!isJSCellKind(m_attributes.cellKind))
        RELEASE_ASSERT(m_attributes.destruction == DoesNotNeedDestruction);

    double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock());

    // The mark count bias should be comfortably within this range.
    RELEASE_ASSERT(markCountBias > static_cast<double>(std::numeric_limits<int16_t>::min()));
    RELEASE_ASSERT(markCountBias < 0);

    // This means we haven't marked anything yet.
    blockFooter().m_biasedMarkCount = blockFooter().m_markCountBias = static_cast<int16_t>(markCountBias);
}

void MarkedBlock::Handle::didRemoveFromDirectory()
{
    ASSERT(m_index != std::numeric_limits<size_t>::max());
    ASSERT(m_directory);

    m_index = std::numeric_limits<size_t>::max();
    m_directory = nullptr;
    blockFooter().m_subspace = nullptr;
}

#if !ASSERT_DISABLED
void MarkedBlock::assertValidCell(VM& vm, HeapCell* cell) const
{
    RELEASE_ASSERT(&vm == this->vm());
    RELEASE_ASSERT(const_cast<MarkedBlock*>(this)->handle().cellAlign(cell) == cell);
}
#endif

void MarkedBlock::Handle::dumpState(PrintStream& out)
{
    CommaPrinter comma;
    directory()->forEachBitVectorWithName(
        holdLock(directory()->bitvectorLock()),
        [&] (FastBitVector& bitvector, const char* name) {
            out.print(comma, name, ":", bitvector[index()] ? "YES" : "no");
        });
}

Subspace* MarkedBlock::Handle::subspace() const
{
    return directory()->subspace();
}

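// Sweeps the block, optionally building a free list. Blocks that need destruction are handed to
// the subspace's finishSweep(); the common no-destructor cases are dispatched to one of the
// specializedSweep() instantiations below.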
void MarkedBlock::Handle::sweep(FreeList* freeList)
{
    SweepingScope sweepingScope(*heap());

    SweepMode sweepMode = freeList ? SweepToFreeList : SweepOnly;

    m_directory->setIsUnswept(NoLockingNecessary, this, false);

    m_weakSet.sweep();

    bool needsDestruction = m_attributes.destruction == NeedsDestruction
        && m_directory->isDestructible(NoLockingNecessary, this);

    if (sweepMode == SweepOnly && !needsDestruction)
        return;

    if (m_isFreeListed) {
        dataLog("FATAL: ", RawPointer(this), "->sweep: block is free-listed.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (isAllocated()) {
        dataLog("FATAL: ", RawPointer(this), "->sweep: block is allocated.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (space()->isMarking())
        blockFooter().m_lock.lock();

    subspace()->didBeginSweepingToFreeList(this);

    if (needsDestruction) {
        subspace()->finishSweep(*this, freeList);
        return;
    }

    // Handle the no-destructor specializations here, since we have the most of those. This
    // ensures that they don't get re-specialized for every destructor space.

    EmptyMode emptyMode = this->emptyMode();
    ScribbleMode scribbleMode = this->scribbleMode();
    NewlyAllocatedMode newlyAllocatedMode = this->newlyAllocatedMode();
    MarksMode marksMode = this->marksMode();

    auto trySpecialized = [&] () -> bool {
        if (sweepMode != SweepToFreeList)
            return false;
        if (scribbleMode != DontScribble)
            return false;
        if (newlyAllocatedMode != DoesNotHaveNewlyAllocated)
            return false;

        switch (emptyMode) {
        case IsEmpty:
            switch (marksMode) {
            case MarksNotStale:
                specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
                return true;
            case MarksStale:
                specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
                return true;
            }
            break;
        case NotEmpty:
            switch (marksMode) {
            case MarksNotStale:
                specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(freeList, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
                return true;
            case MarksStale:
                specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(freeList, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
                return true;
            }
            break;
        }

        return false;
    };

    if (trySpecialized())
        return;

    // The template arguments don't matter because the first one is false.
    specializedSweep<false, IsEmpty, SweepOnly, BlockHasNoDestructors, DontScribble, HasNewlyAllocated, MarksStale>(freeList, emptyMode, sweepMode, BlockHasNoDestructors, scribbleMode, newlyAllocatedMode, marksMode, [] (VM&, JSCell*) { });
}

bool MarkedBlock::Handle::isFreeListedCell(const void* target) const
{
    ASSERT(isFreeListed());
    return m_directory->isFreeListedCell(target);
}

} // namespace JSC

namespace WTF {

void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode mode)
{
    switch (mode) {
    case JSC::MarkedBlock::Handle::SweepToFreeList:
        out.print("SweepToFreeList");
        return;
    case JSC::MarkedBlock::Handle::SweepOnly:
        out.print("SweepOnly");
        return;
    }
    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF
