1 | /* |
2 | * Copyright (C) 1999-2000 Harri Porten ([email protected]) |
3 | * Copyright (C) 2001 Peter Kelly ([email protected]) |
4 | * Copyright (C) 2003-2018 Apple Inc. All rights reserved. |
5 | * |
6 | * This library is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2 of the License, or (at your option) any later version. |
10 | * |
11 | * This library is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with this library; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * |
20 | */ |
21 | |
22 | #pragma once |
23 | |
24 | #include "CellAttributes.h" |
25 | #include "DestructionMode.h" |
26 | #include "HeapCell.h" |
27 | #include "IterationStatus.h" |
28 | #include "WeakSet.h" |
29 | #include <wtf/Atomics.h> |
30 | #include <wtf/Bitmap.h> |
#include <wtf/CountingLock.h>
#include <wtf/HashFunctions.h>
33 | #include <wtf/StdLibExtras.h> |
34 | |
35 | namespace JSC { |
36 | |
37 | class AlignedMemoryAllocator; |
38 | class FreeList; |
39 | class Heap; |
40 | class JSCell; |
41 | class BlockDirectory; |
42 | class MarkedSpace; |
43 | class SlotVisitor; |
44 | class Subspace; |
45 | |
46 | typedef uint32_t HeapVersion; |
47 | |
48 | // A marked block is a page-aligned container for heap-allocated objects. |
49 | // Objects are allocated within cells of the marked block. For a given |
50 | // marked block, all cells have the same size. Objects smaller than the |
51 | // cell size may be allocated in the marked block, in which case the |
52 | // allocation suffers from internal fragmentation: wasted space whose |
53 | // size is equal to the difference between the cell size and the object |
54 | // size. |
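//
// As a rough illustration (not normative): with atomSize = 16, a block whose cells are
// three atoms (48 bytes) wastes 8 bytes of each cell that holds a 40-byte object.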
55 | |
56 | class MarkedBlock { |
57 | WTF_MAKE_NONCOPYABLE(MarkedBlock); |
58 | friend class LLIntOffsetsExtractor; |
59 | friend struct VerifyMarked; |
60 | |
61 | public: |
62 | class Footer; |
63 | class Handle; |
64 | private: |
65 | friend class Footer; |
66 | friend class Handle; |
67 | public: |
68 | static constexpr size_t atomSize = 16; // bytes |
69 | |
70 | // Block size must be at least as large as the system page size. |
71 | #if CPU(PPC64) || CPU(PPC64LE) || CPU(PPC) || CPU(UNKNOWN) |
72 | static constexpr size_t blockSize = 64 * KB; |
73 | #else |
74 | static constexpr size_t blockSize = 16 * KB; |
75 | #endif |
76 | |
77 | static constexpr size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two. |
78 | |
79 | static constexpr size_t atomsPerBlock = blockSize / atomSize; |
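
// For instance, with the 16 KB block size atomsPerBlock is 16384 / 16 = 1024 atoms;
// the 64 KB configuration yields 4096.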
80 | |
static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");
83 | |
84 | struct VoidFunctor { |
85 | typedef void ReturnType; |
86 | void returnValue() { } |
87 | }; |
88 | |
89 | class CountFunctor { |
90 | public: |
91 | typedef size_t ReturnType; |
92 | |
93 | CountFunctor() : m_count(0) { } |
94 | void count(size_t count) const { m_count += count; } |
95 | ReturnType returnValue() const { return m_count; } |
96 | |
97 | private: |
98 | // FIXME: This is mutable because we're using a functor rather than C++ lambdas. |
99 | // https://bugs.webkit.org/show_bug.cgi?id=159644 |
100 | mutable ReturnType m_count; |
101 | }; |
102 | |
103 | class Handle { |
104 | WTF_MAKE_NONCOPYABLE(Handle); |
105 | WTF_MAKE_FAST_ALLOCATED; |
106 | friend class LLIntOffsetsExtractor; |
107 | friend class MarkedBlock; |
108 | friend struct VerifyMarked; |
109 | public: |
110 | |
111 | ~Handle(); |
112 | |
113 | MarkedBlock& block(); |
114 | MarkedBlock::Footer& blockFooter(); |
115 | |
116 | void* cellAlign(void*); |
117 | |
118 | bool isEmpty(); |
119 | |
120 | void lastChanceToFinalize(); |
121 | |
122 | BlockDirectory* directory() const; |
123 | Subspace* subspace() const; |
124 | AlignedMemoryAllocator* alignedMemoryAllocator() const; |
125 | Heap* heap() const; |
126 | inline MarkedSpace* space() const; |
127 | VM* vm() const; |
128 | WeakSet& weakSet(); |
129 | |
130 | enum SweepMode { SweepOnly, SweepToFreeList }; |
131 | |
132 | // Sweeping ensures that destructors get called and removes the block from the unswept |
133 | // set. Sweeping to free list also removes the block from the empty set, if it was in that |
134 | // set. Sweeping with SweepOnly may add this block to the empty set, if the block is found |
135 | // to be empty. The free-list being null implies SweepOnly. |
136 | // |
137 | // Note that you need to make sure that the empty bit reflects reality. If it's not set |
138 | // and the block is freshly created, then we'll make the mistake of running destructors in |
139 | // the block. If it's not set and the block has nothing marked, then we'll make the |
140 | // mistake of making a pop freelist rather than a bump freelist. |
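//
// Illustrative call shapes (not a prescription): sweep(nullptr) performs a SweepOnly
// pass, while sweep(&someFreeList) sweeps this block to that free list so it can be
// allocated from.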
141 | void sweep(FreeList*); |
142 | |
143 | // This is to be called by Subspace. |
144 | template<typename DestroyFunc> |
145 | void finishSweepKnowingHeapCellType(FreeList*, const DestroyFunc&); |
146 | |
147 | void unsweepWithNoNewlyAllocated(); |
148 | |
149 | void zap(const FreeList&); |
150 | |
151 | void shrink(); |
152 | |
153 | void visitWeakSet(SlotVisitor&); |
154 | void reapWeakSet(); |
155 | |
156 | // While allocating from a free list, MarkedBlock temporarily has bogus |
157 | // cell liveness data. To restore accurate cell liveness data, call one |
158 | // of these functions: |
159 | void didConsumeFreeList(); // Call this once you've allocated all the items in the free list. |
160 | void stopAllocating(const FreeList&); |
161 | void resumeAllocating(FreeList&); // Call this if you canonicalized a block for some non-collection related purpose. |
162 | |
163 | size_t cellSize(); |
164 | inline unsigned cellsPerBlock(); |
165 | |
166 | const CellAttributes& attributes() const; |
167 | DestructionMode destruction() const; |
168 | bool needsDestruction() const; |
169 | HeapCell::Kind cellKind() const; |
170 | |
171 | size_t markCount(); |
172 | size_t size(); |
173 | |
174 | bool isAllocated(); |
175 | |
176 | bool isLive(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const HeapCell*); |
177 | inline bool isLiveCell(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const void*); |
178 | |
179 | bool isLive(const HeapCell*); |
180 | bool isLiveCell(const void*); |
181 | |
182 | bool isFreeListedCell(const void* target) const; |
183 | |
184 | template <typename Functor> IterationStatus forEachCell(const Functor&); |
185 | template <typename Functor> inline IterationStatus forEachLiveCell(const Functor&); |
186 | template <typename Functor> inline IterationStatus forEachDeadCell(const Functor&); |
187 | template <typename Functor> inline IterationStatus forEachMarkedCell(const Functor&); |
188 | |
189 | JS_EXPORT_PRIVATE bool areMarksStale(); |
190 | bool areMarksStaleForSweep(); |
191 | |
192 | void assertMarksNotStale(); |
193 | |
194 | bool isFreeListed() const { return m_isFreeListed; } |
195 | |
196 | size_t index() const { return m_index; } |
197 | |
198 | void removeFromDirectory(); |
199 | |
200 | void didAddToDirectory(BlockDirectory*, size_t index); |
201 | void didRemoveFromDirectory(); |
202 | |
203 | void dumpState(PrintStream&); |
204 | |
205 | private: |
206 | Handle(Heap&, AlignedMemoryAllocator*, void*); |
207 | |
208 | enum SweepDestructionMode { BlockHasNoDestructors, BlockHasDestructors, BlockHasDestructorsAndCollectorIsRunning }; |
209 | enum ScribbleMode { DontScribble, Scribble }; |
210 | enum EmptyMode { IsEmpty, NotEmpty }; |
211 | enum NewlyAllocatedMode { HasNewlyAllocated, DoesNotHaveNewlyAllocated }; |
212 | enum MarksMode { MarksStale, MarksNotStale }; |
213 | |
214 | SweepDestructionMode sweepDestructionMode(); |
215 | EmptyMode emptyMode(); |
216 | ScribbleMode scribbleMode(); |
217 | NewlyAllocatedMode newlyAllocatedMode(); |
218 | MarksMode marksMode(); |
219 | |
220 | template<bool, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, typename DestroyFunc> |
221 | void specializedSweep(FreeList*, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, const DestroyFunc&); |
222 | |
223 | void setIsFreeListed(); |
224 | |
225 | MarkedBlock::Handle* m_prev { nullptr }; |
226 | MarkedBlock::Handle* m_next { nullptr }; |
227 | |
228 | size_t m_atomsPerCell { std::numeric_limits<size_t>::max() }; |
229 | size_t m_endAtom { std::numeric_limits<size_t>::max() }; // This is a fuzzy end. Always test for < m_endAtom. |
230 | |
231 | CellAttributes m_attributes; |
232 | bool m_isFreeListed { false }; |
233 | |
234 | AlignedMemoryAllocator* m_alignedMemoryAllocator { nullptr }; |
235 | BlockDirectory* m_directory { nullptr }; |
236 | size_t m_index { std::numeric_limits<size_t>::max() }; |
237 | WeakSet m_weakSet; |
238 | |
239 | MarkedBlock* m_block { nullptr }; |
240 | }; |
241 | |
242 | private: |
243 | static constexpr size_t atomAlignmentMask = atomSize - 1; |
244 | |
245 | typedef char Atom[atomSize]; |
246 | |
247 | public: |
class Footer {
249 | public: |
250 | Footer(VM&, Handle&); |
251 | (); |
252 | |
253 | private: |
254 | friend class LLIntOffsetsExtractor; |
255 | friend class MarkedBlock; |
256 | |
257 | Handle& m_handle; |
VM* m_vm;
Subspace* m_subspace;
260 | |
CountingLock m_lock;
262 | |
263 | // The actual mark count can be computed by doing: m_biasedMarkCount - m_markCountBias. Note |
264 | // that this count is racy. It will accurately detect whether or not exactly zero things were |
265 | // marked, but if N things got marked, then this may report anything in the range [1, N] (or |
// before unbiasing, it would be [1 + m_markCountBias, N + m_markCountBias].)
int16_t m_biasedMarkCount;
268 | |
269 | // We bias the mark count so that if m_biasedMarkCount >= 0 then the block should be retired. |
270 | // We go to all this trouble to make marking a bit faster: this way, marking knows when to |
271 | // retire a block using a js/jns on m_biasedMarkCount. |
272 | // |
273 | // For example, if a block has room for 100 objects and retirement happens whenever 90% are |
274 | // live, then m_markCountBias will be -90. This way, when marking begins, this will cause us to |
275 | // set m_biasedMarkCount to -90 as well, since: |
276 | // |
277 | // m_biasedMarkCount = actualMarkCount + m_markCountBias. |
278 | // |
279 | // Marking an object will increment m_biasedMarkCount. Once 90 objects get marked, we will have |
280 | // m_biasedMarkCount = 0, which will trigger retirement. In other words, we want to set |
281 | // m_markCountBias like so: |
282 | // |
283 | // m_markCountBias = -(minMarkedBlockUtilization * cellsPerBlock) |
284 | // |
285 | // All of this also means that you can detect if any objects are marked by doing: |
286 | // |
287 | // m_biasedMarkCount != m_markCountBias |
int16_t m_markCountBias;
289 | |
HeapVersion m_markingVersion;
HeapVersion m_newlyAllocatedVersion;
292 | |
Bitmap<atomsPerBlock> m_marks;
Bitmap<atomsPerBlock> m_newlyAllocated;
295 | }; |
296 | |
297 | private: |
298 | Footer& footer(); |
299 | const Footer& footer() const; |
300 | |
301 | public: |
302 | static constexpr size_t endAtom = (blockSize - sizeof(Footer)) / atomSize; |
303 | static constexpr size_t payloadSize = endAtom * atomSize; |
static constexpr size_t footerSize = blockSize - payloadSize;
305 | |
static_assert(payloadSize == ((blockSize - sizeof(MarkedBlock::Footer)) & ~(atomSize - 1)), "Payload size computed the alternate way should give the same result");
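
// Layout implied by the constants above: atoms [0, endAtom) hold the payload cells, and
// the Footer occupies the final footerSize bytes of the block, at byte offset payloadSize
// (that is, atoms() + endAtom; see footer() below).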
307 | |
308 | static MarkedBlock::Handle* tryCreate(Heap&, AlignedMemoryAllocator*); |
309 | |
310 | Handle& handle(); |
311 | const Handle& handle() const; |
312 | |
313 | VM* vm() const; |
314 | inline Heap* heap() const; |
315 | inline MarkedSpace* space() const; |
316 | |
317 | static bool isAtomAligned(const void*); |
318 | static MarkedBlock* blockFor(const void*); |
319 | size_t atomNumber(const void*); |
320 | |
321 | size_t markCount(); |
322 | |
323 | bool isMarked(const void*); |
324 | bool isMarked(HeapVersion markingVersion, const void*); |
325 | bool isMarked(const void*, Dependency); |
326 | bool testAndSetMarked(const void*, Dependency); |
327 | |
328 | bool isAtom(const void*); |
329 | void clearMarked(const void*); |
330 | |
331 | bool isNewlyAllocated(const void*); |
332 | void setNewlyAllocated(const void*); |
333 | void clearNewlyAllocated(const void*); |
334 | const Bitmap<atomsPerBlock>& newlyAllocated() const; |
335 | |
336 | HeapVersion newlyAllocatedVersion() const { return footer().m_newlyAllocatedVersion; } |
337 | |
338 | inline bool isNewlyAllocatedStale() const; |
339 | |
340 | inline bool hasAnyNewlyAllocated(); |
341 | void resetAllocated(); |
342 | |
343 | size_t cellSize(); |
344 | const CellAttributes& attributes() const; |
345 | |
346 | bool hasAnyMarked() const; |
347 | void noteMarked(); |
348 | #if ASSERT_DISABLED |
349 | void assertValidCell(VM&, HeapCell*) const { } |
350 | #else |
351 | void assertValidCell(VM&, HeapCell*) const; |
352 | #endif |
353 | |
354 | WeakSet& weakSet(); |
355 | |
JS_EXPORT_PRIVATE bool areMarksStale();
357 | bool areMarksStale(HeapVersion markingVersion); |
358 | |
359 | Dependency aboutToMark(HeapVersion markingVersion); |
360 | |
361 | #if ASSERT_DISABLED |
362 | void assertMarksNotStale() { } |
363 | #else |
364 | JS_EXPORT_PRIVATE void assertMarksNotStale(); |
365 | #endif |
366 | |
367 | void resetMarks(); |
368 | |
369 | bool isMarkedRaw(const void* p); |
370 | HeapVersion markingVersion() const { return footer().m_markingVersion; } |
371 | |
372 | const Bitmap<atomsPerBlock>& marks() const; |
373 | |
374 | CountingLock& lock() { return footer().m_lock; } |
375 | |
376 | Subspace* subspace() const { return footer().m_subspace; } |
377 | |
378 | void populatePage() const |
379 | { |
380 | *bitwise_cast<volatile uint8_t*>(&footer()); |
381 | } |
382 | |
static constexpr size_t offsetOfFooter = endAtom * atomSize;
384 | |
385 | private: |
386 | MarkedBlock(VM&, Handle&); |
387 | ~MarkedBlock(); |
388 | Atom* atoms(); |
389 | |
390 | JS_EXPORT_PRIVATE void aboutToMarkSlow(HeapVersion markingVersion); |
391 | void clearHasAnyMarked(); |
392 | |
393 | void noteMarkedSlow(); |
394 | |
395 | inline bool marksConveyLivenessDuringMarking(HeapVersion markingVersion); |
396 | inline bool marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion); |
397 | }; |
398 | |
inline MarkedBlock::Footer& MarkedBlock::footer()
400 | { |
401 | return *bitwise_cast<MarkedBlock::Footer*>(atoms() + endAtom); |
402 | } |
403 | |
inline const MarkedBlock::Footer& MarkedBlock::footer() const
405 | { |
406 | return const_cast<MarkedBlock*>(this)->footer(); |
407 | } |
408 | |
409 | inline MarkedBlock::Handle& MarkedBlock::handle() |
410 | { |
411 | return footer().m_handle; |
412 | } |
413 | |
414 | inline const MarkedBlock::Handle& MarkedBlock::handle() const |
415 | { |
416 | return const_cast<MarkedBlock*>(this)->handle(); |
417 | } |
418 | |
419 | inline MarkedBlock& MarkedBlock::Handle::block() |
420 | { |
421 | return *m_block; |
422 | } |
423 | |
424 | inline MarkedBlock::Footer& MarkedBlock::Handle::blockFooter() |
425 | { |
426 | return block().footer(); |
427 | } |
428 | |
429 | inline MarkedBlock::Atom* MarkedBlock::atoms() |
430 | { |
431 | return reinterpret_cast<Atom*>(this); |
432 | } |
433 | |
434 | inline bool MarkedBlock::isAtomAligned(const void* p) |
435 | { |
436 | return !(reinterpret_cast<uintptr_t>(p) & atomAlignmentMask); |
437 | } |
438 | |
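// Worked example (illustrative): in a block whose cells span 32 bytes, a pointer 40 bytes
// past atoms() is rounded down to offset 32, the start of its enclosing cell.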
439 | inline void* MarkedBlock::Handle::cellAlign(void* p) |
440 | { |
441 | uintptr_t base = reinterpret_cast<uintptr_t>(block().atoms()); |
442 | uintptr_t bits = reinterpret_cast<uintptr_t>(p); |
443 | bits -= base; |
444 | bits -= bits % cellSize(); |
445 | bits += base; |
446 | return reinterpret_cast<void*>(bits); |
447 | } |
448 | |
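// For example (illustration only): with 16 KB blocks, blockMask clears the low 14 bits, so
// a pointer such as 0x7f1234567abc maps to the MarkedBlock at 0x7f1234564000.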
449 | inline MarkedBlock* MarkedBlock::blockFor(const void* p) |
450 | { |
451 | return reinterpret_cast<MarkedBlock*>(reinterpret_cast<uintptr_t>(p) & blockMask); |
452 | } |
453 | |
454 | inline BlockDirectory* MarkedBlock::Handle::directory() const |
455 | { |
456 | return m_directory; |
457 | } |
458 | |
459 | inline AlignedMemoryAllocator* MarkedBlock::Handle::alignedMemoryAllocator() const |
460 | { |
461 | return m_alignedMemoryAllocator; |
462 | } |
463 | |
464 | inline Heap* MarkedBlock::Handle::heap() const |
465 | { |
466 | return m_weakSet.heap(); |
467 | } |
468 | |
469 | inline VM* MarkedBlock::Handle::vm() const |
470 | { |
471 | return m_weakSet.vm(); |
472 | } |
473 | |
474 | inline VM* MarkedBlock::vm() const |
475 | { |
476 | return footer().m_vm; |
477 | } |
478 | |
479 | inline WeakSet& MarkedBlock::Handle::weakSet() |
480 | { |
481 | return m_weakSet; |
482 | } |
483 | |
484 | inline WeakSet& MarkedBlock::weakSet() |
485 | { |
486 | return handle().weakSet(); |
487 | } |
488 | |
489 | inline void MarkedBlock::Handle::shrink() |
490 | { |
491 | m_weakSet.shrink(); |
492 | } |
493 | |
494 | inline void MarkedBlock::Handle::visitWeakSet(SlotVisitor& visitor) |
495 | { |
496 | return m_weakSet.visit(visitor); |
497 | } |
498 | |
499 | inline void MarkedBlock::Handle::reapWeakSet() |
500 | { |
501 | m_weakSet.reap(); |
502 | } |
503 | |
504 | inline size_t MarkedBlock::Handle::cellSize() |
505 | { |
506 | return m_atomsPerCell * atomSize; |
507 | } |
508 | |
509 | inline size_t MarkedBlock::cellSize() |
510 | { |
511 | return handle().cellSize(); |
512 | } |
513 | |
514 | inline const CellAttributes& MarkedBlock::Handle::attributes() const |
515 | { |
516 | return m_attributes; |
517 | } |
518 | |
519 | inline const CellAttributes& MarkedBlock::attributes() const |
520 | { |
521 | return handle().attributes(); |
522 | } |
523 | |
524 | inline bool MarkedBlock::Handle::needsDestruction() const |
525 | { |
526 | return m_attributes.destruction == NeedsDestruction; |
527 | } |
528 | |
529 | inline DestructionMode MarkedBlock::Handle::destruction() const |
530 | { |
531 | return m_attributes.destruction; |
532 | } |
533 | |
534 | inline HeapCell::Kind MarkedBlock::Handle::cellKind() const |
535 | { |
536 | return m_attributes.cellKind; |
537 | } |
538 | |
539 | inline size_t MarkedBlock::Handle::markCount() |
540 | { |
541 | return m_block->markCount(); |
542 | } |
543 | |
544 | inline size_t MarkedBlock::Handle::size() |
545 | { |
546 | return markCount() * cellSize(); |
547 | } |
548 | |
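// For example, a pointer 0x120 bytes past the start of the block is atom 0x120 / 16 = 18.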
549 | inline size_t MarkedBlock::atomNumber(const void* p) |
550 | { |
551 | return (reinterpret_cast<uintptr_t>(p) - reinterpret_cast<uintptr_t>(this)) / atomSize; |
552 | } |
553 | |
inline bool MarkedBlock::areMarksStale(HeapVersion markingVersion)
555 | { |
556 | return markingVersion != footer().m_markingVersion; |
557 | } |
558 | |
559 | inline Dependency MarkedBlock::aboutToMark(HeapVersion markingVersion) |
560 | { |
561 | HeapVersion version = footer().m_markingVersion; |
562 | if (UNLIKELY(version != markingVersion)) |
563 | aboutToMarkSlow(markingVersion); |
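// The returned Dependency is meant to be threaded into a subsequent marks-bitmap read
// (e.g. isMarked(p, dependency) below), ordering this version load before that read.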
564 | return Dependency::fence(version); |
565 | } |
566 | |
567 | inline void MarkedBlock::Handle::assertMarksNotStale() |
568 | { |
569 | block().assertMarksNotStale(); |
570 | } |
571 | |
572 | inline bool MarkedBlock::isMarkedRaw(const void* p) |
573 | { |
574 | return footer().m_marks.get(atomNumber(p)); |
575 | } |
576 | |
577 | inline bool MarkedBlock::isMarked(HeapVersion markingVersion, const void* p) |
578 | { |
579 | HeapVersion version = footer().m_markingVersion; |
580 | if (UNLIKELY(version != markingVersion)) |
581 | return false; |
582 | return footer().m_marks.get(atomNumber(p), Dependency::fence(version)); |
583 | } |
584 | |
585 | inline bool MarkedBlock::isMarked(const void* p, Dependency dependency) |
586 | { |
587 | assertMarksNotStale(); |
588 | return footer().m_marks.get(atomNumber(p), dependency); |
589 | } |
590 | |
591 | inline bool MarkedBlock::testAndSetMarked(const void* p, Dependency dependency) |
592 | { |
593 | assertMarksNotStale(); |
594 | return footer().m_marks.concurrentTestAndSet(atomNumber(p), dependency); |
595 | } |
596 | |
597 | inline const Bitmap<MarkedBlock::atomsPerBlock>& MarkedBlock::marks() const |
598 | { |
599 | return footer().m_marks; |
600 | } |
601 | |
602 | inline bool MarkedBlock::isNewlyAllocated(const void* p) |
603 | { |
604 | return footer().m_newlyAllocated.get(atomNumber(p)); |
605 | } |
606 | |
607 | inline void MarkedBlock::setNewlyAllocated(const void* p) |
608 | { |
609 | footer().m_newlyAllocated.set(atomNumber(p)); |
610 | } |
611 | |
612 | inline void MarkedBlock::clearNewlyAllocated(const void* p) |
613 | { |
614 | footer().m_newlyAllocated.clear(atomNumber(p)); |
615 | } |
616 | |
617 | inline const Bitmap<MarkedBlock::atomsPerBlock>& MarkedBlock::newlyAllocated() const |
618 | { |
619 | return footer().m_newlyAllocated; |
620 | } |
621 | |
622 | inline bool MarkedBlock::isAtom(const void* p) |
623 | { |
624 | ASSERT(MarkedBlock::isAtomAligned(p)); |
625 | size_t atomNumber = this->atomNumber(p); |
626 | if (atomNumber % handle().m_atomsPerCell) // Filters pointers into cell middles. |
627 | return false; |
if (atomNumber >= handle().m_endAtom) // Filters pointers to cells outside the valid range.
629 | return false; |
630 | return true; |
631 | } |
632 | |
633 | template <typename Functor> |
634 | inline IterationStatus MarkedBlock::Handle::forEachCell(const Functor& functor) |
635 | { |
636 | HeapCell::Kind kind = m_attributes.cellKind; |
637 | for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) { |
638 | HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]); |
639 | if (functor(cell, kind) == IterationStatus::Done) |
640 | return IterationStatus::Done; |
641 | } |
642 | return IterationStatus::Continue; |
643 | } |
644 | |
645 | inline bool MarkedBlock::hasAnyMarked() const |
646 | { |
647 | return footer().m_biasedMarkCount != footer().m_markCountBias; |
648 | } |
649 | |
650 | inline void MarkedBlock::noteMarked() |
651 | { |
652 | // This is racy by design. We don't want to pay the price of an atomic increment! |
653 | int16_t biasedMarkCount = footer().m_biasedMarkCount; |
654 | ++biasedMarkCount; |
655 | footer().m_biasedMarkCount = biasedMarkCount; |
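// Hitting zero means the biased count reached the retirement threshold described in
// Footer::m_markCountBias; the slow path below deals with retiring the block.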
656 | if (UNLIKELY(!biasedMarkCount)) |
657 | noteMarkedSlow(); |
658 | } |
659 | |
660 | } // namespace JSC |
661 | |
662 | namespace WTF { |
663 | |
664 | struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> { |
665 | static unsigned hash(JSC::MarkedBlock* const& key) |
666 | { |
667 | // Aligned VM regions tend to be monotonically increasing integers, |
668 | // which is a great hash function, but we have to remove the low bits, |
669 | // since they're always zero, which is a terrible hash function! |
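// Since blockSize is a compile-time power of two, this division compiles to a plain
// right shift (by 14 for 16 KB blocks), so nearby blocks still hash to nearby values.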
670 | return reinterpret_cast<uintptr_t>(key) / JSC::MarkedBlock::blockSize; |
671 | } |
672 | }; |
673 | |
674 | template<> struct DefaultHash<JSC::MarkedBlock*> { |
675 | typedef MarkedBlockHash Hash; |
676 | }; |
677 | |
678 | void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode); |
679 | |
680 | } // namespace WTF |
681 | |