/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "AtomIndices.h"
#include "IsoCellSet.h"
#include "MarkedBlockInlines.h"

namespace JSC {

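// Adds the cell to the set. Returns true if the cell was not already present.
// Lower-tier precise allocations are tracked in a dedicated bitvector; cells in
// marked blocks use per-block bitmaps, allocated lazily by addSlow().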
inline bool IsoCellSet::add(HeapCell* cell)
{
    if (cell->isPreciseAllocation())
        return !m_lowerTierBits.concurrentTestAndSet(cell->preciseAllocation().lowerTierIndex());
    AtomIndices atomIndices(cell);
    auto& bitsPtrRef = m_bits[atomIndices.blockIndex];
    auto* bits = bitsPtrRef.get();
    if (UNLIKELY(!bits))
        bits = addSlow(atomIndices.blockIndex);
    return !bits->concurrentTestAndSet(atomIndices.atomNumber);
}

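// Removes the cell from the set. If the cell's block never had any members
// added, there is no per-block bitmap and nothing needs to be cleared.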
inline bool IsoCellSet::remove(HeapCell* cell)
{
    if (cell->isPreciseAllocation())
        return !m_lowerTierBits.concurrentTestAndClear(cell->preciseAllocation().lowerTierIndex());
    AtomIndices atomIndices(cell);
    auto& bitsPtrRef = m_bits[atomIndices.blockIndex];
    auto* bits = bitsPtrRef.get();
    if (!bits)
        return false;
    return bits->concurrentTestAndClear(atomIndices.atomNumber);
}

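// Returns whether the cell is currently in the set, consulting the lower-tier
// bitvector for precise allocations and the per-block bitmap otherwise.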
inline bool IsoCellSet::contains(HeapCell* cell) const
{
    if (cell->isPreciseAllocation())
        return m_lowerTierBits.get(cell->preciseAllocation().lowerTierIndex());
    AtomIndices atomIndices(cell);
    auto* bits = m_bits[atomIndices.blockIndex].get();
    if (bits)
        return bits->get(atomIndices.atomNumber);
    return false;
}

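// Invokes func(cell, kind) for every marked cell in this set. Only blocks whose
// marking bits are non-empty and that have a bitmap for this set are walked;
// precise allocations are checked separately against the lower-tier bits.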
template<typename Func>
void IsoCellSet::forEachMarkedCell(const Func& func)
{
    BlockDirectory& directory = m_subspace.m_directory;
    (directory.m_bits.markingNotEmpty() & m_blocksWithBits).forEachSetBit(
        [&] (size_t blockIndex) {
            MarkedBlock::Handle* block = directory.m_blocks[blockIndex];

            auto* bits = m_bits[blockIndex].get();
            block->forEachMarkedCell(
                [&] (size_t atomNumber, HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
                    if (bits->get(atomNumber))
                        func(cell, kind);
                    return IterationStatus::Continue;
                });
        });

    CellAttributes attributes = m_subspace.attributes();
    m_subspace.forEachPreciseAllocation(
        [&] (PreciseAllocation* allocation) {
            if (m_lowerTierBits.get(allocation->lowerTierIndex()) && allocation->isMarked())
                func(allocation->cell(), attributes.cellKind);
        });
}

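// Returns a SharedTask that parallel GC threads can run to visit the marked
// cells in this set. Each worker draws marked blocks from a shared source; the
// precise allocations are visited by whichever worker gets there first, with
// the task's lock ensuring they are only visited once.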
template<typename Func>
Ref<SharedTask<void(SlotVisitor&)>> IsoCellSet::forEachMarkedCellInParallel(const Func& func)
{
    class Task : public SharedTask<void(SlotVisitor&)> {
    public:
        Task(IsoCellSet& set, const Func& func)
            : m_set(set)
            , m_blockSource(set.parallelNotEmptyMarkedBlockSource())
            , m_func(func)
        {
        }

        void run(SlotVisitor& visitor) override
        {
            while (MarkedBlock::Handle* handle = m_blockSource->run()) {
                size_t blockIndex = handle->index();
                auto* bits = m_set.m_bits[blockIndex].get();
                handle->forEachMarkedCell(
                    [&] (size_t atomNumber, HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
                        if (bits->get(atomNumber))
                            m_func(visitor, cell, kind);
                        return IterationStatus::Continue;
                    });
            }

            {
                auto locker = holdLock(m_lock);
                if (!m_needToVisitPreciseAllocations)
                    return;
                m_needToVisitPreciseAllocations = false;
            }

            CellAttributes attributes = m_set.m_subspace.attributes();
            m_set.m_subspace.forEachPreciseAllocation(
                [&] (PreciseAllocation* allocation) {
                    if (m_set.m_lowerTierBits.get(allocation->lowerTierIndex()) && allocation->isMarked())
                        m_func(visitor, allocation->cell(), attributes.cellKind);
                });
        }

    private:
        IsoCellSet& m_set;
        Ref<SharedTask<MarkedBlock::Handle*()>> m_blockSource;
        Func m_func;
        Lock m_lock;
        bool m_needToVisitPreciseAllocations { true };
    };

    return adoptRef(*new Task(*this, func));
}

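// Invokes func(cell, kind) for every cell in the set that is still live,
// whether or not it has been marked in the current collection. All blocks with
// a bitmap for this set are walked, not just the ones with marked cells.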
template<typename Func>
void IsoCellSet::forEachLiveCell(const Func& func)
{
    BlockDirectory& directory = m_subspace.m_directory;
    m_blocksWithBits.forEachSetBit(
        [&] (size_t blockIndex) {
            MarkedBlock::Handle* block = directory.m_blocks[blockIndex];

            auto* bits = m_bits[blockIndex].get();
            block->forEachCell(
                [&] (size_t atomNumber, HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
                    if (bits->get(atomNumber) && block->isLive(cell))
                        func(cell, kind);
                    return IterationStatus::Continue;
                });
        });

    CellAttributes attributes = m_subspace.attributes();
    m_subspace.forEachPreciseAllocation(
        [&] (PreciseAllocation* allocation) {
            if (m_lowerTierBits.get(allocation->lowerTierIndex()) && allocation->isLive())
                func(allocation->cell(), attributes.cellKind);
        });
}

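// Clears the lower-tier bit at the given index, so the set no longer claims the
// corresponding precise allocation.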
inline void IsoCellSet::clearLowerTierCell(unsigned index)
{
    m_lowerTierBits.concurrentTestAndClear(index);
}

} // namespace JSC