1/*
2 * Copyright (C) 2017 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#pragma once
27
28#include "AtomIndices.h"
29#include "IsoCellSet.h"
30#include "MarkedBlockInlines.h"
31
32namespace JSC {
33
34inline bool IsoCellSet::add(HeapCell* cell)
35{
36 AtomIndices atomIndices(cell);
37 auto& bitsPtrRef = m_bits[atomIndices.blockIndex];
38 auto* bits = bitsPtrRef.get();
39 if (UNLIKELY(!bits))
40 bits = addSlow(atomIndices.blockIndex);
41 return !bits->concurrentTestAndSet(atomIndices.atomNumber);
42}
43
44inline bool IsoCellSet::remove(HeapCell* cell)
45{
46 AtomIndices atomIndices(cell);
47 auto& bitsPtrRef = m_bits[atomIndices.blockIndex];
48 auto* bits = bitsPtrRef.get();
49 if (!bits)
50 return false;
51 return bits->concurrentTestAndClear(atomIndices.atomNumber);
52}
53
54inline bool IsoCellSet::contains(HeapCell* cell) const
55{
56 AtomIndices atomIndices(cell);
57 auto* bits = m_bits[atomIndices.blockIndex].get();
58 if (bits)
59 return bits->get(atomIndices.atomNumber);
60 return false;
61}
62
// Invokes func(cell, kind) for every cell that is both a member of this set
// and marked. Intersecting the directory's "marking not empty" bitvector with
// m_blocksWithBits skips, up front, every block that either has no marked
// cells or has never had a member added to this set.
template<typename Func>
void IsoCellSet::forEachMarkedCell(const Func& func)
{
    BlockDirectory& directory = m_subspace.m_directory;
    (directory.m_markingNotEmpty & m_blocksWithBits).forEachSetBit(
        [&] (size_t blockIndex) {
            MarkedBlock::Handle* block = directory.m_blocks[blockIndex];

            // Because blockIndex came from m_blocksWithBits, this set has a
            // bitvector for the block, so bits is non-null here.
            auto* bits = m_bits[blockIndex].get();
            block->forEachMarkedCell(
                [&] (size_t atomNumber, HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
                    if (bits->get(atomNumber))
                        func(cell, kind);
                    return IterationStatus::Continue;
                });
        });
}
80
81template<typename Func>
82Ref<SharedTask<void(SlotVisitor&)>> IsoCellSet::forEachMarkedCellInParallel(const Func& func)
83{
84 class Task : public SharedTask<void(SlotVisitor&)> {
85 public:
86 Task(IsoCellSet& set, const Func& func)
87 : m_set(set)
88 , m_blockSource(set.parallelNotEmptyMarkedBlockSource())
89 , m_func(func)
90 {
91 }
92
93 void run(SlotVisitor& visitor) override
94 {
95 while (MarkedBlock::Handle* handle = m_blockSource->run()) {
96 size_t blockIndex = handle->index();
97 auto* bits = m_set.m_bits[blockIndex].get();
98 handle->forEachMarkedCell(
99 [&] (size_t atomNumber, HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
100 if (bits->get(atomNumber))
101 m_func(visitor, cell, kind);
102 return IterationStatus::Continue;
103 });
104 }
105 }
106
107 private:
108 IsoCellSet& m_set;
109 Ref<SharedTask<MarkedBlock::Handle*()>> m_blockSource;
110 Func m_func;
111 Lock m_lock;
112 };
113
114 return adoptRef(*new Task(*this, func));
115}
116
// Invokes func(cell, kind) for every cell that is both a member of this set
// and reported live by its block's forEachLiveCell walk. Only blocks that
// ever had a member added (m_blocksWithBits) are visited at all.
template<typename Func>
void IsoCellSet::forEachLiveCell(const Func& func)
{
    BlockDirectory& directory = m_subspace.m_directory;
    m_blocksWithBits.forEachSetBit(
        [&] (size_t blockIndex) {
            MarkedBlock::Handle* block = directory.m_blocks[blockIndex];

            // FIXME: We could optimize this by checking our bits before querying isLive.
            // TODO(review): file a bug for that optimization and cite its URL here
            // (the previous placeholder comment had no bug link).
            auto* bits = m_bits[blockIndex].get();
            block->forEachLiveCell(
                [&] (size_t atomNumber, HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
                    if (bits->get(atomNumber))
                        func(cell, kind);
                    return IterationStatus::Continue;
                });
        });
}
136
137} // namespace JSC
138
139