/*
 * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "MarkedBlock.h"
#include "WeakSet.h"

namespace JSC {

class IsoSubspace;
class SlotVisitor;

// WebKit has a good malloc that already knows what to do for large allocations. The GC shouldn't
// have to think about such things. That's where PreciseAllocation comes in. We will allocate large
// objects directly using malloc, and put the PreciseAllocation header just before them. We can detect
// when a HeapCell* is a PreciseAllocation because it will have the MarkedBlock::atomSize / 2 bit set.
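//
// A sketch of the resulting layout (sizes are illustrative; see headerSize() below):
//
//     this                          cell()
//     |                             |
//     v                             v
//     [ PreciseAllocation header... ][ cell payload of cellSize() bytes ]
//     <-------- headerSize() ------->
//
// headerSize() is always congruent to halfAlignment modulo alignment, and the header is always
// placed at an alignment-aligned address (shifting by halfAlignment when the allocator returns a
// half-misaligned block; see m_adjustedAlignment and basePointer()). So cell() always has the
// atomSize / 2 bit set, which is exactly what isPreciseAllocation() tests.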

class PreciseAllocation : public PackedRawSentinelNode<PreciseAllocation> {
public:
    friend class LLIntOffsetsExtractor;
    friend class IsoSubspace;

    static PreciseAllocation* tryCreate(Heap&, size_t, Subspace*, unsigned indexInSpace);

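    // Lower-tier PreciseAllocations back IsoSubspace's smallest size tier. Rather than being
    // destroyed on sweep, they can be cached and reused, identified by lowerTierIndex. (See
    // IsoSubspace for the authoritative lifecycle.)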
    static PreciseAllocation* createForLowerTier(Heap&, size_t, Subspace*, uint8_t lowerTierIndex);
    PreciseAllocation* reuseForLowerTier();

    PreciseAllocation* tryReallocate(size_t, Subspace*);

    ~PreciseAllocation();

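    // Recovers the header from a cell pointer by stepping back over the header. Only valid for
    // cells for which isPreciseAllocation() is true.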
    static PreciseAllocation* fromCell(const void* cell)
    {
        return bitwise_cast<PreciseAllocation*>(bitwise_cast<char*>(cell) - headerSize());
    }

    HeapCell* cell() const
    {
        return bitwise_cast<HeapCell*>(bitwise_cast<char*>(this) + headerSize());
    }

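    // Cells inside a MarkedBlock are atomSize-aligned, so their atomSize / 2 bit is always clear;
    // a PreciseAllocation's cell is deliberately offset so that this bit is always set. One bit
    // test therefore distinguishes the two kinds of HeapCell.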
    static bool isPreciseAllocation(HeapCell* cell)
    {
        return bitwise_cast<uintptr_t>(cell) & halfAlignment;
    }

    Subspace* subspace() const { return m_subspace; }

    void lastChanceToFinalize();

    Heap* heap() const { return m_weakSet.heap(); }
    VM& vm() const { return m_weakSet.vm(); }
    WeakSet& weakSet() { return m_weakSet; }

    unsigned indexInSpace() { return m_indexInSpace; }
    void setIndexInSpace(unsigned indexInSpace) { m_indexInSpace = indexInSpace; }

    void shrink();

    void visitWeakSet(SlotVisitor&);
    void reapWeakSet();

    void clearNewlyAllocated() { m_isNewlyAllocated = false; }
    void flip();

    bool isNewlyAllocated() const { return m_isNewlyAllocated; }
    ALWAYS_INLINE bool isMarked() { return m_isMarked.load(std::memory_order_relaxed); }
    ALWAYS_INLINE bool isMarked(HeapCell*) { return isMarked(); }
    ALWAYS_INLINE bool isMarked(HeapCell*, Dependency) { return isMarked(); }
    ALWAYS_INLINE bool isMarked(HeapVersion, HeapCell*) { return isMarked(); }
    bool isLive() { return isMarked() || isNewlyAllocated(); }

    bool hasValidCell() const { return m_hasValidCell; }

    bool isEmpty();

    size_t cellSize() const { return m_cellSize; }

    uint8_t lowerTierIndex() const { return m_lowerTierIndex; }

    bool aboveLowerBound(const void* rawPtr)
    {
        char* ptr = bitwise_cast<char*>(rawPtr);
        char* begin = bitwise_cast<char*>(cell());
        return ptr >= begin;
    }

    bool belowUpperBound(const void* rawPtr)
    {
        char* ptr = bitwise_cast<char*>(rawPtr);
        char* begin = bitwise_cast<char*>(cell());
        char* end = begin + cellSize();
        // We cannot #include IndexingHeader.h here without creating an unwanted header
        // dependency. The fact that IndexingHeader is 8 bytes is wired deep into our engine,
        // so this isn't so bad.
        size_t sizeOfIndexingHeader = 8;
        return ptr <= end + sizeOfIndexingHeader;
    }

    bool contains(const void* rawPtr)
    {
        return aboveLowerBound(rawPtr) && belowUpperBound(rawPtr);
    }

    const CellAttributes& attributes() const { return m_attributes; }

    Dependency aboutToMark(HeapVersion) { return Dependency(); }

    ALWAYS_INLINE bool testAndSetMarked()
    {
        // This method is usually called when the object is already marked. This avoids us
        // having to CAS in that case. It's profitable to reduce the total amount of CAS
        // traffic.
        if (isMarked())
            return true;
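        // WTF::Atomic::compareExchangeStrong returns the previously observed value, so this
        // returns false exactly when we are the thread that transitioned the object from
        // unmarked to marked.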
        return m_isMarked.compareExchangeStrong(false, true);
    }
    ALWAYS_INLINE bool testAndSetMarked(HeapCell*, Dependency) { return testAndSetMarked(); }
    void clearMarked() { m_isMarked.store(false); }

    void noteMarked() { }

#if ASSERT_DISABLED
    void assertValidCell(VM&, HeapCell*) const { }
#else
    void assertValidCell(VM&, HeapCell*) const;
#endif

    void sweep();

    void destroy();

    void dump(PrintStream&) const;

    bool isLowerTier() const { return m_lowerTierIndex != UINT8_MAX; }

    static constexpr unsigned alignment = MarkedBlock::atomSize;
    static constexpr unsigned halfAlignment = alignment / 2;
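    // Rounds sizeof(PreciseAllocation) up to a multiple of halfAlignment, then sets the
    // halfAlignment bit, so the result is always congruent to halfAlignment modulo alignment.
    // For example (illustrative sizes only), with atomSize == 16 and a 44-byte header:
    // (44 + 7) & ~7 == 48, and 48 | 8 == 56, and 56 % 16 == 8.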
    static constexpr unsigned headerSize() { return ((sizeof(PreciseAllocation) + halfAlignment - 1) & ~(halfAlignment - 1)) | halfAlignment; }

private:
    PreciseAllocation(Heap&, size_t, Subspace*, unsigned indexInSpace, bool adjustedAlignment);

    void* basePointer() const;

    unsigned m_indexInSpace { 0 };
    size_t m_cellSize;
    bool m_isNewlyAllocated : 1;
    bool m_hasValidCell : 1;
    bool m_adjustedAlignment : 1;
    Atomic<bool> m_isMarked;
    CellAttributes m_attributes;
    uint8_t m_lowerTierIndex { UINT8_MAX };
    Subspace* m_subspace;
    WeakSet m_weakSet;
};

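// basePointer() recovers the pointer originally returned by the allocator so that it can be
// freed. When the allocator hands back memory that is only halfAlignment-aligned, the header is
// constructed halfAlignment bytes into the allocation (keeping cell()'s tag bit intact), and
// m_adjustedAlignment remembers the shift.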
inline void* PreciseAllocation::basePointer() const
{
    if (m_adjustedAlignment)
        return bitwise_cast<char*>(this) - halfAlignment;
    return bitwise_cast<void*>(this);
}

} // namespace JSC