/*
 * Copyright (C) 2018-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LocalAllocator.h"

#include "AllocatingScope.h"
#include "FreeListInlines.h"
#include "GCDeferralContext.h"
#include "JSCInlines.h"
#include "LocalAllocatorInlines.h"
#include "Options.h"
#include "SuperSampler.h"

namespace JSC {

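// A LocalAllocator is the mutator-side allocation head for one BlockDirectory: it holds a free
// list and a current block to allocate out of. Registering with the directory lets the directory
// find all of its local allocators, e.g. so they can all be stopped for a collection.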
LocalAllocator::LocalAllocator(BlockDirectory* directory)
    : m_directory(directory)
    , m_freeList(directory->m_cellSize)
{
    auto locker = holdLock(directory->m_localAllocatorsLock);
    directory->m_localAllocators.append(this);
}

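// Returns this allocator to its initial state: no free list, no current or last active block, and
// the block-search cursor rewound to the start of the directory. This drops the free list without
// handing it back to any block, so it should only run once the free list is empty or has already
// been returned via stopAllocating().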
void LocalAllocator::reset()
{
    m_freeList.clear();
    m_currentBlock = nullptr;
    m_lastActiveBlock = nullptr;
    m_allocationCursor = 0;
}

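// It's a fatal error to destroy a LocalAllocator that still holds allocation state; callers are
// expected to have called stopAllocatingForGood() (or otherwise emptied it) first.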
LocalAllocator::~LocalAllocator()
{
    if (isOnList()) {
        auto locker = holdLock(m_directory->m_localAllocatorsLock);
        remove();
    }

    bool ok = true;
    if (!m_freeList.allocationWillFail()) {
        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-empty free-list.\n");
        ok = false;
    }
    if (m_currentBlock) {
        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-null current block.\n");
        ok = false;
    }
    if (m_lastActiveBlock) {
        dataLog("FATAL: ", RawPointer(this), "->~LocalAllocator has non-null last active block.\n");
        ok = false;
    }
    RELEASE_ASSERT(ok);
}

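// Hands the unused remainder of the free list back to the current block so the collector sees a
// consistent heap, and remembers the block so resumeAllocating() can pick up where we left off.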
void LocalAllocator::stopAllocating()
{
    ASSERT(!m_lastActiveBlock);
    if (!m_currentBlock) {
        ASSERT(m_freeList.allocationWillFail());
        return;
    }

    m_currentBlock->stopAllocating(m_freeList);
    m_lastActiveBlock = m_currentBlock;
    m_currentBlock = nullptr;
    m_freeList.clear();
}

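// Inverse of stopAllocating(): reclaims the free list from the last active block and makes that
// block current again.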
void LocalAllocator::resumeAllocating()
{
    if (!m_lastActiveBlock)
        return;

    m_lastActiveBlock->resumeAllocating(m_freeList);
    m_currentBlock = m_lastActiveBlock;
    m_lastActiveBlock = nullptr;
}

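// Called when the directory's view of its blocks may have changed (for example after a
// collection), so any state cached here would be stale.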
void LocalAllocator::prepareForAllocation()
{
    reset();
}

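// Permanently quiesces this allocator: hand the free list back to its block, then drop all cached
// state so that the destructor's invariants hold.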
void LocalAllocator::stopAllocatingForGood()
{
    stopAllocating();
    reset();
}

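// Slow path taken when the inline fast path's free list is exhausted. It accounts the consumed
// free list with the Heap, retires the current block, gives the GC a chance to run, and then
// tries progressively more expensive ways of finding memory, ending with allocating a brand new
// block.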
void* LocalAllocator::allocateSlowCase(Heap& heap, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    SuperSamplerScope superSamplerScope(false);
    ASSERT(heap.vm().currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded(heap, deferralContext);

    ASSERT(!m_directory->markedSpace().isIterating());
    heap.didAllocate(m_freeList.originalSize());

    didConsumeFreeList();

    AllocatingScope helpingHeap(heap);

    heap.collectIfNecessaryOrDefer(deferralContext);

    // Goofy corner case: the GC called a callback and now this directory has a currentBlock. This only
    // happens when running WebKit tests, which inject a callback into the GC's finalization.
    if (UNLIKELY(m_currentBlock))
        return allocate(heap, deferralContext, failureMode);

    void* result = tryAllocateWithoutCollecting();

    if (LIKELY(result != nullptr))
        return result;

    Subspace* subspace = m_directory->m_subspace;
    if (subspace->isIsoSubspace()) {
        if (void* result = static_cast<IsoSubspace*>(subspace)->tryAllocateFromLowerTier())
            return result;
    }

    MarkedBlock::Handle* block = m_directory->tryAllocateBlock(heap);
    if (!block) {
        if (failureMode == AllocationFailureMode::Assert)
            RELEASE_ASSERT_NOT_REACHED();
        else
            return nullptr;
    }
    m_directory->addBlock(block);
    result = allocateIn(block);
    ASSERT(result);
    return result;
}

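// Notifies the current block, if any, that its free list has been fully consumed, then detaches
// this allocator from it.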
void LocalAllocator::didConsumeFreeList()
{
    if (m_currentBlock)
        m_currentBlock->didConsumeFreeList();

    m_freeList.clear();
    m_currentBlock = nullptr;
}

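// Tries to find memory without growing the heap or running a collection: first by sweeping
// partially free blocks the directory already owns, then (if enabled) by stealing an empty block
// from another directory that shares our AlignedMemoryAllocator.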
void* LocalAllocator::tryAllocateWithoutCollecting()
{
    // FIXME: If we wanted this to be used for real multi-threaded allocations then we would have to
    // come up with some concurrency protocol here. That protocol would need to be able to handle:
    //
    // - The basic case of multiple LocalAllocators trying to do an allocationCursor search on the
    //   same bitvector. That probably needs the bitvector lock at least.
    //
    // - The harder case of some LocalAllocator triggering a steal from a different BlockDirectory
    //   via a search in the AlignedMemoryAllocator's list. Who knows what locks that needs.
    //
    // One way to make this work is to have a single per-Heap lock that protects all mutator lock
    // allocation slow paths. That would probably be scalable enough for years. It would certainly be
    // for using TLC allocation from JIT threads.
    // https://bugs.webkit.org/show_bug.cgi?id=181635

    SuperSamplerScope superSamplerScope(false);

    ASSERT(!m_currentBlock);
    ASSERT(m_freeList.allocationWillFail());

    for (;;) {
        MarkedBlock::Handle* block = m_directory->findBlockForAllocation(*this);
        if (!block)
            break;

        if (void* result = tryAllocateIn(block))
            return result;
    }

    if (Options::stealEmptyBlocksFromOtherAllocators()) {
        if (MarkedBlock::Handle* block = m_directory->m_subspace->findEmptyBlockToSteal()) {
            RELEASE_ASSERT(block->alignedMemoryAllocator() == m_directory->m_subspace->alignedMemoryAllocator());

            block->sweep(nullptr);

            // It's good that this clears canAllocateButNotEmpty as well as all other bits,
            // because there is a remote chance that a block may have both canAllocateButNotEmpty
            // and empty set at the same time.
            block->removeFromDirectory();
            m_directory->addBlock(block);
            return allocateIn(block);
        }
    }

    return nullptr;
}

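// Infallible variant of tryAllocateIn(), for blocks known to have free cells (e.g. a block we
// just allocated or just stole while it was empty).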
void* LocalAllocator::allocateIn(MarkedBlock::Handle* block)
{
    void* result = tryAllocateIn(block);
    RELEASE_ASSERT(result);
    return result;
}

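// Sweeps the block to build a free list and allocates the first cell from it. Returns null (and
// un-free-lists the block) if the sweep discovers that the block is completely full.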
void* LocalAllocator::tryAllocateIn(MarkedBlock::Handle* block)
{
    ASSERT(block);
    ASSERT(!block->isFreeListed());

    block->sweep(&m_freeList);

    // It's possible to stumble on a completely full block. Marking tries to retire these, but
    // that algorithm is racy and may forget to do it sometimes.
    if (m_freeList.allocationWillFail()) {
        ASSERT(block->isFreeListed());
        block->unsweepWithNoNewlyAllocated();
        ASSERT(!block->isFreeListed());
        ASSERT(!m_directory->isEmpty(NoLockingNecessary, block));
        ASSERT(!m_directory->isCanAllocateButNotEmpty(NoLockingNecessary, block));
        return nullptr;
    }

    m_currentBlock = block;

    // The sweep succeeded, so the free list has at least one cell in it; the slow-path callback
    // can therefore never run.
    void* result = m_freeList.allocate(
        [] () -> HeapCell* {
            RELEASE_ASSERT_NOT_REACHED();
            return nullptr;
        });
    m_directory->setIsEden(NoLockingNecessary, m_currentBlock, true);
    m_directory->markedSpace().didAllocateInBlock(m_currentBlock);
    return result;
}

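// Test-only throttling: when Options::slowPathAllocsBetweenGCs() is N, force a full synchronous
// collection (or request one through the deferral context) on every Nth slow-path allocation.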
void LocalAllocator::doTestCollectionsIfNeeded(Heap& heap, GCDeferralContext* deferralContext)
{
    if (!Options::slowPathAllocsBetweenGCs())
        return;

    static unsigned allocationCount = 0;
    if (!allocationCount) {
        if (!heap.isDeferred()) {
            if (deferralContext)
                deferralContext->m_shouldGC = true;
            else
                heap.collectNow(Sync, CollectionScope::Full);
        }
    }
    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
        allocationCount = 0;
}

bool LocalAllocator::isFreeListedCell(const void* target) const
{
    // This abomination exists to detect when an object is in the dead-but-not-destructed state.
    // Therefore, it's not even clear that this needs to do anything beyond returning "false", since
    // if we know that the block owning the object is free-listed, then it's impossible for any
    // objects to be in the dead-but-not-destructed state.
    // FIXME: Get rid of this abomination. https://bugs.webkit.org/show_bug.cgi?id=181655
    return m_freeList.contains(bitwise_cast<HeapCell*>(target));
}

} // namespace JSC