/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "Subspace.h"

#include "AlignedMemoryAllocator.h"
#include "AllocatorInlines.h"
#include "BlockDirectoryInlines.h"
#include "JSCInlines.h"
#include "LocalAllocatorInlines.h"
#include "MarkedBlockInlines.h"
#include "MarkedSpaceInlines.h"
#include "PreventCollectionScope.h"
#include "SubspaceInlines.h"

namespace JSC {

CompleteSubspace::CompleteSubspace(CString name, Heap& heap, HeapCellType* heapCellType, AlignedMemoryAllocator* alignedMemoryAllocator)
    : Subspace(name, heap)
{
    initialize(heapCellType, alignedMemoryAllocator);
}

CompleteSubspace::~CompleteSubspace()
{
}

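// The virtual entry points forward to the non-virtual implementations, which
// callers that statically know the subspace type can invoke directly.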
Allocator CompleteSubspace::allocatorFor(size_t size, AllocatorForMode mode)
{
    return allocatorForNonVirtual(size, mode);
}

void* CompleteSubspace::allocate(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    return allocateNonVirtual(vm, size, deferralContext, failureMode);
}

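// Lazily creates the BlockDirectory/LocalAllocator pair for a size class the
// first time an allocation of that size is requested.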
Allocator CompleteSubspace::allocatorForSlow(size_t size)
{
    size_t index = MarkedSpace::sizeClassToIndex(size);
    size_t sizeClass = MarkedSpace::s_sizeClassForSizeStep[index];
    if (!sizeClass)
        return Allocator();

    // This is written in such a way that it's OK for the JIT threads to end up here if they want
    // to generate code that uses some allocator that hadn't been used yet. Note that a possibly-
    // just-as-good solution would be to return null if we're in the JIT since the JIT treats a null
    // allocator as "please always take the slow path". But, that could lead to performance
    // surprises and the algorithm here is pretty easy. Only this code has to hold the lock, to
    // prevent simultaneous BlockDirectory creations from multiple threads. This code also ensures
    // that any "forEachAllocator" traversal will only see this allocator after it's initialized
    // enough: it will have been fully constructed before the storeStoreFence below publishes it.
    auto locker = holdLock(m_space.directoryLock());
    if (Allocator allocator = m_allocatorForSizeStep[index])
        return allocator;

    if (false)
        dataLog("Creating BlockDirectory/LocalAllocator for ", m_name, ", ", attributes(), ", ", sizeClass, ".\n");

    std::unique_ptr<BlockDirectory> uniqueDirectory = makeUnique<BlockDirectory>(sizeClass);
    BlockDirectory* directory = uniqueDirectory.get();
    m_directories.append(WTFMove(uniqueDirectory));

    directory->setSubspace(this);
    m_space.addBlockDirectory(locker, directory);

    std::unique_ptr<LocalAllocator> uniqueLocalAllocator =
        makeUnique<LocalAllocator>(directory);
    LocalAllocator* localAllocator = uniqueLocalAllocator.get();
    m_localAllocators.append(WTFMove(uniqueLocalAllocator));

    Allocator allocator(localAllocator);

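    // Multiple consecutive size steps can map to the same size class, so install
    // the new allocator for every step that rounds up to this class, walking
    // downward from the class's own index.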
    index = MarkedSpace::sizeClassToIndex(sizeClass);
    for (;;) {
        if (MarkedSpace::s_sizeClassForSizeStep[index] != sizeClass)
            break;

        m_allocatorForSizeStep[index] = allocator;

        if (!index--)
            break;
    }

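    // Publish the new directory. The store-store fence ensures that a concurrent
    // traversal starting from m_firstDirectory only sees fully initialized
    // directories.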
    directory->setNextDirectoryInSubspace(m_firstDirectory);
    m_alignedMemoryAllocator->registerDirectory(m_space.heap(), directory);
    WTF::storeStoreFence();
    m_firstDirectory = directory;
    return allocator;
}

void* CompleteSubspace::allocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    void* result = tryAllocateSlow(vm, size, deferralContext);
    if (failureMode == AllocationFailureMode::Assert)
        RELEASE_ASSERT(result);
    return result;
}

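// Reached when the size class's allocator has not been created yet, or when the
// requested size is too large for MarkedBlocks and needs a PreciseAllocation.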
void* CompleteSubspace::tryAllocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(vm.heap.expectDoesGC());

    sanitizeStackForVM(vm);

    if (Allocator allocator = allocatorFor(size, AllocatorForMode::EnsureAllocator))
        return allocator.allocate(vm.heap, deferralContext, AllocationFailureMode::ReturnNull);

    if (size <= Options::preciseAllocationCutoff()
        && size <= MarkedSpace::largeCutoff) {
        dataLog("FATAL: attempting to allocate a small object using large allocation.\n");
        dataLog("Requested allocation size: ", size, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    vm.heap.collectIfNecessaryOrDefer(deferralContext);

    size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
    PreciseAllocation* allocation = PreciseAllocation::tryCreate(vm.heap, size, this, m_space.m_preciseAllocations.size());
    if (!allocation)
        return nullptr;

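    // Success: register the new allocation with the marked space and with this
    // subspace's own list.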
    m_space.m_preciseAllocations.append(allocation);
    if (auto* set = m_space.preciseAllocationSet())
        set->add(allocation->cell());
    ASSERT(allocation->indexInSpace() == m_space.m_preciseAllocations.size() - 1);
    vm.heap.didAllocate(size);
    m_space.m_capacity += size;

    m_preciseAllocations.append(allocation);

    return allocation->cell();
}

void* CompleteSubspace::reallocatePreciseAllocationNonVirtual(VM& vm, HeapCell* oldCell, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(vm.heap.expectDoesGC());

    // The following conditions are met by Butterfly allocations, for example.
    ASSERT(oldCell->isPreciseAllocation());

    PreciseAllocation* oldAllocation = &oldCell->preciseAllocation();
    ASSERT(oldAllocation->cellSize() <= size);
    ASSERT(oldAllocation->weakSet().isTriviallyDestructible());
    ASSERT(oldAllocation->attributes().destruction == DoesNotNeedDestruction);
    ASSERT(oldAllocation->attributes().cellKind == HeapCell::Auxiliary);
    ASSERT(size > MarkedSpace::largeCutoff);

    sanitizeStackForVM(vm);

    if (size <= Options::preciseAllocationCutoff()
        && size <= MarkedSpace::largeCutoff) {
        dataLog("FATAL: attempting to allocate a small object using large allocation.\n");
        dataLog("Requested allocation size: ", size, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    vm.heap.collectIfNecessaryOrDefer(deferralContext);

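    // Detach the old allocation from any list before reallocating, since
    // reallocation may move it; the (possibly new) allocation is re-appended below.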
    size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
    size_t difference = size - oldAllocation->cellSize();
    unsigned oldIndexInSpace = oldAllocation->indexInSpace();
    if (oldAllocation->isOnList())
        oldAllocation->remove();

    PreciseAllocation* allocation = oldAllocation->tryReallocate(size, this);
    if (!allocation) {
        RELEASE_ASSERT(failureMode != AllocationFailureMode::Assert);
        m_preciseAllocations.append(oldAllocation);
        return nullptr;
    }
    ASSERT(oldIndexInSpace == allocation->indexInSpace());

    // If reallocation changed the address, update the precise allocation HashSet.
    if (oldAllocation != allocation) {
        if (auto* set = m_space.preciseAllocationSet()) {
            set->remove(oldAllocation->cell());
            set->add(allocation->cell());
        }
    }

    m_space.m_preciseAllocations[oldIndexInSpace] = allocation;
    vm.heap.didAllocate(difference);
    m_space.m_capacity += difference;

    m_preciseAllocations.append(allocation);

    return allocation->cell();
}

} // namespace JSC