/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "Subspace.h"

#include "AlignedMemoryAllocator.h"
#include "AllocatorInlines.h"
#include "BlockDirectoryInlines.h"
#include "JSCInlines.h"
#include "LocalAllocatorInlines.h"
#include "MarkedBlockInlines.h"
#include "PreventCollectionScope.h"
#include "SubspaceInlines.h"

namespace JSC {

CompleteSubspace::CompleteSubspace(CString name, Heap& heap, HeapCellType* heapCellType, AlignedMemoryAllocator* alignedMemoryAllocator)
    : Subspace(name, heap)
{
    initialize(heapCellType, alignedMemoryAllocator);
}

CompleteSubspace::~CompleteSubspace()
{
}

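// The virtual allocatorFor() and allocate() overrides below just forward to the *NonVirtual
// variants, so callers that statically know they have a CompleteSubspace can avoid the virtual
// dispatch while generic Subspace users still go through these entry points.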
Allocator CompleteSubspace::allocatorFor(size_t size, AllocatorForMode mode)
{
    return allocatorForNonVirtual(size, mode);
}

void* CompleteSubspace::allocate(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    return allocateNonVirtual(vm, size, deferralContext, failureMode);
}

Allocator CompleteSubspace::allocatorForSlow(size_t size)
{
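    // Look up the size class for this size step. A zero entry means no MarkedBlock size class
    // covers this size, so return a null Allocator; callers (see tryAllocateSlow below) respond
    // by taking the large allocation path.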
    size_t index = MarkedSpace::sizeClassToIndex(size);
    size_t sizeClass = MarkedSpace::s_sizeClassForSizeStep[index];
    if (!sizeClass)
        return Allocator();

    // This is written in such a way that it's OK for the JIT threads to end up here if they want
    // to generate code that uses some allocator that hadn't been used yet. Note that a possibly-
    // just-as-good solution would be to return null if we're in the JIT, since the JIT treats a
    // null allocator as "please always take the slow path". But that could lead to performance
    // surprises, and the algorithm here is pretty easy. Only this code has to hold the lock, to
    // prevent simultaneous BlockDirectory creations from multiple threads. This code also ensures
    // that any "forEachAllocator" traversals will only see this allocator after it's initialized
    // enough: the directory and its local allocator are fully constructed and registered before
    // the storeStoreFence below publishes the directory as m_firstDirectory.
    auto locker = holdLock(m_space.directoryLock());
    if (Allocator allocator = m_allocatorForSizeStep[index])
        return allocator;

    if (false)
        dataLog("Creating BlockDirectory/LocalAllocator for ", m_name, ", ", attributes(), ", ", sizeClass, ".\n");

    std::unique_ptr<BlockDirectory> uniqueDirectory =
        std::make_unique<BlockDirectory>(m_space.heap(), sizeClass);
    BlockDirectory* directory = uniqueDirectory.get();
    m_directories.append(WTFMove(uniqueDirectory));

    directory->setSubspace(this);
    m_space.addBlockDirectory(locker, directory);

    std::unique_ptr<LocalAllocator> uniqueLocalAllocator =
        std::make_unique<LocalAllocator>(directory);
    LocalAllocator* localAllocator = uniqueLocalAllocator.get();
    m_localAllocators.append(WTFMove(uniqueLocalAllocator));

    Allocator allocator(localAllocator);

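    // Record this allocator for every size step that maps to the same size class, walking
    // downward from the size class's canonical index, so later lookups for any of those sizes
    // hit m_allocatorForSizeStep directly.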
    index = MarkedSpace::sizeClassToIndex(sizeClass);
    for (;;) {
        if (MarkedSpace::s_sizeClassForSizeStep[index] != sizeClass)
            break;

        m_allocatorForSizeStep[index] = allocator;

        if (!index--)
            break;
    }

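    // Publish the new directory. The store-store fence guarantees that all of the initialization
    // above is visible before m_firstDirectory is updated, so concurrent traversals starting from
    // m_firstDirectory never observe a partially constructed directory.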
    directory->setNextDirectoryInSubspace(m_firstDirectory);
    m_alignedMemoryAllocator->registerDirectory(directory);
    WTF::storeStoreFence();
    m_firstDirectory = directory;
    return allocator;
}

void* CompleteSubspace::allocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    void* result = tryAllocateSlow(vm, size, deferralContext);
    if (failureMode == AllocationFailureMode::Assert)
        RELEASE_ASSERT(result);
    return result;
}

void* CompleteSubspace::tryAllocateSlow(VM& vm, size_t size, GCDeferralContext* deferralContext)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(vm.heap.expectDoesGC());

    sanitizeStackForVM(&vm);

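    // First retry through the per-size-class allocator path. AllocatorForMode::EnsureAllocator
    // creates the allocator on demand (see allocatorForSlow above); a null allocator here means
    // no MarkedBlock size class covers this size, so fall through to the large allocation path.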
    if (Allocator allocator = allocatorFor(size, AllocatorForMode::EnsureAllocator))
        return allocator.allocate(deferralContext, AllocationFailureMode::ReturnNull);

    if (size <= Options::largeAllocationCutoff()
        && size <= MarkedSpace::largeCutoff) {
        dataLog("FATAL: attempting to allocate small object using large allocation.\n");
        dataLog("Requested allocation size: ", size, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    vm.heap.collectIfNecessaryOrDefer(deferralContext);

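    // Large allocations are rounded up to MarkedSpace::sizeStep and tracked individually: the new
    // allocation is handed its future index into the space-wide large allocation vector, and the
    // append below makes that index valid (checked by the ASSERT).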
    size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
    LargeAllocation* allocation = LargeAllocation::tryCreate(vm.heap, size, this, m_space.m_largeAllocations.size());
    if (!allocation)
        return nullptr;

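    // The allocation is tracked both by the whole MarkedSpace (m_space.m_largeAllocations) and by
    // this subspace (m_largeAllocations below); didAllocate() credits the new bytes to the heap's
    // allocation accounting.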
    m_space.m_largeAllocations.append(allocation);
    ASSERT(allocation->indexInSpace() == m_space.m_largeAllocations.size() - 1);
    vm.heap.didAllocate(size);
    m_space.m_capacity += size;

    m_largeAllocations.append(allocation);

    return allocation->cell();
}

void* CompleteSubspace::reallocateLargeAllocationNonVirtual(VM& vm, HeapCell* oldCell, size_t size, GCDeferralContext* deferralContext, AllocationFailureMode failureMode)
{
    if (validateDFGDoesGC)
        RELEASE_ASSERT(vm.heap.expectDoesGC());

    // The following conditions are met by Butterfly allocations, for example.
    ASSERT(oldCell->isLargeAllocation());

    LargeAllocation* oldAllocation = &oldCell->largeAllocation();
    ASSERT(oldAllocation->cellSize() <= size);
    ASSERT(oldAllocation->weakSet().isTriviallyDestructible());
    ASSERT(oldAllocation->attributes().destruction == DoesNotNeedDestruction);
    ASSERT(oldAllocation->attributes().cellKind == HeapCell::Auxiliary);
    ASSERT(size > MarkedSpace::largeCutoff);

    sanitizeStackForVM(&vm);

    if (size <= Options::largeAllocationCutoff()
        && size <= MarkedSpace::largeCutoff) {
        dataLog("FATAL: attempting to allocate small object using large allocation.\n");
        dataLog("Requested allocation size: ", size, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    vm.heap.collectIfNecessaryOrDefer(deferralContext);

    size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
    size_t difference = size - oldAllocation->cellSize();
    unsigned oldIndexInSpace = oldAllocation->indexInSpace();
    if (oldAllocation->isOnList())
        oldAllocation->remove();

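    // tryReallocate() may return a different LargeAllocation* because the underlying memory can
    // move, which is why the old allocation was taken off any intrusive list above. On failure
    // the old allocation remains valid, so it goes back on this subspace's list and the caller
    // gets null.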
    LargeAllocation* allocation = oldAllocation->tryReallocate(size, this);
    if (!allocation) {
        RELEASE_ASSERT(failureMode != AllocationFailureMode::Assert);
        m_largeAllocations.append(oldAllocation);
        return nullptr;
    }
    ASSERT(oldIndexInSpace == allocation->indexInSpace());

    m_space.m_largeAllocations[oldIndexInSpace] = allocation;
    vm.heap.didAllocate(difference);
    m_space.m_capacity += difference;

    m_largeAllocations.append(allocation);

    return allocation->cell();
}

} // namespace JSC