/*
 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include <wtf/MetaAllocator.h>

#include <wtf/DataLog.h>
#include <wtf/FastMalloc.h>
#include <wtf/ProcessID.h>

namespace WTF {

MetaAllocator::~MetaAllocator()
{
    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node;) {
        FreeSpaceNode* next = node->successor();
        m_freeSpaceSizeMap.remove(node);
        freeFreeSpaceNode(node);
        node = next;
    }
#ifndef NDEBUG
    ASSERT(!m_mallocBalance);
#endif
}

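// The MetaAllocatorTracker, when one is installed, keeps a registry of live
// handles: allocate() reports each new handle via notify(), and the handle's
// destruction reports it back via release().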
void MetaAllocatorTracker::notify(MetaAllocatorHandle* handle)
{
    m_allocations.insert(handle);
}

void MetaAllocatorTracker::release(MetaAllocatorHandle* handle)
{
    m_allocations.remove(handle);
}

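// Called from the handle's destructor: returns the handle's byte range to the
// free-space structures and drops the per-page occupancy counts so that pages
// with no remaining users can be decommitted.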
ALWAYS_INLINE void MetaAllocator::release(MetaAllocatorHandle* handle)
{
    LockHolder locker(&m_lock);
    if (handle->sizeInBytes()) {
        void* start = handle->start().untaggedPtr();
        size_t sizeInBytes = handle->sizeInBytes();
        decrementPageOccupancy(start, sizeInBytes);
        addFreeSpaceFromReleasedHandle(FreeSpacePtr(start), sizeInBytes);
    }

    if (UNLIKELY(!!m_tracker))
        m_tracker->release(handle);
}

MetaAllocatorHandle::MetaAllocatorHandle(MetaAllocator* allocator, void* start, size_t sizeInBytes, void* ownerUID)
    : m_allocator(allocator)
    , m_start(start)
    , m_end(reinterpret_cast<char*>(start) + sizeInBytes)
    , m_ownerUID(ownerUID)
{
    ASSERT(allocator);
    ASSERT(start);
    ASSERT(sizeInBytes);
}

MetaAllocatorHandle::~MetaAllocatorHandle()
{
    ASSERT(m_allocator);
    m_allocator->release(this);
}

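// Trims the allocation down to newSizeInBytes (rounded up to the allocation
// granule). Occupancy is dropped for every page that begins inside the freed
// tail, and the tail itself is handed back to the free-space map.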
void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
{
    size_t sizeInBytes = this->sizeInBytes();
    ASSERT(newSizeInBytes <= sizeInBytes);

    LockHolder locker(&m_allocator->m_lock);

    newSizeInBytes = m_allocator->roundUp(newSizeInBytes);

    ASSERT(newSizeInBytes <= sizeInBytes);

    if (newSizeInBytes == sizeInBytes)
        return;

    uintptr_t freeStart = m_start.untaggedPtr<uintptr_t>() + newSizeInBytes;
    size_t freeSize = sizeInBytes - newSizeInBytes;
    uintptr_t freeEnd = freeStart + freeSize;

    uintptr_t firstCompletelyFreePage = (freeStart + m_allocator->m_pageSize - 1) & ~(m_allocator->m_pageSize - 1);
    if (firstCompletelyFreePage < freeEnd)
        m_allocator->decrementPageOccupancy(reinterpret_cast<void*>(firstCompletelyFreePage), freeSize - (firstCompletelyFreePage - freeStart));

    m_allocator->addFreeSpaceFromReleasedHandle(MetaAllocator::FreeSpacePtr(freeStart), freeSize);

    m_end = m_start + newSizeInBytes;
}

void MetaAllocatorHandle::dump(PrintStream& out) const
{
    out.print(RawPointer(start().untaggedPtr()), "...", RawPointer(end().untaggedPtr()));
}

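// Both the allocation granule and the page size must be powers of two; the
// constructor derives their base-2 logarithms so that later arithmetic can use
// shifts and masks instead of division.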
MetaAllocator::MetaAllocator(size_t allocationGranule, size_t pageSize)
    : m_allocationGranule(allocationGranule)
    , m_pageSize(pageSize)
    , m_bytesAllocated(0)
    , m_bytesReserved(0)
    , m_bytesCommitted(0)
    , m_tracker(0)
#ifndef NDEBUG
    , m_mallocBalance(0)
#endif
#if ENABLE(META_ALLOCATOR_PROFILE)
    , m_numAllocations(0)
    , m_numFrees(0)
#endif
{
    for (m_logPageSize = 0; m_logPageSize < 32; ++m_logPageSize) {
        if (static_cast<size_t>(1) << m_logPageSize == m_pageSize)
            break;
    }

    ASSERT(static_cast<size_t>(1) << m_logPageSize == m_pageSize);

    for (m_logAllocationGranule = 0; m_logAllocationGranule < 32; ++m_logAllocationGranule) {
        if (static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule)
            break;
    }

    ASSERT(static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule);
}

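// Allocation path: round the request up to the allocation granule, then try to
// carve it out of existing free space. If nothing fits, ask the subclass for
// fresh pages via allocateNewSpace() and return any excess to the free list.
// Every page touched by the allocation is reference-counted through
// incrementPageOccupancy(), which commits a page on its first use.
//
// A minimal usage sketch, assuming a concrete subclass that implements the
// allocateNewSpace()/notifyNeedPage()/notifyPageIsFree() hooks:
//
//     RefPtr<MetaAllocatorHandle> handle = allocator.allocate(bytes, ownerUID);
//     if (!handle)
//         return; // The managed pool could not satisfy the request.
//     void* memory = handle->start().untaggedPtr();
//     // ... use the memory; destroying the last RefPtr releases it.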
RefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes, void* ownerUID)
{
    LockHolder locker(&m_lock);

    if (!sizeInBytes)
        return nullptr;

    sizeInBytes = roundUp(sizeInBytes);

    FreeSpacePtr start = findAndRemoveFreeSpace(sizeInBytes);
    if (!start) {
        size_t requestedNumberOfPages = (sizeInBytes + m_pageSize - 1) >> m_logPageSize;
        size_t numberOfPages = requestedNumberOfPages;

        start = allocateNewSpace(numberOfPages);
        if (!start)
            return nullptr;

        ASSERT(numberOfPages >= requestedNumberOfPages);

        size_t roundedUpSize = numberOfPages << m_logPageSize;

        ASSERT(roundedUpSize >= sizeInBytes);

        m_bytesReserved += roundedUpSize;

        if (roundedUpSize > sizeInBytes) {
            FreeSpacePtr freeSpaceStart = start + sizeInBytes;
            size_t freeSpaceSize = roundedUpSize - sizeInBytes;
            addFreeSpace(freeSpaceStart, freeSpaceSize);
        }
    }
    incrementPageOccupancy(start.untaggedPtr(), sizeInBytes);
    m_bytesAllocated += sizeInBytes;
#if ENABLE(META_ALLOCATOR_PROFILE)
    m_numAllocations++;
#endif

    auto handle = adoptRef(*new MetaAllocatorHandle(this, start.untaggedPtr(), sizeInBytes, ownerUID));

    if (UNLIKELY(!!m_tracker))
        m_tracker->notify(handle.ptr());

    return handle;
}

MetaAllocator::Statistics MetaAllocator::currentStatistics()
{
    LockHolder locker(&m_lock);
    Statistics result;
    result.bytesAllocated = m_bytesAllocated;
    result.bytesReserved = m_bytesReserved;
    result.bytesCommitted = m_bytesCommitted;
    return result;
}

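// Best-fit search: the free-space tree is ordered by size, so
// findLeastGreaterThanOrEqual() yields the smallest block that can satisfy the
// request. A perfect fit removes the node outright; otherwise the request is
// carved from whichever end of the block straddles fewer pages.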
MetaAllocator::FreeSpacePtr MetaAllocator::findAndRemoveFreeSpace(size_t sizeInBytes)
{
    FreeSpaceNode* node = m_freeSpaceSizeMap.findLeastGreaterThanOrEqual(sizeInBytes);

    if (!node)
        return 0;

    size_t nodeSizeInBytes = node->sizeInBytes();
    ASSERT(nodeSizeInBytes >= sizeInBytes);

    m_freeSpaceSizeMap.remove(node);

    FreeSpacePtr result;

    if (nodeSizeInBytes == sizeInBytes) {
        // Easy case: perfect fit, so just remove the node entirely.
        result = node->m_start;

        m_freeSpaceStartAddressMap.remove(node->m_start);
        m_freeSpaceEndAddressMap.remove(node->m_end);
        freeFreeSpaceNode(node);
    } else {
        // Try to be a good citizen and ensure that the returned chunk of memory
        // straddles as few pages as possible, but only insofar as doing so will
        // not increase fragmentation. The intuition is that minimizing
        // fragmentation is a strictly higher priority than minimizing the number
        // of committed pages, since in the long run, smaller fragmentation means
        // fewer committed pages and fewer failures in general.

        uintptr_t nodeStartAsInt = node->m_start.untaggedPtr<uintptr_t>();
        uintptr_t firstPage = nodeStartAsInt >> m_logPageSize;
        uintptr_t lastPage = (nodeStartAsInt + nodeSizeInBytes - 1) >> m_logPageSize;

        uintptr_t lastPageForLeftAllocation = (nodeStartAsInt + sizeInBytes - 1) >> m_logPageSize;
        uintptr_t firstPageForRightAllocation = (nodeStartAsInt + nodeSizeInBytes - sizeInBytes) >> m_logPageSize;

        if (lastPageForLeftAllocation - firstPage + 1 <= lastPage - firstPageForRightAllocation + 1) {
            // Allocate in the left side of the returned chunk, and slide the node to the right.
            result = node->m_start;

            m_freeSpaceStartAddressMap.remove(node->m_start);

            node->m_start += sizeInBytes;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceStartAddressMap.add(node->m_start, node);
        } else {
            // Allocate in the right side of the returned chunk, and slide the node to the left.

            result = node->m_end - sizeInBytes;

            m_freeSpaceEndAddressMap.remove(node->m_end);

            node->m_end = result;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceEndAddressMap.add(result, node);
        }
    }

#if ENABLE(META_ALLOCATOR_PROFILE)
    dumpProfile();
#endif

    return result;
}

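// Common tail of release() and shrink(): accounts for the returned bytes and
// feeds them back into the free-space structures.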
void MetaAllocator::addFreeSpaceFromReleasedHandle(FreeSpacePtr start, size_t sizeInBytes)
{
#if ENABLE(META_ALLOCATOR_PROFILE)
    m_numFrees++;
#endif
    m_bytesAllocated -= sizeInBytes;
    addFreeSpace(start, sizeInBytes);
}

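// Entry point for clients that reserve memory themselves and donate it to the
// allocator: the region is counted as reserved and placed on the free list.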
void MetaAllocator::addFreshFreeSpace(void* start, size_t sizeInBytes)
{
    LockHolder locker(&m_lock);
    m_bytesReserved += sizeInBytes;
    addFreeSpace(FreeSpacePtr(start), sizeInBytes);
}

size_t MetaAllocator::debugFreeSpaceSize()
{
#ifndef NDEBUG
    LockHolder locker(&m_lock);
    size_t result = 0;
    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node; node = node->successor())
        result += node->sizeInBytes();
    return result;
#else
    CRASH();
    return 0;
#endif
}

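// Inserts a free range, coalescing with adjacent free ranges where possible.
// The end-address map finds a neighbor that finishes exactly where this range
// starts; the start-address map finds one that begins exactly where it ends.
// Depending on which neighbors exist, the range is merged left, right, both,
// or inserted as a brand new node.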
void MetaAllocator::addFreeSpace(FreeSpacePtr start, size_t sizeInBytes)
{
    FreeSpacePtr end = start + sizeInBytes;

    HashMap<FreeSpacePtr, FreeSpaceNode*>::iterator leftNeighbor = m_freeSpaceEndAddressMap.find(start);
    HashMap<FreeSpacePtr, FreeSpaceNode*>::iterator rightNeighbor = m_freeSpaceStartAddressMap.find(end);

    if (leftNeighbor != m_freeSpaceEndAddressMap.end()) {
        // We have something we can coalesce with on the left. Remove it from the tree, and
        // remove its end from the end address map.

        ASSERT(leftNeighbor->value->m_end == leftNeighbor->key);

        FreeSpaceNode* leftNode = leftNeighbor->value;

        FreeSpacePtr leftEnd = leftNode->m_end;

        ASSERT(leftEnd == start);

        m_freeSpaceSizeMap.remove(leftNode);
        m_freeSpaceEndAddressMap.remove(leftEnd);

        // Now check if there is also something to coalesce with on the right.
        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
            // Freeing something in the middle of free blocks. Coalesce both left and
            // right, whilst removing the right neighbor from the maps.

            ASSERT(rightNeighbor->value->m_start == rightNeighbor->key);

            FreeSpaceNode* rightNode = rightNeighbor->value;
            FreeSpacePtr rightStart = rightNeighbor->key;
            size_t rightSize = rightNode->sizeInBytes();
            FreeSpacePtr rightEnd = rightNode->m_end;

            ASSERT(rightStart == end);
            ASSERT(leftNode->m_start + (leftNode->sizeInBytes() + sizeInBytes + rightSize) == rightEnd);

            m_freeSpaceSizeMap.remove(rightNode);
            m_freeSpaceStartAddressMap.remove(rightStart);
            m_freeSpaceEndAddressMap.remove(rightEnd);

            freeFreeSpaceNode(rightNode);

            leftNode->m_end += (sizeInBytes + rightSize);

            m_freeSpaceSizeMap.insert(leftNode);
            m_freeSpaceEndAddressMap.add(rightEnd, leftNode);
        } else {
            leftNode->m_end += sizeInBytes;

            m_freeSpaceSizeMap.insert(leftNode);
            m_freeSpaceEndAddressMap.add(end, leftNode);
        }
    } else {
        // Cannot coalesce with left; try to see if we can coalesce with right.

        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
            FreeSpaceNode* rightNode = rightNeighbor->value;
            FreeSpacePtr rightStart = rightNeighbor->key;

            ASSERT(rightStart == end);
            ASSERT(start + (sizeInBytes + rightNode->sizeInBytes()) == rightNode->m_end);

            m_freeSpaceSizeMap.remove(rightNode);
            m_freeSpaceStartAddressMap.remove(rightStart);

            rightNode->m_start = start;

            m_freeSpaceSizeMap.insert(rightNode);
            m_freeSpaceStartAddressMap.add(start, rightNode);
        } else {
            // Nothing to coalesce with, so create a new free space node and add it.

            FreeSpaceNode* node = allocFreeSpaceNode();

            node->m_start = start;
            node->m_end = start + sizeInBytes;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceStartAddressMap.add(start, node);
            m_freeSpaceEndAddressMap.add(end, node);
        }
    }

#if ENABLE(META_ALLOCATOR_PROFILE)
    dumpProfile();
#endif
}

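// Each page covered by an allocation holds a use count in m_pageOccupancyMap.
// The first allocation to touch a page commits it via notifyNeedPage().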
void MetaAllocator::incrementPageOccupancy(void* address, size_t sizeInBytes)
{
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;

    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
        if (iter == m_pageOccupancyMap.end()) {
            m_pageOccupancyMap.add(page, 1);
            m_bytesCommitted += m_pageSize;
            notifyNeedPage(reinterpret_cast<void*>(page << m_logPageSize));
        } else
            iter->value++;
    }
}

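// Mirror of incrementPageOccupancy(): once a page's use count drops to zero it
// is removed from the map and handed back via notifyPageIsFree().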
void MetaAllocator::decrementPageOccupancy(void* address, size_t sizeInBytes)
{
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;

    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
        ASSERT(iter != m_pageOccupancyMap.end());
        if (!--(iter->value)) {
            m_pageOccupancyMap.remove(iter);
            m_bytesCommitted -= m_pageSize;
            notifyPageIsFree(reinterpret_cast<void*>(page << m_logPageSize));
        }
    }
}

bool MetaAllocator::isInAllocatedMemory(const AbstractLocker&, void* address)
{
    ASSERT(m_lock.isLocked());
    uintptr_t page = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    return m_pageOccupancyMap.contains(page);
}

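// Rounds a request up to the next multiple of the allocation granule (a power
// of two, so a mask suffices), crashing rather than wrapping on overflow.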
size_t MetaAllocator::roundUp(size_t sizeInBytes)
{
    if (std::numeric_limits<size_t>::max() - m_allocationGranule <= sizeInBytes)
        CRASH();
    return (sizeInBytes + m_allocationGranule - 1) & ~(m_allocationGranule - 1);
}

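// Free-space nodes live in fastMalloc memory. In debug builds m_mallocBalance
// counts allocations against frees so the destructor can assert that none leak.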
MetaAllocator::FreeSpaceNode* MetaAllocator::allocFreeSpaceNode()
{
#ifndef NDEBUG
    m_mallocBalance++;
#endif
    return new (NotNull, fastMalloc(sizeof(FreeSpaceNode))) FreeSpaceNode();
}

void MetaAllocator::freeFreeSpaceNode(FreeSpaceNode* node)
{
#ifndef NDEBUG
    m_mallocBalance--;
#endif
    fastFree(node);
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void MetaAllocator::dumpProfile()
{
    dataLogF(
        "%d: MetaAllocator(%p): num allocations = %u, num frees = %u, allocated = %lu, reserved = %lu, committed = %lu\n",
        getCurrentProcessID(), this, m_numAllocations, m_numFrees, m_bytesAllocated, m_bytesReserved, m_bytesCommitted);
}
#endif

} // namespace WTF