//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.cpp:
//    Implements the methods of the PoolAllocator and Allocation classes.
//

#include "common/PoolAlloc.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#include <memory>

#include "common/angleutils.h"
#include "common/debug.h"
#include "common/mathutil.h"
#include "common/platform.h"
#include "common/tls.h"

namespace angle
{

//
// Implement the functionality of the PoolAllocator class, which
// is documented in PoolAlloc.h.
//
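// Illustrative usage sketch (not from this file; see PoolAlloc.h for the
// exact declarations and default constructor arguments):
//
//     PoolAllocator pool;
//     pool.push();                  // mark a scope
//     void *p = pool.allocate(64);  // bump-allocate 64 bytes from the pool
//     pool.pop();                   // bulk-free everything since push()
//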
PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
    : mAlignment(allocationAlignment),
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
      mPageSize(growthIncrement),
      mFreeList(0),
      mInUseList(0),
      mNumCalls(0),
      mTotalBytes(0),
#endif
      mLocked(false)
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    if (mAlignment == 1)
    {
        // This is a special fast-path where fastAllocation() is enabled
        mAlignmentMask = 0;
        mHeaderSkip    = sizeof(Header);
    }
    else
    {
#endif
        //
        // Adjust mAlignment to be at least pointer-aligned and a
        // power of 2.
        //
        size_t minAlign = sizeof(void *);
        mAlignment &= ~(minAlign - 1);
        if (mAlignment < minAlign)
            mAlignment = minAlign;
        mAlignment     = gl::ceilPow2(static_cast<unsigned int>(mAlignment));
        mAlignmentMask = mAlignment - 1;
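        // For example, on a 64-bit build (sizeof(void *) == 8) a requested
        // alignment of 12 becomes (12 & ~7) = 8, and ceilPow2(8) = 8, so
        // mAlignment = 8 and mAlignmentMask = 7.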

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
        //
        // Align header skip
        //
        mHeaderSkip = minAlign;
        if (mHeaderSkip < sizeof(Header))
        {
            mHeaderSkip = rx::roundUp(sizeof(Header), mAlignment);
        }
    }
    //
    // Don't allow page sizes smaller than the smallest common
    // OS page size (4K).
    //
    if (mPageSize < 4 * 1024)
        mPageSize = 4 * 1024;
    //
    // Setting the offset to the page size means there is no current
    // page, so the first allocation will obtain a new one.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

PoolAllocator::~PoolAllocator()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
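    // Return every page still in use to the heap, then every page parked on
    // the free list.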
    while (mInUseList)
    {
        Header *next = mInUseList->nextPage;
        mInUseList->~Header();
        delete[] reinterpret_cast<char *>(mInUseList);
        mInUseList = next;
    }
    //
    // We don't check the guard blocks here, because they were already
    // checked when each block was placed onto the free list.
    //
    while (mFreeList)
    {
        Header *next = mFreeList->nextPage;
        delete[] reinterpret_cast<char *>(mFreeList);
        mFreeList = next;
    }
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}

//
// Check a single guard block for damage
//
void Allocation::checkGuardBlock(unsigned char *blockMem,
                                 unsigned char val,
                                 const char *locText) const
{
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    for (size_t x = 0; x < kGuardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];
            // We don't print the assert message; it's here just to be helpful
            // when debugging.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize,
                     data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void PoolAllocator::push()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
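    // Snapshot the current page and offset so pop() can rewind the pool to
    // exactly this point.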
    AllocState state = {mCurrentPageOffset, mInUseList};

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void PoolAllocator::pop()
{
    if (mStack.empty())
        return;

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    Header *page       = mStack.back().page;
    mCurrentPageOffset = mStack.back().offset;
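
    // Walk the in-use list back to the page that was current at push() time,
    // recycling single pages to the free list and freeing multi-page blocks.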
    while (mInUseList != page)
    {
        // invoke destructor to free allocation list
        mInUseList->~Header();

        Header *nextInUse = mInUseList->nextPage;
        if (mInUseList->pageCount > 1)
        {
            delete[] reinterpret_cast<char *>(mInUseList);
        }
        else
        {
            mInUseList->nextPage = mFreeList;
            mFreeList            = mInUseList;
        }
        mInUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void PoolAllocator::popAll()
{
    while (!mStack.empty())
        pop();
}

void *PoolAllocator::allocate(size_t numBytes)
{
    ASSERT(!mLocked);

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++mNumCalls;
    mTotalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for; allocationSize is the total
    // size including guard blocks. In a release build,
    // kGuardBlockSize = 0 and this all gets optimized away.
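    // AllocationSize() is expected to account for the guard blocks (and any
    // per-allocation bookkeeping declared in PoolAlloc.h); the extra
    // mAlignment bytes cover worst-case alignment slack.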
    size_t allocationSize = Allocation::AllocationSize(numBytes) + mAlignment;
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return nullptr;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be inlined at some point.
    //
    if (allocationSize <= mPageSize - mCurrentPageOffset)
    {
        //
        // Safe to allocate from mCurrentPageOffset.
        //
        unsigned char *memory = reinterpret_cast<unsigned char *>(mInUseList) + mCurrentPageOffset;
        mCurrentPageOffset += allocationSize;
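        // (offset + mask) & ~mask rounds the offset up to the next multiple
        // of mAlignment, e.g. an offset of 0x21 with a mask of 0xF rounds up
        // to 0x30.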
        mCurrentPageOffset = (mCurrentPageOffset + mAlignmentMask) & ~mAlignmentMask;

        return initializeAllocation(mInUseList, memory, numBytes);
    }

    if (allocationSize > mPageSize - mHeaderSkip)
    {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient in allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + mHeaderSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return nullptr;

        Header *memory = reinterpret_cast<Header *>(::new char[numBytesToAlloc]);
        if (memory == nullptr)
            return nullptr;

        // Use placement-new to initialize header
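        // The second Header argument is the page count: numBytesToAlloc
        // divided by mPageSize, rounded up. pop() uses pageCount > 1 to send
        // multi-page blocks straight back to the heap instead of the free
        // list.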
        new (memory) Header(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
        mInUseList = memory;

        mCurrentPageOffset = mPageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
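        // std::align (from <memory>) advances unalignedPtr to the next
        // mAlignment boundary and returns it, or returns nullptr if fewer
        // than numBytes would remain; the mAlignment slack built into
        // allocationSize guarantees it succeeds here.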
        void *unalignedPtr =
            reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + mHeaderSkip);
        return std::align(mAlignment, numBytes, unalignedPtr, allocationSize);
    }
    unsigned char *newPageAddr =
        static_cast<unsigned char *>(allocateNewPage(numBytes, allocationSize));
    return initializeAllocation(mInUseList, newPageAddr, numBytes);
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
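    // Pool allocation is disabled: service each request with malloc, padded
    // so the result can be rounded up to mAlignment; pop() later frees the
    // original unaligned pointer recorded on the stack.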
    void *alloc = malloc(numBytes + mAlignmentMask);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc          = (intAlloc + mAlignmentMask) & ~mAlignmentMask;
    return reinterpret_cast<void *>(intAlloc);
#endif
}

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
void *PoolAllocator::allocateNewPage(size_t numBytes, size_t allocationSize)
{
    //
    // Need a simple page to allocate from.
    //
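    // Prefer recycling a page from the free list; only ask the heap for a
    // fresh page when the free list is empty.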
    Header *memory;
    if (mFreeList)
    {
        memory    = mFreeList;
        mFreeList = mFreeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<Header *>(::new char[mPageSize]);
        if (memory == nullptr)
            return nullptr;
    }
    // Use placement-new to initialize header
    new (memory) Header(mInUseList, 1);
    mInUseList = memory;

    unsigned char *ret = reinterpret_cast<unsigned char *>(mInUseList) + mHeaderSkip;
    mCurrentPageOffset = (mHeaderSkip + allocationSize + mAlignmentMask) & ~mAlignmentMask;
    return ret;
}
#endif

void PoolAllocator::lock()
{
    ASSERT(!mLocked);
    mLocked = true;
}

void PoolAllocator::unlock()
{
    ASSERT(mLocked);
    mLocked = false;
}

//
// Check all allocations in a list for damage by calling check on each.
//
void Allocation::checkAllocList() const
{
    for (const Allocation *alloc = this; alloc != nullptr; alloc = alloc->mPrevAlloc)
        alloc->check();
}

}  // namespace angle