//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.h:
//    Defines the class interface for PoolAllocator and the Allocation
//    class that it uses internally.
//

#ifndef COMMON_POOLALLOC_H_
#define COMMON_POOLALLOC_H_

#if !defined(NDEBUG)
#    define ANGLE_POOL_ALLOC_GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//     typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
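//
// Illustrative sketch (not part of this interface): a class can route its
// allocations through a pool by defining class-scope new/delete. The class
// name here is a hypothetical placeholder.
//
//     class AstNode
//     {
//       public:
//         void *operator new(size_t size, PoolAllocator &pool) { return pool.allocate(size); }
//         void operator delete(void *, PoolAllocator &) {}  // freed en masse by pop()
//     };
//
//     // Usage: AstNode *node = new (pool) AstNode;
//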

#include <stddef.h>
#include <string.h>
#include <memory>
#include <vector>

#include "angleutils.h"
#include "common/debug.h"

namespace angle
{
// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//

class Allocation
{
  public:
    Allocation(size_t size, unsigned char *mem, Allocation *prev = 0)
        : mSize(size), mMem(mem), mPrevAlloc(prev)
    {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (kGuardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
        memset(data(), kUserDataFill, mSize);
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

    void check() const
    {
        checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    static size_t AllocationSize(size_t size) { return size + 2 * kGuardBlockSize + HeaderSize(); }

    // Offset from surrounding buffer to get to user data buffer.
    static unsigned char *OffsetAllocation(unsigned char *m)
    {
        return m + kGuardBlockSize + HeaderSize();
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mMem + HeaderSize(); }
    unsigned char *data() const { return preGuard() + kGuardBlockSize; }
    unsigned char *postGuard() const { return data() + mSize; }
    size_t mSize;            // size of the user data area
    unsigned char *mMem;     // beginning of our allocation (pts to header)
    Allocation *mPrevAlloc;  // prior allocation in the chain

    static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
    static constexpr unsigned char kGuardBlockEndVal   = 0xfe;
    static constexpr unsigned char kUserDataFill       = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    static constexpr size_t kGuardBlockSize = 16;
    static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
    static constexpr size_t kGuardBlockSize = 0;
    static constexpr size_t HeaderSize() { return 0; }
#endif
};
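
// Worked example of the bracketing arithmetic above: with guard blocks
// enabled (kGuardBlockSize == 16), a request for 10 user bytes reserves
//
//     AllocationSize(10) == 10 + 2 * 16 + sizeof(Allocation)
//
// bytes in total, and OffsetAllocation() skips the header plus the leading
// guard block to reach the user data. With guard blocks disabled, both the
// guard and header sizes are zero, so AllocationSize(10) == 10 and
// OffsetAllocation(m) == m.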

//
// There are several stacks.  One is to track the pushing and popping
// of the user, and is not yet implemented.  The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used need not match the underlying OS page size, but
// making it about that size, or a multiple of it, is likely most
// efficient.
//
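// Illustrative layout of a single pool page (field names are the members
// declared below; sizes not to scale):
//
//     [Header][padding up to mHeaderSkip][allocation][allocation]...
//
// mCurrentPageOffset is the next free byte within the page at the head of
// mInUseList; when a request does not fit, a fresh page is taken from
// mFreeList or obtained from the OS.
//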
class PoolAllocator : angle::NonCopyable
{
  public:
    static const int kDefaultAlignment = 16;
    //
    // Create PoolAllocator. If alignment is set to 1 byte then the fastAllocate()
    // function can be used to make allocations with less overhead.
    //
    PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~PoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() has never been called, all memory allocated so far.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();
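
    //
    // Illustrative sketch of the push/pop model:
    //
    //     PoolAllocator pool;
    //     pool.push();
    //     void *a = pool.allocate(24);
    //     void *b = pool.allocate(128);
    //     pool.pop();  // releases both a and b at once
    //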

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void *allocate(size_t numBytes);

    //
    // Call fastAllocate() for a faster allocation path that does minimal
    // bookkeeping.
    // Precondition: the allocator must have been created with an alignment of 1.
    //
    ANGLE_INLINE uint8_t *fastAllocate(size_t numBytes)
    {
#if defined(ANGLE_DISABLE_POOL_ALLOC)
        return reinterpret_cast<uint8_t *>(allocate(numBytes));
#else
        ASSERT(mAlignment == 1);
        // No multi-page allocations
        ASSERT(numBytes <= (mPageSize - mHeaderSkip));
        //
        // Do the allocation, most likely case inline first, for efficiency.
        //
        if (numBytes <= mPageSize - mCurrentPageOffset)
        {
            //
            // Safe to allocate from mCurrentPageOffset.
            //
            uint8_t *memory = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;
            mCurrentPageOffset += numBytes;
            return memory;
        }
        return reinterpret_cast<uint8_t *>(allocateNewPage(numBytes, numBytes));
#endif
    }
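
    //
    // Illustrative use of the fast path (requires the 1-byte-alignment
    // constructor argument):
    //
    //     PoolAllocator pool(8 * 1024, 1);
    //     uint8_t *bytes = pool.fastAllocate(64);
    //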

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

    // Catch unwanted allocations.
    // TODO(jmadill): Remove this when we remove the global allocator.
    void lock();
    void unlock();

  private:
    size_t mAlignment;  // all returned allocations will be aligned at
                        // this granularity, which will be a power of 2
    size_t mAlignmentMask;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    friend struct Header;

    struct Header
    {
        Header(Header *nextPage, size_t pageCount)
            : nextPage(nextPage),
              pageCount(pageCount)
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
              ,
              lastAllocation(0)
#    endif
        {}

        ~Header()
        {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
            if (lastAllocation)
                lastAllocation->checkAllocList();
#    endif
        }

        Header *nextPage;
        size_t pageCount;
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        Allocation *lastAllocation;
#    endif
    };

    struct AllocState
    {
        size_t offset;
        Header *page;
    };
    using AllocStack = std::vector<AllocState>;

    // Slow path of allocation when we have to get a new page.
    void *allocateNewPage(size_t numBytes, size_t allocationSize);
    // Track allocations if and only if we're using guard blocks
    void *initializeAllocation(Header *block, unsigned char *memory, size_t numBytes)
    {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        new (memory) Allocation(numBytes + mAlignment, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<Allocation *>(memory);
#    endif
        // The OffsetAllocation() call is optimized away if !defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        void *unalignedPtr  = Allocation::OffsetAllocation(memory);
        size_t alignedBytes = numBytes + mAlignment;
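        // std::align bumps unalignedPtr up to the next mAlignment boundary;
        // the extra mAlignment bytes reserved above guarantee that numBytes
        // still fit after the adjustment.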
        return std::align(mAlignment, numBytes, unalignedPtr, alignedBytes);
    }

    size_t mPageSize;           // granularity of allocation from the OS
    size_t mHeaderSkip;         // amount of memory to skip to make room for the
                                // header (basically, size of the header, rounded
                                // up to make it aligned)
    size_t mCurrentPageOffset;  // next offset in top of inUseList to allocate from
    Header *mFreeList;          // list of popped memory
    Header *mInUseList;         // list of all memory currently being used
    AllocStack mStack;          // stack of where to allocate from, to partition pool

    int mNumCalls;       // just an interesting statistic
    size_t mTotalBytes;  // just an interesting statistic

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    std::vector<std::vector<void *>> mStack;
#endif

    bool mLocked;
};

}  // namespace angle

#endif  // COMMON_POOLALLOC_H_