blob: 6ec3c40b522bd57270e29dca9055ba59d0f8ef75 [file] [log] [blame]
John Bauman66b8ab22014-05-06 15:57:45 -04001//
2// Copyright (c) 2002-2012 The ANGLE Project Authors. All rights reserved.
3// Use of this source code is governed by a BSD-style license that can be
4// found in the LICENSE file.
5//
6
#include "compiler/PoolAlloc.h"

#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <new>
#include <stdio.h>

#include "compiler/InitializeGlobals.h"
#include "compiler/osinclude.h"
16
17OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;
18
John Bauman66b8ab22014-05-06 15:57:45 -040019bool InitializePoolIndex()
20{
Nicolas Capens978ddc52014-11-11 12:42:08 -050021 assert(PoolIndex == OS_INVALID_TLS_INDEX);
John Bauman66b8ab22014-05-06 15:57:45 -040022
Nicolas Capens978ddc52014-11-11 12:42:08 -050023 PoolIndex = OS_AllocTLSIndex();
24 return PoolIndex != OS_INVALID_TLS_INDEX;
John Bauman66b8ab22014-05-06 15:57:45 -040025}
26
27void FreePoolIndex()
28{
Nicolas Capens978ddc52014-11-11 12:42:08 -050029 assert(PoolIndex != OS_INVALID_TLS_INDEX);
30
John Bauman66b8ab22014-05-06 15:57:45 -040031 OS_FreeTLSIndex(PoolIndex);
Nicolas Capens978ddc52014-11-11 12:42:08 -050032 PoolIndex = OS_INVALID_TLS_INDEX;
John Bauman66b8ab22014-05-06 15:57:45 -040033}
34
Nicolas Capens978ddc52014-11-11 12:42:08 -050035TPoolAllocator* GetGlobalPoolAllocator()
John Bauman66b8ab22014-05-06 15:57:45 -040036{
Nicolas Capens978ddc52014-11-11 12:42:08 -050037 assert(PoolIndex != OS_INVALID_TLS_INDEX);
38 return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
John Bauman66b8ab22014-05-06 15:57:45 -040039}
40
41void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
42{
Nicolas Capens978ddc52014-11-11 12:42:08 -050043 assert(PoolIndex != OS_INVALID_TLS_INDEX);
44 OS_SetTLSValue(PoolIndex, poolAllocator);
John Bauman66b8ab22014-05-06 15:57:45 -040045}
46
47//
48// Implement the functionality of the TPoolAllocator class, which
49// is documented in PoolAlloc.h.
50//
51TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
52 pageSize(growthIncrement),
53 alignment(allocationAlignment),
54 freeList(0),
55 inUseList(0),
56 numCalls(0),
57 totalBytes(0)
58{
59 //
60 // Don't allow page sizes we know are smaller than all common
61 // OS page sizes.
62 //
63 if (pageSize < 4*1024)
64 pageSize = 4*1024;
65
66 //
67 // A large currentPageOffset indicates a new page needs to
68 // be obtained to allocate memory.
69 //
70 currentPageOffset = pageSize;
71
72 //
73 // Adjust alignment to be at least pointer aligned and
74 // power of 2.
75 //
76 size_t minAlign = sizeof(void*);
77 alignment &= ~(minAlign - 1);
78 if (alignment < minAlign)
79 alignment = minAlign;
80 size_t a = 1;
81 while (a < alignment)
82 a <<= 1;
83 alignment = a;
84 alignmentMask = a - 1;
85
86 //
87 // Align header skip
88 //
89 headerSkip = minAlign;
90 if (headerSkip < sizeof(tHeader)) {
91 headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
92 }
93}
94
95TPoolAllocator::~TPoolAllocator()
96{
97 while (inUseList) {
98 tHeader* next = inUseList->nextPage;
99 inUseList->~tHeader();
100 delete [] reinterpret_cast<char*>(inUseList);
101 inUseList = next;
102 }
103
104 // We should not check the guard blocks
105 // here, because we did it already when the block was
106 // placed into the free list.
107 //
108 while (freeList) {
109 tHeader* next = freeList->nextPage;
110 delete [] reinterpret_cast<char*>(freeList);
111 freeList = next;
112 }
113}
114
// Support MSVC++ 6.0
// Fill bytes used by the guard-block machinery: guard regions on either
// side of an allocation are written with the begin/end values and later
// verified by checkGuardBlock(); userDataFill marks fresh user memory.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

// With GUARD_BLOCKS undefined the guard size is 0, so all guard logic
// compiles away (loops over guardBlockSize execute zero iterations).
#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
125
126//
127// Check a single guard block for damage
128//
// Verify one guard region. blockMem points at the guard bytes, val is the
// fill byte they were written with (guardBlockBeginVal or guardBlockEndVal),
// and locText describes the location for the diagnostic text. Asserts on
// any damaged byte; a no-op unless GUARD_BLOCKS is defined.
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
            // (%Iu is MSVC's pre-C99 spelling of the %zu size_t conversion.)
            #if defined(_MSC_VER)
            _snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                    locText, size, data());
            #else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                    locText, size, data());
            #endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
149
150
151void TPoolAllocator::push()
152{
153 tAllocState state = { currentPageOffset, inUseList };
154
155 stack.push_back(state);
156
157 //
158 // Indicate there is no current page to allocate from.
159 //
160 currentPageOffset = pageSize;
161}
162
163//
164// Do a mass-deallocation of all the individual allocations
165// that have occurred since the last push(), or since the
166// last pop(), or since the object's creation.
167//
168// The deallocated pages are saved for future allocations.
169//
170void TPoolAllocator::pop()
171{
172 if (stack.size() < 1)
173 return;
174
175 tHeader* page = stack.back().page;
176 currentPageOffset = stack.back().offset;
177
178 while (inUseList != page) {
179 // invoke destructor to free allocation list
180 inUseList->~tHeader();
181
182 tHeader* nextInUse = inUseList->nextPage;
183 if (inUseList->pageCount > 1)
184 delete [] reinterpret_cast<char*>(inUseList);
185 else {
186 inUseList->nextPage = freeList;
187 freeList = inUseList;
188 }
189 inUseList = nextInUse;
190 }
191
192 stack.pop_back();
193}
194
195//
196// Do a mass-deallocation of all the individual allocations
197// that have occurred.
198//
199void TPoolAllocator::popAll()
200{
201 while (stack.size() > 0)
202 pop();
203}
204
205void* TPoolAllocator::allocate(size_t numBytes)
206{
John Bauman66b8ab22014-05-06 15:57:45 -0400207 //
208 // Just keep some interesting statistics.
209 //
210 ++numCalls;
211 totalBytes += numBytes;
212
Nicolas Capensff7f1002014-11-11 11:31:47 -0500213 // If we are using guard blocks, all allocations are bracketed by
214 // them: [guardblock][allocation][guardblock]. numBytes is how
215 // much memory the caller asked for. allocationSize is the total
216 // size including guard blocks. In release build,
217 // guardBlockSize=0 and this all gets optimized away.
218 size_t allocationSize = TAllocation::allocationSize(numBytes);
219 // Detect integer overflow.
220 if (allocationSize < numBytes)
221 return 0;
222
John Bauman66b8ab22014-05-06 15:57:45 -0400223 //
224 // Do the allocation, most likely case first, for efficiency.
225 // This step could be moved to be inline sometime.
226 //
Nicolas Capensff7f1002014-11-11 11:31:47 -0500227 if (allocationSize <= pageSize - currentPageOffset) {
John Bauman66b8ab22014-05-06 15:57:45 -0400228 //
229 // Safe to allocate from currentPageOffset.
230 //
231 unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
232 currentPageOffset += allocationSize;
233 currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
234
235 return initializeAllocation(inUseList, memory, numBytes);
236 }
237
Nicolas Capensff7f1002014-11-11 11:31:47 -0500238 if (allocationSize > pageSize - headerSkip) {
John Bauman66b8ab22014-05-06 15:57:45 -0400239 //
240 // Do a multi-page allocation. Don't mix these with the others.
241 // The OS is efficient and allocating and free-ing multiple pages.
242 //
243 size_t numBytesToAlloc = allocationSize + headerSkip;
Nicolas Capensff7f1002014-11-11 11:31:47 -0500244 // Detect integer overflow.
245 if (numBytesToAlloc < allocationSize)
246 return 0;
247
John Bauman66b8ab22014-05-06 15:57:45 -0400248 tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
249 if (memory == 0)
250 return 0;
251
252 // Use placement-new to initialize header
253 new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
254 inUseList = memory;
255
256 currentPageOffset = pageSize; // make next allocation come from a new page
257
258 // No guard blocks for multi-page allocations (yet)
259 return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
260 }
261
262 //
263 // Need a simple page to allocate from.
264 //
265 tHeader* memory;
266 if (freeList) {
267 memory = freeList;
268 freeList = freeList->nextPage;
269 } else {
270 memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
271 if (memory == 0)
272 return 0;
273 }
274
275 // Use placement-new to initialize header
276 new(memory) tHeader(inUseList, 1);
277 inUseList = memory;
278
279 unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
280 currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
281
282 return initializeAllocation(inUseList, ret, numBytes);
283}
284
285
286//
287// Check all allocations in a list for damage by calling check on each.
288//
289void TAllocation::checkAllocList() const
290{
291 for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
292 alloc->check();
293}