//
// Copyright (c) 2002-2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

7#include "compiler/PoolAlloc.h"
8
9#ifndef _MSC_VER
10#include <stdint.h>
11#endif
12#include <stdio.h>
13
14#include "compiler/InitializeGlobals.h"
15#include "compiler/osinclude.h"
16
// Process-wide TLS slot; each thread stores its own TThreadGlobalPools here.
OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;
18
19void InitializeGlobalPools()
20{
21 TThreadGlobalPools* globalPools= static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
22 if (globalPools)
23 return;
24
25 TThreadGlobalPools* threadData = new TThreadGlobalPools();
26 threadData->globalPoolAllocator = 0;
27
28 OS_SetTLSValue(PoolIndex, threadData);
29}
30
31void FreeGlobalPools()
32{
33 // Release the allocated memory for this thread.
34 TThreadGlobalPools* globalPools= static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
35 if (!globalPools)
36 return;
37
38 delete globalPools;
39}
40
41bool InitializePoolIndex()
42{
43 // Allocate a TLS index.
44 if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
45 return false;
46
47 return true;
48}
49
50void FreePoolIndex()
51{
52 // Release the TLS index.
53 OS_FreeTLSIndex(PoolIndex);
54}
55
56TPoolAllocator& GetGlobalPoolAllocator()
57{
58 TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
59
60 return *threadData->globalPoolAllocator;
61}
62
63void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
64{
65 TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
66
67 threadData->globalPoolAllocator = poolAllocator;
68}
69
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//

// Construct a pool that serves allocations out of page-sized chunks.
// growthIncrement: requested page size in bytes (clamped up to 4KB minimum).
// allocationAlignment: requested alignment of returned pointers; rounded up
// to at least pointer size and then to a power of two.
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    // Clear bits below pointer alignment, then clamp up to minAlign...
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // ...then round up to the next power of two; alignmentMask is used
    // throughout allocate() for the usual (x + mask) & ~mask rounding.
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    // headerSkip is the offset from the start of a page to the first user
    // byte: at least a pointer in size, and large enough to hold an aligned
    // tHeader.
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}
117
118TPoolAllocator::~TPoolAllocator()
119{
120 while (inUseList) {
121 tHeader* next = inUseList->nextPage;
122 inUseList->~tHeader();
123 delete [] reinterpret_cast<char*>(inUseList);
124 inUseList = next;
125 }
126
127 // We should not check the guard blocks
128 // here, because we did it already when the block was
129 // placed into the free list.
130 //
131 while (freeList) {
132 tHeader* next = freeList->nextPage;
133 delete [] reinterpret_cast<char*>(freeList);
134 freeList = next;
135 }
136}
137
// Support MSVC++ 6.0
// Fill patterns: guard regions are filled with the begin/end values and
// checked later for damage; fresh user memory is filled with userDataFill
// so use of uninitialized data is recognizable in a debugger.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

// Guard blocks only exist in GUARD_BLOCKS builds; with size 0 all the
// guard arithmetic and checking collapses away in release builds.
#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
148
//
// Check a single guard block for damage
//
// blockMem: start of the guard region to verify; val: the fill byte every
// guard byte must still equal; locText: short location text ("before"/
// "after" style) for the diagnostic message.  Entire body compiles away
// unless GUARD_BLOCKS is defined.
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
            // NOTE(review): assertMsg is intentionally built but unused; it is
            // meant to be inspected in a debugger when the assert fires.
            #if defined(_MSC_VER)
                _snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                        locText, size, data());
            #else
                snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                        locText, size, data());
            #endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
172
173
174void TPoolAllocator::push()
175{
176 tAllocState state = { currentPageOffset, inUseList };
177
178 stack.push_back(state);
179
180 //
181 // Indicate there is no current page to allocate from.
182 //
183 currentPageOffset = pageSize;
184}
185
186//
187// Do a mass-deallocation of all the individual allocations
188// that have occurred since the last push(), or since the
189// last pop(), or since the object's creation.
190//
191// The deallocated pages are saved for future allocations.
192//
193void TPoolAllocator::pop()
194{
195 if (stack.size() < 1)
196 return;
197
198 tHeader* page = stack.back().page;
199 currentPageOffset = stack.back().offset;
200
201 while (inUseList != page) {
202 // invoke destructor to free allocation list
203 inUseList->~tHeader();
204
205 tHeader* nextInUse = inUseList->nextPage;
206 if (inUseList->pageCount > 1)
207 delete [] reinterpret_cast<char*>(inUseList);
208 else {
209 inUseList->nextPage = freeList;
210 freeList = inUseList;
211 }
212 inUseList = nextInUse;
213 }
214
215 stack.pop_back();
216}
217
218//
219// Do a mass-deallocation of all the individual allocations
220// that have occurred.
221//
222void TPoolAllocator::popAll()
223{
224 while (stack.size() > 0)
225 pop();
226}
227
228void* TPoolAllocator::allocate(size_t numBytes)
229{
230 // If we are using guard blocks, all allocations are bracketed by
231 // them: [guardblock][allocation][guardblock]. numBytes is how
232 // much memory the caller asked for. allocationSize is the total
233 // size including guard blocks. In release build,
234 // guardBlockSize=0 and this all gets optimized away.
235 size_t allocationSize = TAllocation::allocationSize(numBytes);
236
237 //
238 // Just keep some interesting statistics.
239 //
240 ++numCalls;
241 totalBytes += numBytes;
242
243 //
244 // Do the allocation, most likely case first, for efficiency.
245 // This step could be moved to be inline sometime.
246 //
247 if (currentPageOffset + allocationSize <= pageSize) {
248 //
249 // Safe to allocate from currentPageOffset.
250 //
251 unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
252 currentPageOffset += allocationSize;
253 currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
254
255 return initializeAllocation(inUseList, memory, numBytes);
256 }
257
258 if (allocationSize + headerSkip > pageSize) {
259 //
260 // Do a multi-page allocation. Don't mix these with the others.
261 // The OS is efficient and allocating and free-ing multiple pages.
262 //
263 size_t numBytesToAlloc = allocationSize + headerSkip;
264 tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
265 if (memory == 0)
266 return 0;
267
268 // Use placement-new to initialize header
269 new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
270 inUseList = memory;
271
272 currentPageOffset = pageSize; // make next allocation come from a new page
273
274 // No guard blocks for multi-page allocations (yet)
275 return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
276 }
277
278 //
279 // Need a simple page to allocate from.
280 //
281 tHeader* memory;
282 if (freeList) {
283 memory = freeList;
284 freeList = freeList->nextPage;
285 } else {
286 memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
287 if (memory == 0)
288 return 0;
289 }
290
291 // Use placement-new to initialize header
292 new(memory) tHeader(inUseList, 1);
293 inUseList = memory;
294
295 unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
296 currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
297
298 return initializeAllocation(inUseList, ret, numBytes);
299}
300
301
302//
303// Check all allocations in a list for damage by calling check on each.
304//
305void TAllocation::checkAllocList() const
306{
307 for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
308 alloc->check();
309}