//
// Copyright (c) 2002-2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "compiler/PoolAlloc.h"

#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <assert.h>  // assert() in checkGuardBlock
#include <stdio.h>
#include <new>       // placement new and std::nothrow

#include "compiler/InitializeGlobals.h"
#include "compiler/osinclude.h"

OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;

void InitializeGlobalPools()
{
    TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
    if (globalPools)
        return;

    TThreadGlobalPools* threadData = new TThreadGlobalPools();
    threadData->globalPoolAllocator = 0;

    OS_SetTLSValue(PoolIndex, threadData);
}

void FreeGlobalPools()
{
    // Release the allocated memory for this thread.
    TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
    if (!globalPools)
        return;

    delete globalPools;

    // Clear the TLS slot so a stale pointer is never observed if this
    // thread later re-initializes its pools.
    OS_SetTLSValue(PoolIndex, 0);
}

bool InitializePoolIndex()
{
    // Allocate a TLS index.
    if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
        return false;

    return true;
}

void FreePoolIndex()
{
    // Release the TLS index.
    OS_FreeTLSIndex(PoolIndex);
}

TPoolAllocator& GetGlobalPoolAllocator()
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    return *threadData->globalPoolAllocator;
}

void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    threadData->globalPoolAllocator = poolAllocator;
}

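//
// Illustrative usage sketch (not part of this file; the actual call
// sites live elsewhere in the compiler). The functions above imply
// this per-thread sequence:
//
//   InitializePoolIndex();               // once per process
//   InitializeGlobalPools();             // once per thread
//   TPoolAllocator allocator;
//   SetGlobalPoolAllocator(&allocator);  // make it current for this thread
//   // ... pool-backed work ...
//   FreeGlobalPools();                   // per-thread teardown
//   FreePoolIndex();                     // once per process, at shutdown
//
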
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes smaller than 4K, the smallest common
    // OS page size.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

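    // Worked example of the rounding above (illustrative numbers, not
    // from the original source): on a 64-bit build, minAlign is 8, so a
    // requested allocationAlignment of 6 is first clamped (6 & ~7 == 0,
    // which is < 8, so alignment becomes 8), then the power-of-two scan
    // leaves alignment == 8 and alignmentMask == 7. Any offset x is then
    // rounded up with (x + 7) & ~7.
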
    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}

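//
// Resulting page layout (illustrative sketch): each in-use page begins
// with a tHeader, followed by padding up to headerSkip (the header size
// rounded up to the alignment), followed by the allocations themselves.
//
//   [tHeader][pad to headerSkip][allocation][allocation]...[unused tail]
//
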
TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // We should not check the guard blocks here, because we did it
    // already when the block was placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

// Define the static guard-block constants out-of-line (required to
// support MSVC++ 6.0, which cannot initialize them in-class).
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
const size_t TAllocation::guardBlockSize = 0;
#endif

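//
// Guard-block layout of a single debug-build allocation (illustrative;
// the exact member layout is defined by TAllocation in PoolAlloc.h):
//
//   [guardBlockBeginVal x guardBlockSize][user data][guardBlockEndVal x guardBlockSize]
//
// userDataFill (0xcd) is the pattern assumed to pre-fill the user region
// in debug builds, so use of uninitialized pool memory is recognizable.
//
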
//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
#if defined(_MSC_VER)
            _snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                      locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.empty())
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // Invoke the destructor to free the allocation list.
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (!stack.empty())
        pop();
}

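//
// Illustrative usage sketch (not from the original source): pool marks
// nest like a stack, so a compilation phase can bulk-free everything it
// allocated without tracking individual pointers.
//
//   TPoolAllocator pool;
//   pool.push();                   // set a mark
//   void* a = pool.allocate(128);  // per-phase allocations
//   void* b = pool.allocate(64);
//   pool.pop();                    // frees a and b; pages are recycled
//   pool.popAll();                 // unwinds any marks still outstanding
//
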
void* TPoolAllocator::allocate(size_t numBytes)
{
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In a release build,
    // guardBlockSize == 0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

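    // Illustrative overflow case (assuming, per PoolAlloc.h, that
    // TAllocation::allocationSize adds the guard blocks and the
    // TAllocation header to numBytes): a numBytes within that overhead
    // of SIZE_MAX makes the unsigned sum wrap around, so allocationSize
    // ends up smaller than numBytes and the request is rejected here
    // rather than silently under-allocating.
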
    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        // nothrow new keeps the null check below meaningful; a plain
        // new[] would throw on failure instead of returning 0.
        tHeader* memory = reinterpret_cast<tHeader*>(::new(std::nothrow) char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize the header.
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make the next allocation come from a new page

        // No guard blocks for multi-page allocations (yet).
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new(std::nothrow) char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize the header.
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}

//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}