blob: 12350234f9836d011a6c8d87e9aa2a09fc878410 [file] [log] [blame]
Nicolas Capens0bac2852016-05-07 06:09:58 -04001// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
John Bauman66b8ab22014-05-06 15:57:45 -04002//
Nicolas Capens0bac2852016-05-07 06:09:58 -04003// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
John Bauman66b8ab22014-05-06 15:57:45 -04006//
Nicolas Capens0bac2852016-05-07 06:09:58 -04007// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
John Bauman66b8ab22014-05-06 15:57:45 -040014
Nicolas Capenscc863da2015-01-21 15:50:55 -050015#include "PoolAlloc.h"
John Bauman66b8ab22014-05-06 15:57:45 -040016
17#ifndef _MSC_VER
18#include <stdint.h>
19#endif
20#include <stdio.h>
21
Nicolas Capenscc863da2015-01-21 15:50:55 -050022#include "InitializeGlobals.h"
23#include "osinclude.h"
John Bauman66b8ab22014-05-06 15:57:45 -040024
25OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;
26
John Bauman66b8ab22014-05-06 15:57:45 -040027bool InitializePoolIndex()
28{
Nicolas Capens0bac2852016-05-07 06:09:58 -040029 assert(PoolIndex == OS_INVALID_TLS_INDEX);
John Bauman66b8ab22014-05-06 15:57:45 -040030
Nicolas Capens0bac2852016-05-07 06:09:58 -040031 PoolIndex = OS_AllocTLSIndex();
32 return PoolIndex != OS_INVALID_TLS_INDEX;
John Bauman66b8ab22014-05-06 15:57:45 -040033}
34
35void FreePoolIndex()
36{
Nicolas Capens0bac2852016-05-07 06:09:58 -040037 assert(PoolIndex != OS_INVALID_TLS_INDEX);
Nicolas Capens978ddc52014-11-11 12:42:08 -050038
Nicolas Capens0bac2852016-05-07 06:09:58 -040039 OS_FreeTLSIndex(PoolIndex);
40 PoolIndex = OS_INVALID_TLS_INDEX;
John Bauman66b8ab22014-05-06 15:57:45 -040041}
42
Nicolas Capens978ddc52014-11-11 12:42:08 -050043TPoolAllocator* GetGlobalPoolAllocator()
John Bauman66b8ab22014-05-06 15:57:45 -040044{
Nicolas Capens0bac2852016-05-07 06:09:58 -040045 assert(PoolIndex != OS_INVALID_TLS_INDEX);
46 return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
John Bauman66b8ab22014-05-06 15:57:45 -040047}
48
49void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
50{
Nicolas Capens0bac2852016-05-07 06:09:58 -040051 assert(PoolIndex != OS_INVALID_TLS_INDEX);
52 OS_SetTLSValue(PoolIndex, poolAllocator);
John Bauman66b8ab22014-05-06 15:57:45 -040053}
54
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
// growthIncrement: requested page size in bytes (clamped to >= 4KB).
// allocationAlignment: requested alignment; rounded to at least
// pointer size and up to a power of 2.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    alignment(allocationAlignment)
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    , pageSize(growthIncrement),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
#endif
{
    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // Round alignment up to the next power of 2 and derive the mask
    // used for the align-up computations in allocate().
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Align header skip: offset from a page's start to its first
    // usable byte, rounded so user memory honors the alignment.
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: allocations are individual mallocs
    // tracked on a stack of lists; start with one empty level.
    mStack.push_back({});
#endif
}
108
//
// Return every page the allocator still owns — both the in-use list
// and the cached free list — to the system.
//
TPoolAllocator::~TPoolAllocator()
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Destroy in-use pages; ~tHeader() is run explicitly because the
    // pages were created with placement-new over raw char storage.
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: free every raw malloc() recorded in
    // every outstanding stack level.
    for (auto& allocs : mStack) {
        for (auto alloc : allocs) {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}
137
// Support MSVC++ 6.0
//
// Sentinel byte patterns for the guard-block machinery (see
// checkGuardBlock, which compares guard bytes against a given value).
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

// A zero guard size makes the guard machinery vanish in builds
// without GUARD_BLOCKS defined.
#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
148
//
// Check a single guard block for damage.
//
// blockMem: start of the guard region; val: expected fill byte
// (guardBlockBeginVal or guardBlockEndVal); locText: description used
// in the diagnostic message.  No-op unless GUARD_BLOCKS is defined.
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
            // MSVC's _snprintf uses %Iu for size_t; elsewhere use standard %zu.
            #if defined(_MSC_VER)
            _snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                      locText, size, data());
            #else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                      locText, size, data());
            #endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
172
173
//
// Open a new allocation "stack level": everything allocated after this
// call is released by the matching pop().
//
void TPoolAllocator::push()
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Snapshot the current page and offset so pop() can restore them.
    tAllocState state = { currentPageOffset, inUseList };

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: open a new, empty list that will
    // record the individual mallocs made at this level.
    mStack.push_back({});
#endif
}
189
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    // Nothing to release if no stack level is outstanding.
    if (mStack.size() < 1)
        return;

#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Restore the page list and offset captured by the matching push().
    tHeader* page = mStack.back().page;
    currentPageOffset = mStack.back().offset;

    // Unwind the in-use list back to the page that was current at
    // push() time.
    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            // Multi-page allocations go straight back to the system.
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            // Single pages are cached on the free list for reuse.
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    mStack.pop_back();
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: each allocation at this level was an
    // individual malloc(); free them one by one.
    for (auto alloc : mStack.back()) {
        free(alloc);
    }
    mStack.pop_back();
#endif
}
228
229//
230// Do a mass-deallocation of all the individual allocations
231// that have occurred.
232//
233void TPoolAllocator::popAll()
234{
Corentin Wallez3d7c7862017-10-31 18:05:38 -0400235 while (mStack.size() > 0)
Nicolas Capens0bac2852016-05-07 06:09:58 -0400236 pop();
John Bauman66b8ab22014-05-06 15:57:45 -0400237}
238
//
// Hand out numBytes of aligned memory from the current stack level.
// Returns 0 on size overflow.  The memory is reclaimed wholesale by
// pop()/popAll() — it is never freed individually.
//
void* TPoolAllocator::allocate(size_t numBytes)
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        // (When there is no current page, currentPageOffset == pageSize
        // and this test fails, so inUseList is not dereferenced here.)
        //
        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient and allocating and free-ing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from: recycle one from the free
    // list if available, otherwise get a fresh page.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
#else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: malloc with enough slack to align the
    // result by hand, and record the raw pointer so pop() can free it.
    void *alloc = malloc(numBytes + alignmentMask);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
    return reinterpret_cast<void *>(intAlloc);
#endif
}
327
328
329//
330// Check all allocations in a list for damage by calling check on each.
331//
332void TAllocation::checkAllocList() const
333{
Nicolas Capens0bac2852016-05-07 06:09:58 -0400334 for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
335 alloc->check();
John Bauman66b8ab22014-05-06 15:57:45 -0400336}