// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "CommandAllocator.h"

#include "Math.h"

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdlib>
#include <utility>
#define ASSERT assert

namespace backend {

    constexpr uint32_t EndOfBlock = UINT_MAX; // std::numeric_limits<uint32_t>::max()
    constexpr uint32_t AdditionalData = UINT_MAX - 1; // std::numeric_limits<uint32_t>::max() - 1
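
    // How commands are laid out inside a block, as a rough sketch (the exact padding
    // depends on each command's alignment; this diagram is illustrative, not taken
    // from the original code):
    //
    //   | uint32_t id | pad | command payload | pad | uint32_t id | ... | EndOfBlock |
    //
    // Every id slot is aligned to alignof(uint32_t). Reading EndOfBlock means "jump
    // to the next block, or stop if this was the last one"; reading AdditionalData
    // means "the next payload is raw data attached to the previous command".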

    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator

    CommandIterator::CommandIterator()
        : endOfBlock(EndOfBlock) {
        Reset();
    }

    CommandIterator::~CommandIterator() {
        ASSERT(dataWasDestroyed);

        if (!IsEmpty()) {
            for (auto& block : blocks) {
                free(block.block);
            }
        }
    }

    CommandIterator::CommandIterator(CommandIterator&& other)
        : endOfBlock(EndOfBlock) {
        if (!other.IsEmpty()) {
            blocks = std::move(other.blocks);
            other.Reset();
        }
        other.DataWasDestroyed();
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
        if (!other.IsEmpty()) {
            blocks = std::move(other.blocks);
            other.Reset();
        } else {
            blocks.clear();
        }
        other.DataWasDestroyed();
        Reset();
        return *this;
    }

    CommandIterator::CommandIterator(CommandAllocator&& allocator)
        : blocks(allocator.AcquireBlocks()), endOfBlock(EndOfBlock) {
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
        blocks = allocator.AcquireBlocks();
        Reset();
        return *this;
    }

    void CommandIterator::Reset() {
        currentBlock = 0;

        if (blocks.empty()) {
            // This will cause the first NextCommandId call to try to move to the next
            // block and stop the iteration immediately, without special casing the
            // initialization.
            currentPtr = reinterpret_cast<uint8_t*>(&endOfBlock);
            blocks.emplace_back();
            blocks[0].size = sizeof(endOfBlock);
            blocks[0].block = currentPtr;
        } else {
            currentPtr = Align(blocks[0].block, alignof(uint32_t));
        }
    }

    void CommandIterator::DataWasDestroyed() {
        dataWasDestroyed = true;
    }

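    // An empty iterator's single pseudo-block points at the endOfBlock member itself
    // (set up by Reset), so emptiness reduces to a pointer comparison.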
    bool CommandIterator::IsEmpty() const {
        return blocks[0].block == reinterpret_cast<const uint8_t*>(&endOfBlock);
    }

    bool CommandIterator::NextCommandId(uint32_t* commandId) {
        uint8_t* idPtr = Align(currentPtr, alignof(uint32_t));
        ASSERT(idPtr + sizeof(uint32_t) <= blocks[currentBlock].block + blocks[currentBlock].size);

        uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);

        if (id == EndOfBlock) {
            currentBlock++;
            if (currentBlock >= blocks.size()) {
                Reset();
                return false;
            }
            currentPtr = Align(blocks[currentBlock].block, alignof(uint32_t));
            return NextCommandId(commandId);
        }

        currentPtr = idPtr + sizeof(uint32_t);
        *commandId = id;
        return true;
    }
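
    // A minimal sketch of the consuming side, using the two calls above (the
    // CommandDraw id and DrawCmd struct are hypothetical, and commands are assumed
    // to be POD so no destructors need to run):
    //
    //   uint32_t type;
    //   while (commands.NextCommandId(&type)) {
    //       switch (type) {
    //           case CommandDraw: {
    //               DrawCmd* draw = static_cast<DrawCmd*>(
    //                   commands.NextCommand(sizeof(DrawCmd), alignof(DrawCmd)));
    //               // ... execute *draw ...
    //           } break;
    //       }
    //   }
    //   commands.DataWasDestroyed();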

    void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
        uint8_t* commandPtr = Align(currentPtr, commandAlignment);
        ASSERT(commandPtr + commandSize <= blocks[currentBlock].block + blocks[currentBlock].size);

        currentPtr = commandPtr + commandSize;
        return commandPtr;
    }
131
132 void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
133 uint32_t id;
134 bool hasId = NextCommandId(&id);
135 ASSERT(hasId);
136 ASSERT(id == AdditionalData);
137
138 return NextCommand(dataSize, dataAlignment);
139 }
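
    // NextData is the read-side counterpart of CommandAllocator::AllocateData: extra
    // payloads are tagged with the AdditionalData id, so the iterator can assert that
    // the caller asks for data exactly where data was allocated.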

    // Potential TODO(cwallez@chromium.org):
    // - Host the size and pointer to the next block in the block itself, to avoid an allocation in the vector
    // - Assume T's alignof is, say, 64 bits, static_assert it, and make commandAlignment a constant in Allocate
    // - Be able to optimize allocation to one block, for command buffers expected to live long, to avoid cache misses
    // - Better block allocation; maybe add an NXT API to hint that a command buffer will have a size close to another's

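    // A default-constructed allocator points at a one-entry "dummy" region (dummyEnum
    // is presumably a small array member declared in the header), so the first
    // Allocate finds no room and immediately grabs a real block, while AcquireBlocks
    // on an unused allocator still has space to write its EndOfBlock tag.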
    CommandAllocator::CommandAllocator()
        : currentPtr(reinterpret_cast<uint8_t*>(&dummyEnum[0])), endPtr(reinterpret_cast<uint8_t*>(&dummyEnum[1])) {
    }

    CommandAllocator::~CommandAllocator() {
        ASSERT(blocks.empty());
    }

    CommandBlocks&& CommandAllocator::AcquireBlocks() {
        ASSERT(currentPtr != nullptr && endPtr != nullptr);
        ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
        ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
        *reinterpret_cast<uint32_t*>(currentPtr) = EndOfBlock;

        currentPtr = nullptr;
        endPtr = nullptr;
        return std::move(blocks);
    }
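
    // A minimal sketch of the producing side and the handoff to an iterator (the
    // CommandDraw id and DrawCmd struct are hypothetical, as above; error handling
    // elided):
    //
    //   CommandAllocator allocator;
    //   DrawCmd* draw = reinterpret_cast<DrawCmd*>(
    //       allocator.Allocate(CommandDraw, sizeof(DrawCmd), alignof(DrawCmd)));
    //   draw->vertexCount = 3;
    //   CommandIterator commands(std::move(allocator)); // calls AcquireBlocks()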

    uint8_t* CommandAllocator::Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment) {
        ASSERT(currentPtr != nullptr);
        ASSERT(endPtr != nullptr);
        ASSERT(commandId != EndOfBlock);

        // It should always be possible to allocate one id, for EndOfBlock tagging.
        ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
        ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(currentPtr);

        uint8_t* commandAlloc = Align(currentPtr + sizeof(uint32_t), commandAlignment);
        uint8_t* nextPtr = Align(commandAlloc + commandSize, alignof(uint32_t));

        // When there is not enough space, we signal EndOfBlock so that the iterator knows to
        // move to the next one. EndOfBlock on the last block means the end of the commands.
        if (nextPtr + sizeof(uint32_t) > endPtr) {

            // Even if we are not able to get another block, the list of commands will be well-formed
            // and iterable, as this block will be the last one.
            *idAlloc = EndOfBlock;

            // Make sure we have space for the current allocation, plus the end-of-block tag and
            // alignment padding for the first id.
            if (!GetNewBlock(nextPtr - currentPtr + sizeof(uint32_t) + alignof(uint32_t))) {
                return nullptr;
            }
            return Allocate(commandId, commandSize, commandAlignment);
        }

        *idAlloc = commandId;
        currentPtr = nextPtr;
        return commandAlloc;
    }
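
    // Worked example of the pointer arithmetic above, assuming currentPtr sits at
    // offset 0 of a block and the command needs 8-byte alignment: idAlloc lands at
    // offset 0, commandAlloc at Align(4, 8) == 8, and for a 12-byte command nextPtr
    // at Align(20, 4) == 20, leaving offset 20 for the next id or the EndOfBlock tag.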

    uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
        return Allocate(AdditionalData, commandSize, commandAlignment);
    }

    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
        // Allocate blocks with doubling sizes, up to a maximum of 16k, but at least minimumSize.
        lastAllocationSize = std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));

        uint8_t* block = reinterpret_cast<uint8_t*>(malloc(lastAllocationSize));
        if (block == nullptr) {
            return false;
        }

        blocks.push_back({lastAllocationSize, block});
        currentPtr = Align(block, alignof(uint32_t));
        endPtr = block + lastAllocationSize;
        return true;
    }
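
    // For example, with an initial lastAllocationSize of 2048 (the actual default
    // lives in the header), successive blocks would be 4096, 8192, 16384, 16384, ...
    // unless a single allocation needs more, in which case minimumSize wins.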

}