// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "backend/CommandAllocator.h"

#include "common/Math.h"

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdlib>
#define ASSERT assert

namespace backend {

    constexpr uint32_t EndOfBlock = UINT_MAX;         // std::numeric_limits<uint32_t>::max()
    constexpr uint32_t AdditionalData = UINT_MAX - 1; // std::numeric_limits<uint32_t>::max() - 1

    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator

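    // A minimal usage sketch (not part of this file's API): commands are recorded by
    // tagging each allocation with an id, then replayed in the same order. The
    // MyCommand struct and kMyCommandId constant below are hypothetical placeholders.
    //
    //     CommandAllocator allocator;
    //     auto* record = reinterpret_cast<MyCommand*>(
    //         allocator.Allocate(kMyCommandId, sizeof(MyCommand), alignof(MyCommand)));
    //     new (record) MyCommand();
    //
    //     CommandIterator commands(std::move(allocator));
    //     uint32_t id = 0;
    //     while (commands.NextCommandId(&id)) {
    //         if (id == kMyCommandId) {
    //             auto* cmd = static_cast<MyCommand*>(
    //                 commands.NextCommand(sizeof(MyCommand), alignof(MyCommand)));
    //             // execute *cmd, then run its destructor
    //         }
    //     }
    //     commands.DataWasDestroyed();
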
    CommandIterator::CommandIterator()
        : endOfBlock(EndOfBlock) {
        Reset();
    }

    CommandIterator::~CommandIterator() {
        ASSERT(dataWasDestroyed);

        if (!IsEmpty()) {
            for (auto& block : blocks) {
                free(block.block);
            }
        }
    }

    CommandIterator::CommandIterator(CommandIterator&& other)
        : endOfBlock(EndOfBlock) {
        if (!other.IsEmpty()) {
            blocks = std::move(other.blocks);
            other.Reset();
        }
        other.DataWasDestroyed();
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
        if (!other.IsEmpty()) {
            blocks = std::move(other.blocks);
            other.Reset();
        } else {
            blocks.clear();
        }
        other.DataWasDestroyed();
        Reset();
        return *this;
    }

    CommandIterator::CommandIterator(CommandAllocator&& allocator)
        : blocks(allocator.AcquireBlocks()), endOfBlock(EndOfBlock) {
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
        blocks = allocator.AcquireBlocks();
        Reset();
        return *this;
    }

    void CommandIterator::Reset() {
        currentBlock = 0;

        if (blocks.empty()) {
            // This will cause the first NextCommandId call to try to move to the next
            // block and stop the iteration immediately, without special casing the
            // initialization.
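            // (IsEmpty() later detects this sentinel state by comparing blocks[0].block
            // against the address of endOfBlock.)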
            currentPtr = reinterpret_cast<uint8_t*>(&endOfBlock);
            blocks.emplace_back();
            blocks[0].size = sizeof(endOfBlock);
            blocks[0].block = currentPtr;
        } else {
            currentPtr = Align(blocks[0].block, alignof(uint32_t));
        }
    }

    void CommandIterator::DataWasDestroyed() {
        dataWasDestroyed = true;
    }

    bool CommandIterator::IsEmpty() const {
        return blocks[0].block == reinterpret_cast<const uint8_t*>(&endOfBlock);
    }

    bool CommandIterator::NextCommandId(uint32_t* commandId) {
        uint8_t* idPtr = Align(currentPtr, alignof(uint32_t));
        ASSERT(idPtr + sizeof(uint32_t) <= blocks[currentBlock].block + blocks[currentBlock].size);

        uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);

        if (id == EndOfBlock) {
            currentBlock++;
            if (currentBlock >= blocks.size()) {
                Reset();
                return false;
            }
            currentPtr = Align(blocks[currentBlock].block, alignof(uint32_t));
            return NextCommandId(commandId);
        }

        currentPtr = idPtr + sizeof(uint32_t);
        *commandId = id;
        return true;
    }

    void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
        uint8_t* commandPtr = Align(currentPtr, commandAlignment);
        ASSERT(commandPtr + commandSize <= blocks[currentBlock].block + blocks[currentBlock].size);

        currentPtr = commandPtr + commandSize;
        return commandPtr;
    }

    void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
        uint32_t id;
        bool hasId = NextCommandId(&id);
        ASSERT(hasId);
        ASSERT(id == AdditionalData);

        return NextCommand(dataSize, dataAlignment);
    }

    // Potential TODO(cwallez@chromium.org):
    // - Store the size and pointer to the next block in the block itself to avoid having an allocation in the vector
    // - Assume T's alignof is, say, 64 bits, static assert it, and make commandAlignment a constant in Allocate
    // - Be able to optimize allocation to one block, for command buffers expected to live long, to avoid cache misses
    // - Better block allocation, maybe have a NXT API to say a command buffer is going to have a size close to another

    CommandAllocator::CommandAllocator()
        : currentPtr(reinterpret_cast<uint8_t*>(&dummyEnum[0])), endPtr(reinterpret_cast<uint8_t*>(&dummyEnum[1])) {
    }

    CommandAllocator::~CommandAllocator() {
        ASSERT(blocks.empty());
    }

    CommandBlocks&& CommandAllocator::AcquireBlocks() {
        ASSERT(currentPtr != nullptr && endPtr != nullptr);
        ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
        ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
        *reinterpret_cast<uint32_t*>(currentPtr) = EndOfBlock;

        currentPtr = nullptr;
        endPtr = nullptr;
        return std::move(blocks);
    }

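    // The resulting memory of a block looks like this (a sketch derived from the
    // code below, not a normative layout):
    //
    //     [pad] [id] [pad] [command] [pad] [id] [pad] [command] ... [EndOfBlock]
    //
    // Each id is a uint32_t written at 4-byte alignment and each command is aligned
    // to its own requirement. The final EndOfBlock id either sends the iterator to
    // the next block or, on the last block, terminates iteration.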
    uint8_t* CommandAllocator::Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment) {
        ASSERT(currentPtr != nullptr);
        ASSERT(endPtr != nullptr);
        ASSERT(commandId != EndOfBlock);

        // It should always be possible to allocate one id, for EndOfBlock tagging.
        ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
        ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(currentPtr);

        uint8_t* commandAlloc = Align(currentPtr + sizeof(uint32_t), commandAlignment);
        uint8_t* nextPtr = Align(commandAlloc + commandSize, alignof(uint32_t));

        // When there is not enough space, we signal the EndOfBlock so that the iterator knows to
        // move to the next one. EndOfBlock on the last block means the end of the commands.
        if (nextPtr + sizeof(uint32_t) > endPtr) {
            // Even if we are not able to get another block, the list of commands will be well-formed
            // and iterable as this block will be the last one.
            *idAlloc = EndOfBlock;

            // Make sure we have space for the current allocation, plus the end-of-block tag and
            // alignment padding for the first id.
            if (!GetNewBlock(nextPtr - currentPtr + sizeof(uint32_t) + alignof(uint32_t))) {
                return nullptr;
            }
            return Allocate(commandId, commandSize, commandAlignment);
        }

        *idAlloc = commandId;
        currentPtr = nextPtr;
        return commandAlloc;
    }

    uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
        return Allocate(AdditionalData, commandSize, commandAlignment);
    }
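
    // A hedged round-trip sketch for additional data (dataCount is a hypothetical
    // caller-chosen count): the AdditionalData tag written here is exactly what
    // NextData asserts on when reading the data back.
    //
    //     auto* src = reinterpret_cast<uint32_t*>(
    //         allocator.AllocateData(dataCount * sizeof(uint32_t), alignof(uint32_t)));
    //     // fill src[0 .. dataCount)
    //     auto* dst = static_cast<uint32_t*>(
    //         iterator.NextData(dataCount * sizeof(uint32_t), alignof(uint32_t)));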

    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
        // Allocate blocks of doubling size, up to a maximum of 16k, but always at least minimumSize.
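        // For example, assuming lastAllocationSize starts at 2048 (its initializer lives
        // in the header, so that starting value is an assumption), successive blocks would
        // be 4096, 8192, 16384, 16384, ... unless a larger minimumSize forces a bigger one.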
        lastAllocationSize = std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));

        uint8_t* block = reinterpret_cast<uint8_t*>(malloc(lastAllocationSize));
        if (block == nullptr) {
            return false;
        }

        blocks.push_back({lastAllocationSize, block});
        currentPtr = Align(block, alignof(uint32_t));
        endPtr = block + lastAllocationSize;
        return true;
    }

}