// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "backend/CommandAllocator.h"

#include "common/Assert.h"
#include "common/Math.h"

#include <algorithm>
#include <climits>
#include <cstdlib>

namespace backend {

    constexpr uint32_t EndOfBlock = UINT_MAX;          // std::numeric_limits<uint32_t>::max()
    constexpr uint32_t AdditionalData = UINT_MAX - 1;  // std::numeric_limits<uint32_t>::max() - 1
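
    // Summary of the encoding used by this file: each allocation in a block is a uint32_t id
    // aligned to alignof(uint32_t), followed by the command (or extra data) payload aligned to
    // its own requirement. AdditionalData tags payloads appended with AllocateData/NextData,
    // and EndOfBlock is written as the last id of every block so the iterator knows to move on.
    // Schematically (sizes illustrative only):
    //
    //   [id0][command0 payload][AdditionalData][data payload][id1][command1 payload][EndOfBlock]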

    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator

    CommandIterator::CommandIterator()
        : endOfBlock(EndOfBlock) {
        Reset();
    }

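    // The iterator owns the blocks it was handed. dataWasDestroyed is a debug flag set through
    // DataWasDestroyed(), presumably by the owner once the command objects stored in the blocks
    // have been destructed, so that the destructor can assert this happened before freeing them.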
    CommandIterator::~CommandIterator() {
        ASSERT(dataWasDestroyed);

        if (!IsEmpty()) {
            for (auto& block : blocks) {
                free(block.block);
            }
        }
    }

    CommandIterator::CommandIterator(CommandIterator&& other)
        : endOfBlock(EndOfBlock) {
        if (!other.IsEmpty()) {
            blocks = std::move(other.blocks);
            other.Reset();
        }
        other.DataWasDestroyed();
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
        if (!other.IsEmpty()) {
            blocks = std::move(other.blocks);
            other.Reset();
        } else {
            blocks.clear();
        }
        other.DataWasDestroyed();
        Reset();
        return *this;
    }

    CommandIterator::CommandIterator(CommandAllocator&& allocator)
        : blocks(allocator.AcquireBlocks()), endOfBlock(EndOfBlock) {
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
        blocks = allocator.AcquireBlocks();
        Reset();
        return *this;
    }

    void CommandIterator::Reset() {
        currentBlock = 0;

        if (blocks.empty()) {
            // This will cause the first NextCommandId call to try to move to the next
            // block and stop the iteration immediately, without special casing the
            // initialization.
            currentPtr = reinterpret_cast<uint8_t*>(&endOfBlock);
            blocks.emplace_back();
            blocks[0].size = sizeof(endOfBlock);
            blocks[0].block = currentPtr;
        } else {
            currentPtr = AlignPtr(blocks[0].block, alignof(uint32_t));
        }
    }

    void CommandIterator::DataWasDestroyed() {
        dataWasDestroyed = true;
    }

    bool CommandIterator::IsEmpty() const {
        return blocks[0].block == reinterpret_cast<const uint8_t*>(&endOfBlock);
    }

    bool CommandIterator::NextCommandId(uint32_t* commandId) {
        uint8_t* idPtr = AlignPtr(currentPtr, alignof(uint32_t));
        ASSERT(idPtr + sizeof(uint32_t) <= blocks[currentBlock].block + blocks[currentBlock].size);

        uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);

        if (id == EndOfBlock) {
            currentBlock++;
            if (currentBlock >= blocks.size()) {
                Reset();
                return false;
            }
            currentPtr = AlignPtr(blocks[currentBlock].block, alignof(uint32_t));
            return NextCommandId(commandId);
        }

        currentPtr = idPtr + sizeof(uint32_t);
        *commandId = id;
        return true;
    }

    void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
        uint8_t* commandPtr = AlignPtr(currentPtr, commandAlignment);
        ASSERT(commandPtr + commandSize <= blocks[currentBlock].block + blocks[currentBlock].size);

        currentPtr = commandPtr + commandSize;
        return commandPtr;
    }

    void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
        uint32_t id;
        bool hasId = NextCommandId(&id);
        ASSERT(hasId);
        ASSERT(id == AdditionalData);

        return NextCommand(dataSize, dataAlignment);
    }

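    // Illustrative usage sketch (not part of this file; `Command`, `DrawCmd`, and the code that
    // records them are hypothetical): a backend typically drains the iterator like this:
    //
    //   CommandIterator commands(std::move(allocator));
    //   Command type;
    //   while (commands.NextCommandId(reinterpret_cast<uint32_t*>(&type))) {
    //       switch (type) {
    //           case Command::Draw: {
    //               auto* draw = static_cast<DrawCmd*>(commands.NextCommand(sizeof(DrawCmd), alignof(DrawCmd)));
    //               // ... translate *draw to backend API calls ...
    //           } break;
    //           // ... other commands ...
    //       }
    //   }
    //   commands.DataWasDestroyed();
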
    // Potential TODO(cwallez@chromium.org):
    //  - Host the size and pointer to next block in the block itself to avoid having an allocation in the vector
    //  - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant in Allocate
    //  - Be able to optimize allocation to one block, for command buffers expected to live long to avoid cache misses
    //  - Better block allocation, maybe have NXT API to say command buffer is going to have size close to another

    CommandAllocator::CommandAllocator()
        : currentPtr(reinterpret_cast<uint8_t*>(&dummyEnum[0])), endPtr(reinterpret_cast<uint8_t*>(&dummyEnum[1])) {
    }

    CommandAllocator::~CommandAllocator() {
        ASSERT(blocks.empty());
    }

    CommandBlocks&& CommandAllocator::AcquireBlocks() {
        ASSERT(currentPtr != nullptr && endPtr != nullptr);
        ASSERT(IsPtrAligned(currentPtr, alignof(uint32_t)));
        ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
        *reinterpret_cast<uint32_t*>(currentPtr) = EndOfBlock;

        currentPtr = nullptr;
        endPtr = nullptr;
        return std::move(blocks);
    }

    uint8_t* CommandAllocator::Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment) {
        ASSERT(currentPtr != nullptr);
        ASSERT(endPtr != nullptr);
        ASSERT(commandId != EndOfBlock);

        // It should always be possible to allocate one id, for EndOfBlock tagging.
        ASSERT(IsPtrAligned(currentPtr, alignof(uint32_t)));
        ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(currentPtr);

        uint8_t* commandAlloc = AlignPtr(currentPtr + sizeof(uint32_t), commandAlignment);
        uint8_t* nextPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));

        // When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
        // move to the next one. EndOfBlock on the last block means the end of the commands.
        if (nextPtr + sizeof(uint32_t) > endPtr) {

            // Even if we are not able to get another block, the list of commands will be well-formed
            // and iterable as this block will be the last one.
            *idAlloc = EndOfBlock;

            // Make sure we have space for the current allocation, plus the end-of-block tag and
            // alignment padding for the first id of the new block.
            ASSERT(nextPtr > currentPtr);
            if (!GetNewBlock(static_cast<size_t>(nextPtr - currentPtr) + sizeof(uint32_t) + alignof(uint32_t))) {
                return nullptr;
            }
            return Allocate(commandId, commandSize, commandAlignment);
        }

        *idAlloc = commandId;
        currentPtr = nextPtr;
        return commandAlloc;
    }

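    // Illustrative sketch (hypothetical helper, not part of this allocator): recording code
    // typically wraps Allocate in a typed helper that placement-news the command struct in the
    // returned storage, for example:
    //
    //   template <typename T>
    //   T* AllocateCommand(CommandAllocator& allocator, uint32_t commandId) {
    //       uint8_t* storage = allocator.Allocate(commandId, sizeof(T), alignof(T));
    //       return storage != nullptr ? new (storage) T : nullptr;
    //   }
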
    uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
        return Allocate(AdditionalData, commandSize, commandAlignment);
    }

    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
        // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
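        // For example (illustrative, assuming a previous lastAllocationSize of 2048): successive
        // blocks would be 4096, 8192, 16384, 16384, ... unless some minimumSize is larger than
        // the cap, in which case that single block is exactly minimumSize bytes.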
        lastAllocationSize = std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));

        uint8_t* block = reinterpret_cast<uint8_t*>(malloc(lastAllocationSize));
        if (block == nullptr) {
            return false;
        }

        blocks.push_back({lastAllocationSize, block});
        currentPtr = AlignPtr(block, alignof(uint32_t));
        endPtr = block + lastAllocationSize;
        return true;
    }

}