/* Copyright (c) 2015-2022 The Khronos Group Inc.
 * Copyright (c) 2015-2022 Valve Corporation
 * Copyright (c) 2015-2022 LunarG, Inc.
 * Copyright (C) 2015-2022 Google Inc.
 * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Tobias Hector <tobias.hector@amd.com>
 */
#include "queue_state.h"
#include "cmd_buffer_state.h"
#include "state_tracker.h"

using SemOp = SEMAPHORE_STATE::SemOp;

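// Record a new submission on this queue, assigning it the next sequence number.
// Returns the submission's seq when a signaled fence or semaphore is outside the
// internal sync scope, in which case the caller should retire it immediately;
// returns 0 when completion will be observed through a later queue operation.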
uint64_t QUEUE_STATE::Submit(CB_SUBMISSION &&submission) {
    for (auto &cb_node : submission.cbs) {
        auto cb_guard = cb_node->WriteLock();
        for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
            auto secondary_guard = secondary_cmd_buffer->WriteLock();
            secondary_cmd_buffer->IncrementResources();
        }
        cb_node->IncrementResources();
        // Increment the use count for all bound objects, including secondary command buffers.
        cb_node->BeginUse();
        cb_node->Submit(submission.perf_submit_pass);
    }
    // Lock required for queue / semaphore operations, but not for the command buffer
    // processing above.
    auto guard = WriteLock();
    submission.seq = seq_ + submissions_.size() + 1;
    bool retire_early = false;
    for (auto &wait : submission.wait_semaphores) {
        wait.semaphore->EnqueueWait(this, submission.seq, wait.payload);
        wait.semaphore->BeginUse();
    }

    for (auto &signal : submission.signal_semaphores) {
        if (signal.semaphore->EnqueueSignal(this, submission.seq, signal.payload)) {
            retire_early = true;
        }
        signal.semaphore->BeginUse();
    }

    if (submission.fence) {
        if (submission.fence->EnqueueSignal(this, submission.seq)) {
            retire_early = true;
        }
        submission.fence->BeginUse();
    }
    // Save the sequence number before the submission is moved into the pending list,
    // rather than reading it from the moved-from object below.
    const uint64_t submission_seq = submission.seq;
    submissions_.emplace_back(std::move(submission));
    return retire_early ? submission_seq : 0;
}

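// Returns true if a submission still pending on this queue waits on the given
// semaphore or will signal the given fence.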
bool QUEUE_STATE::HasWait(VkSemaphore semaphore, VkFence fence) const {
    auto guard = ReadLock();
    for (const auto &submission : submissions_) {
        if (fence != VK_NULL_HANDLE && submission.fence && submission.fence->Handle().Cast<VkFence>() == fence) {
            return true;
        }
        for (const auto &wait_semaphore : submission.wait_semaphores) {
            if (wait_semaphore.semaphore->Handle().Cast<VkSemaphore>() == semaphore) {
                return true;
            }
        }
    }
    return false;
}

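// Merge the per-queue progress reported by SEMAPHORE_STATE::Retire() into
// results, keeping the highest sequence number seen for each queue.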
static void MergeResults(SEMAPHORE_STATE::RetireResult &results, const SEMAPHORE_STATE::RetireResult &sem_result) {
    for (auto &entry : sem_result) {
        auto &last_seq = results[entry.first];
        last_seq = std::max(last_seq, entry.second);
    }
}

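// Remove and return the oldest pending submission, or an empty optional once
// seq_ has caught up to until_seq or the queue has drained.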
layer_data::optional<CB_SUBMISSION> QUEUE_STATE::NextSubmission(uint64_t until_seq) {
    // Pop the next submission off of the queue so that Retire() doesn't need to worry
    // about locking.
    auto guard = WriteLock();
    layer_data::optional<CB_SUBMISSION> result;
    if (seq_ < until_seq && !submissions_.empty()) {
        result.emplace(std::move(submissions_.front()));
        submissions_.pop_front();
        seq_++;
    }
    return result;
}

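// Process completed submissions up to until_seq: retire their semaphore and fence
// operations, release their command buffers, and finally roll other queues forward
// to whatever progress those semaphore operations prove.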
void QUEUE_STATE::Retire(uint64_t until_seq) {
    SEMAPHORE_STATE::RetireResult other_queue_seqs;

    layer_data::optional<CB_SUBMISSION> submission;

    // Roll this queue forward, one submission at a time.
    while ((submission = NextSubmission(until_seq))) {
        for (auto &wait : submission->wait_semaphores) {
            auto result = wait.semaphore->Retire(this, wait.payload);
            MergeResults(other_queue_seqs, result);
            wait.semaphore->EndUse();
        }
        for (auto &signal : submission->signal_semaphores) {
            auto result = signal.semaphore->Retire(this, signal.payload);
            // In the case of timeline semaphores, signaling at payload == N
            // may unblock queues waiting for payload <= N, so we need to
            // process them.
            MergeResults(other_queue_seqs, result);
            signal.semaphore->EndUse();
        }
        // Handle updates to how far the current queue has progressed
        // without going recursive when we call Retire() on other_queue_seqs
        // below.
        auto self_update = other_queue_seqs.find(this);
        if (self_update != other_queue_seqs.end()) {
            until_seq = std::max(until_seq, self_update->second);
            other_queue_seqs.erase(self_update);
        }

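        // Predicate passed to the command buffer Retire() calls below: reports
        // whether a query is written again by a submission still pending on
        // this queue.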
        auto is_query_updated_after = [this](const QueryObject &query_object) {
            for (const auto &submission : submissions_) {
                for (const auto &next_cb_node : submission.cbs) {
                    if (!next_cb_node) {
                        continue;
                    }
                    if (next_cb_node->updatedQueries.find(query_object) != next_cb_node->updatedQueries.end()) {
                        return true;
                    }
                }
            }
            return false;
        };

        for (auto &cb_node : submission->cbs) {
            auto cb_guard = cb_node->WriteLock();
            for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                auto secondary_guard = secondary_cmd_buffer->WriteLock();
                secondary_cmd_buffer->Retire(submission->perf_submit_pass, is_query_updated_after);
            }
            cb_node->Retire(submission->perf_submit_pass, is_query_updated_after);
            cb_node->EndUse();
        }

        if (submission->fence) {
            submission->fence->Retire(this, submission->seq);
            submission->fence->EndUse();
        }
    }

    // Roll other queues forward to the highest seq we saw a wait for.
    for (const auto &qs : other_queue_seqs) {
        qs.first->Retire(qs.second);
    }
}

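// Attach this fence to a queue submission. Returns true if the fence is outside
// the internal sync scope, meaning the caller should retire the submission early
// instead of waiting for a later queue operation to observe completion.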
bool FENCE_STATE::EnqueueSignal(QUEUE_STATE *queue_state, uint64_t next_seq) {
    auto guard = WriteLock();
    if (scope_ != kSyncScopeInternal) {
        return true;
    }
    // Mark fence in use
    state_ = FENCE_INFLIGHT;
    queue_ = queue_state;
    seq_ = next_seq;
    return false;
}

// Retire from a non-queue operation, such as vkWaitForFences().
void FENCE_STATE::Retire() {
    QUEUE_STATE *q = nullptr;
    uint64_t seq = 0;
    {
        // Hold the lock only while updating members, but not
        // while calling QUEUE_STATE::Retire().
        auto guard = WriteLock();
        if (state_ == FENCE_INFLIGHT) {
            if (scope_ == kSyncScopeInternal) {
                q = queue_;
                seq = seq_;
            }
            queue_ = nullptr;
            seq_ = 0;
            state_ = FENCE_RETIRED;
        }
    }
    if (q) {
        q->Retire(seq);
    }
}

// Retire from a queue operation.
void FENCE_STATE::Retire(const QUEUE_STATE *queue_state, uint64_t seq) {
    auto guard = WriteLock();
    if (state_ == FENCE_INFLIGHT && queue_ != nullptr && queue_ == queue_state && seq_ == seq) {
        queue_ = nullptr;
        seq_ = 0;
        state_ = FENCE_RETIRED;
    }
}

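// Move the fence back to the unsignaled state, e.g. on vkResetFences(). Resetting
// a temporarily imported fence restores its permanent (internal) state.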
void FENCE_STATE::Reset() {
    auto guard = WriteLock();
    if (scope_ == kSyncScopeInternal) {
        state_ = FENCE_UNSIGNALED;
    } else if (scope_ == kSyncScopeExternalTemporary) {
        scope_ = kSyncScopeInternal;
    }
}

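// Track an external handle import. Sync FD handles have copy transference, so
// they (like imports with VK_FENCE_IMPORT_TEMPORARY_BIT) only replace the payload
// temporarily; every other import makes the fence permanently external.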
void FENCE_STATE::Import(VkExternalFenceHandleTypeFlagBits handle_type, VkFenceImportFlags flags) {
    auto guard = WriteLock();
    if (scope_ != kSyncScopeExternalPermanent) {
        if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_FENCE_IMPORT_TEMPORARY_BIT) &&
            scope_ == kSyncScopeInternal) {
            scope_ = kSyncScopeExternalTemporary;
        } else {
            scope_ = kSyncScopeExternalPermanent;
        }
    }
}

void FENCE_STATE::Export(VkExternalFenceHandleTypeFlagBits handle_type) {
    auto guard = WriteLock();
    if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT) {
        // Export with reference transference becomes external
        scope_ = kSyncScopeExternalPermanent;
    } else if (scope_ == kSyncScopeInternal) {
        // Export with copy transference has a side effect of resetting the fence
        state_ = FENCE_UNSIGNALED;
    }
}

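// Record a pending signal operation. For binary semaphores the payload is
// generated here so that operations stay totally ordered; timeline semaphores
// use the caller-provided payload. Returns true if the semaphore is externally
// scoped and the submission should be retired early.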
bool SEMAPHORE_STATE::EnqueueSignal(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) {
    auto guard = WriteLock();
    if (scope_ != kSyncScopeInternal) {
        return true;  // retire early
    }
    if (type == VK_SEMAPHORE_TYPE_BINARY) {
        payload = next_payload_++;
    }
    operations_.emplace(SemOp{kSignal, queue, queue_seq, payload});
    return false;
}

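// Record a pending wait operation. Waiting on a temporarily imported semaphore
// consumes the import and restores the semaphore's internal scope.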
void SEMAPHORE_STATE::EnqueueWait(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) {
    auto guard = WriteLock();
    if (scope_ == kSyncScopeExternalTemporary) {
        scope_ = kSyncScopeInternal;
    }
    if (type == VK_SEMAPHORE_TYPE_BINARY) {
        payload = next_payload_++;
    }
    operations_.emplace(SemOp{kWait, queue, queue_seq, payload});
}

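// Record the signal performed by a swapchain image acquire. An acquire is not
// associated with any queue, so no queue or sequence number is recorded.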
void SEMAPHORE_STATE::EnqueueAcquire() {
    auto guard = WriteLock();
    assert(type == VK_SEMAPHORE_TYPE_BINARY);
    operations_.emplace(SemOp{kBinaryAcquire, nullptr, 0, next_payload_++});
}

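// Record the semaphore operation made by a present request. Present operations
// never participate in completion proofs (see Retire() below).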
void SEMAPHORE_STATE::EnqueuePresent(QUEUE_STATE *queue) {
    auto guard = WriteLock();
    assert(type == VK_SEMAPHORE_TYPE_BINARY);
    operations_.emplace(SemOp{kBinaryPresent, queue, 0, next_payload_++});
}

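// Returns the most recent pending operation that matches the optional filter (or
// the most recent of all operations when no filter is given), if any.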
layer_data::optional<SemOp> SEMAPHORE_STATE::LastOp(std::function<bool(const SemOp &)> filter) const {
    auto guard = ReadLock();
    layer_data::optional<SemOp> result;

    for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) {
        if (!filter || filter(*pos)) {
            result.emplace(*pos);
            break;
        }
    }
    return result;
}

bool SEMAPHORE_STATE::CanBeSignaled() const {
    if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
        return true;
    }
    // Both LastOp() and Completed() lock, so no locking is needed in this method.
    auto op = LastOp();
    if (op) {
        return op->CanBeSignaled();
    }
    auto comp = Completed();
    return comp.CanBeSignaled();
}

bool SEMAPHORE_STATE::CanBeWaited() const {
    if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
        return true;
    }
    // Both LastOp() and Completed() lock, so no locking is needed in this method.
    auto op = LastOp();
    if (op) {
        return op->op_type == kSignal || op->op_type == kBinaryAcquire;
    }
    auto comp = Completed();
    return comp.op_type == kSignal || comp.op_type == kBinaryAcquire;
}

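// For a binary semaphore, returns the queue that most recently enqueued a wait
// on it, other than the given queue; VK_NULL_HANDLE if there is none.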
VkQueue SEMAPHORE_STATE::AnotherQueueWaitsBinary(VkQueue queue) const {
    if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
        return VK_NULL_HANDLE;
    }
    auto guard = ReadLock();

    for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) {
        if (pos->op_type == kWait && pos->queue->Queue() != queue) {
            return pos->queue->Queue();
        }
    }
    return VK_NULL_HANDLE;
}

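// Remove all pending operations up to and including the given payload, recording
// the last one as the semaphore's completed state. Returns the highest proven
// submission seq for each queue touched by the retired operations.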
SEMAPHORE_STATE::RetireResult SEMAPHORE_STATE::Retire(QUEUE_STATE *queue, uint64_t payload) {
    auto guard = WriteLock();
    RetireResult result;

    while (!operations_.empty() && operations_.begin()->payload <= payload) {
        completed_ = *operations_.begin();
        operations_.erase(operations_.begin());
        // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work,
        // so QP (and its semaphore waits) /never/ participate in any completion proof. Likewise, Acquire is not associated
        // with a queue.
        if (completed_.op_type != kBinaryAcquire && completed_.op_type != kBinaryPresent) {
            auto &last_seq = result[completed_.queue];
            last_seq = std::max(last_seq, completed_.seq);
        }
    }
    return result;
}

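// Retire a timeline semaphore from a host-side signal, e.g. vkSignalSemaphore(),
// then roll forward any queues whose progress that signal proves.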
void SEMAPHORE_STATE::RetireTimeline(uint64_t payload) {
    if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
        auto results = Retire(nullptr, payload);
        for (auto &entry : results) {
            entry.first->Retire(entry.second);
        }
    }
}

void SEMAPHORE_STATE::Import(VkExternalSemaphoreHandleTypeFlagBits handle_type, VkSemaphoreImportFlags flags) {
    auto guard = WriteLock();
    if (scope_ != kSyncScopeExternalPermanent) {
        if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) &&
            scope_ == kSyncScopeInternal) {
            scope_ = kSyncScopeExternalTemporary;
        } else {
            scope_ = kSyncScopeExternalPermanent;
        }
    }
}

void SEMAPHORE_STATE::Export(VkExternalSemaphoreHandleTypeFlagBits handle_type) {
    auto guard = WriteLock();
    if (handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        // Cannot track semaphore state once it is exported, except for Sync FD handle types, which have copy transference
        scope_ = kSyncScopeExternalPermanent;
    }
}