blob: f75296ac5cd2eea24b1caae2385fcd0f2796100b [file] [log] [blame]
Jeremy Gebben140a0a52021-12-15 18:22:34 -07001/* Copyright (c) 2015-2022 The Khronos Group Inc.
2 * Copyright (c) 2015-2022 Valve Corporation
3 * Copyright (c) 2015-2022 LunarG, Inc.
4 * Copyright (C) 2015-2022 Google Inc.
Jeremy Gebben4af0aa82021-09-08 09:35:16 -06005 * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chris Forbes <chrisf@ijw.co.nz>
22 * Author: Mark Lobodzinski <mark@lunarg.com>
23 * Author: Dave Houlton <daveh@lunarg.com>
24 * Author: John Zulauf <jzulauf@lunarg.com>
25 * Author: Tobias Hector <tobias.hector@amd.com>
26 */
27#include "queue_state.h"
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060028#include "cmd_buffer_state.h"
Jeremy Gebben57642982021-09-14 14:14:55 -060029#include "state_tracker.h"
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060030
Jeremy Gebben15332642021-12-15 19:33:15 -070031using SemOp = SEMAPHORE_STATE::SemOp;
Jeremy Gebben57642982021-09-14 14:14:55 -060032
Jeremy Gebben15332642021-12-15 19:33:15 -070033uint64_t QUEUE_STATE::Submit(CB_SUBMISSION &&submission) {
Jeremy Gebben57642982021-09-14 14:14:55 -060034 for (auto &cb_node : submission.cbs) {
Jeremy Gebben332d4dd2022-01-01 12:40:02 -070035 auto cb_guard = cb_node->WriteLock();
Jeremy Gebben57642982021-09-14 14:14:55 -060036 for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
Jeremy Gebben332d4dd2022-01-01 12:40:02 -070037 auto secondary_guard = secondary_cmd_buffer->WriteLock();
Jeremy Gebben57642982021-09-14 14:14:55 -060038 secondary_cmd_buffer->IncrementResources();
39 }
40 cb_node->IncrementResources();
41 // increment use count for all bound objects including secondary cbs
42 cb_node->BeginUse();
43 cb_node->Submit(submission.perf_submit_pass);
44 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -070045 // Lock required for queue / semaphore operations, but not for command buffer
46 // processing above.
47 auto guard = WriteLock();
Jeremy Gebben98faf722022-07-06 15:44:29 -060048 submission.seq = seq_ + submissions_.size() + 1;
Jeremy Gebben15332642021-12-15 19:33:15 -070049 bool retire_early = false;
Jeremy Gebben57642982021-09-14 14:14:55 -060050 for (auto &wait : submission.wait_semaphores) {
Jeremy Gebben98faf722022-07-06 15:44:29 -060051 wait.semaphore->EnqueueWait(this, submission.seq, wait.payload);
Jeremy Gebben57642982021-09-14 14:14:55 -060052 wait.semaphore->BeginUse();
53 }
54
55 for (auto &signal : submission.signal_semaphores) {
Jeremy Gebben98faf722022-07-06 15:44:29 -060056 if (signal.semaphore->EnqueueSignal(this, submission.seq, signal.payload)) {
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060057 retire_early = true;
58 }
Jeremy Gebben57642982021-09-14 14:14:55 -060059 signal.semaphore->BeginUse();
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060060 }
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060061
Jeremy Gebben57642982021-09-14 14:14:55 -060062 if (submission.fence) {
Jeremy Gebben98faf722022-07-06 15:44:29 -060063 if (submission.fence->EnqueueSignal(this, submission.seq)) {
Jeremy Gebben57642982021-09-14 14:14:55 -060064 retire_early = true;
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060065 }
Jeremy Gebben57642982021-09-14 14:14:55 -060066 submission.fence->BeginUse();
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060067 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -070068 submissions_.emplace_back(std::move(submission));
Jeremy Gebben98faf722022-07-06 15:44:29 -060069 return retire_early ? submission.seq : 0;
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060070}
71
ziga-lunarg81b56d32022-03-21 22:22:05 +010072bool QUEUE_STATE::HasWait(VkSemaphore semaphore, VkFence fence) const {
73 auto guard = ReadLock();
74 for (const auto &submission : submissions_) {
75 if (fence != VK_NULL_HANDLE && submission.fence && submission.fence->Handle().Cast<VkFence>() == fence) {
76 return true;
77 }
78 for (const auto &wait_semaphore : submission.wait_semaphores) {
79 if (wait_semaphore.semaphore->Handle().Cast<VkSemaphore>() == semaphore) {
80 return true;
81 }
82 }
83 }
84 return false;
85}
86
Jeremy Gebben15332642021-12-15 19:33:15 -070087static void MergeResults(SEMAPHORE_STATE::RetireResult &results, const SEMAPHORE_STATE::RetireResult &sem_result) {
88 for (auto &entry : sem_result) {
89 auto &last_seq = results[entry.first];
90 last_seq = std::max(last_seq, entry.second);
91 }
92}
93
94layer_data::optional<CB_SUBMISSION> QUEUE_STATE::NextSubmission(uint64_t until_seq) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -070095 // Pop the next submission off of the queue so that Retire() doesn't need to worry
96 // about locking.
97 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -070098 layer_data::optional<CB_SUBMISSION> result;
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -070099 if (seq_ < until_seq && !submissions_.empty()) {
100 result.emplace(std::move(submissions_.front()));
101 submissions_.pop_front();
Jeremy Gebben15332642021-12-15 19:33:15 -0700102 seq_++;
103 }
104 return result;
105}
106
// Roll this queue forward through completed submissions, up to (but not
// including) sequence number `until_seq`. For each submission: retire its
// semaphore operations, its command buffers, and its fence, then drop the
// in-use references taken by Submit(). Progress discovered on OTHER queues
// (via shared semaphores) is accumulated and applied once at the end, which
// keeps the recursion to a single level.
void QUEUE_STATE::Retire(uint64_t until_seq) {
    // Map of other queue -> highest sequence number unblocked by semaphores
    // retired while processing this queue.
    SEMAPHORE_STATE::RetireResult other_queue_seqs;

    layer_data::optional<CB_SUBMISSION> submission;

    // Roll this queue forward, one submission at a time.
    // NextSubmission() takes the queue lock internally; nothing below holds it.
    while ((submission = NextSubmission(until_seq))) {
        for (auto &wait : submission->wait_semaphores) {
            auto result = wait.semaphore->Retire(this, wait.payload);
            MergeResults(other_queue_seqs, result);
            wait.semaphore->EndUse();  // balances BeginUse() in Submit()
        }
        for (auto &signal : submission->signal_semaphores) {
            auto result = signal.semaphore->Retire(this, signal.payload);
            // in the case of timeline semaphores, signaling at payload == N
            // may unblock waiting queues for payload <= N so we need to
            // process them
            MergeResults(other_queue_seqs, result);
            signal.semaphore->EndUse();
        }
        // Handle updates to how far the current queue has progressed
        // without going recursive when we call Retire on other_queue_seqs
        // below.
        auto self_update = other_queue_seqs.find(this);
        if (self_update != other_queue_seqs.end()) {
            until_seq = std::max(until_seq, self_update->second);
            other_queue_seqs.erase(self_update);
        }

        // Predicate passed to CB retirement: true if a still-pending submission
        // (same perf pass) on this queue updates the same query, in which case
        // the query result should not be considered final yet.
        auto is_query_updated_after = [this](const QueryObject &query_object) {
            for (const auto &submission : submissions_) {
                if (query_object.perf_pass != submission.perf_submit_pass) {
                    continue;
                }
                for (uint32_t j = 0; j < submission.cbs.size(); ++j) {
                    const auto &next_cb_node = submission.cbs[j];
                    if (!next_cb_node) {
                        continue;
                    }
                    if (next_cb_node->UpdatesQuery(query_object)) {
                        return true;
                    }
                }
            }
            return false;
        };

        for (auto &cb_node : submission->cbs) {
            auto cb_guard = cb_node->WriteLock();
            for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                auto secondary_guard = secondary_cmd_buffer->WriteLock();
                secondary_cmd_buffer->Retire(submission->perf_submit_pass, is_query_updated_after);
            }
            cb_node->Retire(submission->perf_submit_pass, is_query_updated_after);
            cb_node->EndUse();
        }

        if (submission->fence) {
            // Queue-side fence retirement; only retires if the fence is still
            // associated with this (queue, seq) pair.
            submission->fence->Retire(this, submission->seq);
            submission->fence->EndUse();
        }
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (const auto &qs : other_queue_seqs) {
        qs.first->Retire(qs.second);
    }
}
175
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700176bool FENCE_STATE::EnqueueSignal(QUEUE_STATE *queue_state, uint64_t next_seq) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700177 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700178 if (scope_ != kSyncScopeInternal) {
179 return true;
180 }
181 // Mark fence in use
182 state_ = FENCE_INFLIGHT;
183 queue_ = queue_state;
184 seq_ = next_seq;
185 return false;
186}
187
Jeremy Gebben98faf722022-07-06 15:44:29 -0600188// Retire from a non-queue operation, such as vkWaitForFences()
189void FENCE_STATE::Retire() {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700190 QUEUE_STATE *q = nullptr;
191 uint64_t seq = 0;
192 {
193 // Hold the lock only while updating members, but not
194 // while calling QUEUE_STATE::Retire()
195 auto guard = WriteLock();
Jeremy Gebben98faf722022-07-06 15:44:29 -0600196 if (state_ == FENCE_INFLIGHT) {
197 if (scope_ == kSyncScopeInternal) {
198 q = queue_;
199 seq = seq_;
200 }
201 queue_ = nullptr;
202 seq_ = 0;
203 state_ = FENCE_RETIRED;
Jeremy Gebben4af0aa82021-09-08 09:35:16 -0600204 }
Jeremy Gebben98faf722022-07-06 15:44:29 -0600205 }
206 if (q) {
207 q->Retire(seq);
208 }
209}
210
211// Retire from a queue operation
212void FENCE_STATE::Retire(const QUEUE_STATE *queue_state, uint64_t seq) {
213 auto guard = WriteLock();
214 if (state_ == FENCE_INFLIGHT && queue_ != nullptr && queue_ == queue_state && seq_ == seq) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700215 queue_ = nullptr;
216 seq_ = 0;
217 state_ = FENCE_RETIRED;
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700218 }
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700219}
220
221void FENCE_STATE::Reset() {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700222 auto guard = WriteLock();
Jeremy Gebbend57adc82022-09-21 12:47:00 -0600223 queue_ = nullptr;
224 seq_ = 0;
225 // spec: If any member of pFences currently has its payload imported with temporary permanence,
226 // that fence’s prior permanent payload is first restored. The remaining operations described
227 // therefore operate on the restored payload.
228 if (scope_ == kSyncScopeExternalTemporary) {
229 scope_ = kSyncScopeInternal;
230 }
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700231 if (scope_ == kSyncScopeInternal) {
232 state_ = FENCE_UNSIGNALED;
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700233 }
234}
235
236void FENCE_STATE::Import(VkExternalFenceHandleTypeFlagBits handle_type, VkFenceImportFlags flags) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700237 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700238 if (scope_ != kSyncScopeExternalPermanent) {
239 if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_FENCE_IMPORT_TEMPORARY_BIT) &&
240 scope_ == kSyncScopeInternal) {
241 scope_ = kSyncScopeExternalTemporary;
242 } else {
243 scope_ = kSyncScopeExternalPermanent;
244 }
245 }
246}
247
248void FENCE_STATE::Export(VkExternalFenceHandleTypeFlagBits handle_type) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700249 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700250 if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT) {
251 // Export with reference transference becomes external
252 scope_ = kSyncScopeExternalPermanent;
253 } else if (scope_ == kSyncScopeInternal) {
254 // Export with copy transference has a side effect of resetting the fence
255 state_ = FENCE_UNSIGNALED;
Jeremy Gebben4af0aa82021-09-08 09:35:16 -0600256 }
257}
258
Jeremy Gebben15332642021-12-15 19:33:15 -0700259bool SEMAPHORE_STATE::EnqueueSignal(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700260 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700261 if (scope_ != kSyncScopeInternal) {
262 return true; // retire early
263 }
264 if (type == VK_SEMAPHORE_TYPE_BINARY) {
265 payload = next_payload_++;
266 }
267 operations_.emplace(SemOp{kSignal, queue, queue_seq, payload});
268 return false;
269}
270
271void SEMAPHORE_STATE::EnqueueWait(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700272 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700273 switch (scope_) {
274 case kSyncScopeExternalTemporary:
275 scope_ = kSyncScopeInternal;
276 break;
277 default:
278 break;
279 }
280 if (type == VK_SEMAPHORE_TYPE_BINARY) {
281 payload = next_payload_++;
282 }
283 operations_.emplace(SemOp{kWait, queue, queue_seq, payload});
284}
285
286void SEMAPHORE_STATE::EnqueueAcquire() {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700287 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700288 assert(type == VK_SEMAPHORE_TYPE_BINARY);
289 operations_.emplace(SemOp{kBinaryAcquire, nullptr, 0, next_payload_++});
290}
291
292void SEMAPHORE_STATE::EnqueuePresent(QUEUE_STATE *queue) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700293 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700294 assert(type == VK_SEMAPHORE_TYPE_BINARY);
295 operations_.emplace(SemOp{kBinaryPresent, queue, 0, next_payload_++});
296}
297
298layer_data::optional<SemOp> SEMAPHORE_STATE::LastOp(std::function<bool(const SemOp &)> filter) const {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700299 auto guard = ReadLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700300 layer_data::optional<SemOp> result;
301
302 for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) {
303 if (!filter || filter(*pos)) {
304 result.emplace(*pos);
305 break;
Jeremy Gebben57642982021-09-14 14:14:55 -0600306 }
Jeremy Gebben15332642021-12-15 19:33:15 -0700307 }
308 return result;
309}
310
311bool SEMAPHORE_STATE::CanBeSignaled() const {
312 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
313 return true;
314 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700315 // both LastOp() and Completed() lock, so no locking needed in this method.
Jeremy Gebben15332642021-12-15 19:33:15 -0700316 auto op = LastOp();
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700317 if (op) {
318 return op->CanBeSignaled();
319 }
320 auto comp = Completed();
321 return comp.CanBeSignaled();
Jeremy Gebben15332642021-12-15 19:33:15 -0700322}
323
324bool SEMAPHORE_STATE::CanBeWaited() const {
325 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
326 return true;
327 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700328 // both LastOp() and Completed() lock, so no locking needed in this method.
Jeremy Gebben15332642021-12-15 19:33:15 -0700329 auto op = LastOp();
330 if (op) {
331 return op->op_type == kSignal || op->op_type == kBinaryAcquire;
332 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700333 auto comp = Completed();
334 return comp.op_type == kSignal || comp.op_type == kBinaryAcquire;
Jeremy Gebben15332642021-12-15 19:33:15 -0700335}
336
ziga-lunarga635db52022-04-14 19:24:08 +0200337VkQueue SEMAPHORE_STATE::AnotherQueueWaitsBinary(VkQueue queue) const {
338 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
339 return VK_NULL_HANDLE;
340 }
341 auto guard = ReadLock();
342
343 for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) {
344 if (pos->op_type == kWait && pos->queue->Queue() != queue) {
345 return pos->queue->Queue();
346 }
347 }
348 return VK_NULL_HANDLE;
349}
350
// Retire every pending operation whose payload is <= `payload`, recording the
// last one retired as completed_. Returns a map of queue -> highest retired
// sequence number, so callers can roll those queues forward.
// NOTE(review): the `queue` parameter is unused in this body — presumably kept
// for interface symmetry with other Retire() overloads; confirm with callers.
SEMAPHORE_STATE::RetireResult SEMAPHORE_STATE::Retire(QUEUE_STATE *queue, uint64_t payload) {
    auto guard = WriteLock();
    RetireResult result;

    // operations_ iterates in payload order, so pop from the front while the
    // payload is covered by this retire.
    while (!operations_.empty() && operations_.begin()->payload <= payload) {
        completed_ = *operations_.begin();
        operations_.erase(operations_.begin());
        // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work,
        // so QP (and its semaphore waits) /never/ participate in any completion proof. Likewise, Acquire is not associated
        // with a queue.
        if (completed_.op_type != kBinaryAcquire && completed_.op_type != kBinaryPresent) {
            auto &last_seq = result[completed_.queue];
            last_seq = std::max(last_seq, completed_.seq);
        }
    }
    return result;
}
368
369void SEMAPHORE_STATE::RetireTimeline(uint64_t payload) {
370 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
371 auto results = Retire(nullptr, payload);
372 for (auto &entry : results) {
373 entry.first->Retire(entry.second);
374 }
375 }
376}
377
378void SEMAPHORE_STATE::Import(VkExternalSemaphoreHandleTypeFlagBits handle_type, VkSemaphoreImportFlags flags) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700379 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700380 if (scope_ != kSyncScopeExternalPermanent) {
381 if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) &&
382 scope_ == kSyncScopeInternal) {
383 scope_ = kSyncScopeExternalTemporary;
384 } else {
385 scope_ = kSyncScopeExternalPermanent;
386 }
387 }
388}
389
390void SEMAPHORE_STATE::Export(VkExternalSemaphoreHandleTypeFlagBits handle_type) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700391 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700392 if (handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
393 // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
394 scope_ = kSyncScopeExternalPermanent;
395 }
Jeremy Gebben4af0aa82021-09-08 09:35:16 -0600396}