blob: 8d5115792a80a3e4a5f60b4b20ca41ffbf27012b [file] [log] [blame]
Jeremy Gebben140a0a52021-12-15 18:22:34 -07001/* Copyright (c) 2015-2022 The Khronos Group Inc.
2 * Copyright (c) 2015-2022 Valve Corporation
3 * Copyright (c) 2015-2022 LunarG, Inc.
4 * Copyright (C) 2015-2022 Google Inc.
Jeremy Gebben4af0aa82021-09-08 09:35:16 -06005 * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chris Forbes <chrisf@ijw.co.nz>
22 * Author: Mark Lobodzinski <mark@lunarg.com>
23 * Author: Dave Houlton <daveh@lunarg.com>
24 * Author: John Zulauf <jzulauf@lunarg.com>
25 * Author: Tobias Hector <tobias.hector@amd.com>
26 */
27#include "queue_state.h"
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060028#include "cmd_buffer_state.h"
Jeremy Gebben57642982021-09-14 14:14:55 -060029#include "state_tracker.h"
Jeremy Gebben4af0aa82021-09-08 09:35:16 -060030
// Shorthand for the semaphore-operation record type used throughout this file.
using SemOp = SEMAPHORE_STATE::SemOp;
// Records a submission on this queue: marks every command buffer (and its
// linked secondary command buffers) as in-flight, enqueues the semaphore
// wait/signal operations and the optional fence, then stores the submission
// for later retirement.
// Returns the submission's sequence number when it should be retired
// immediately (a signaled fence/semaphore with external scope), or 0 when
// retirement must wait for a later event.
uint64_t QUEUE_STATE::Submit(CB_SUBMISSION &&submission) {
    for (auto &cb_node : submission.cbs) {
        // Command buffer locks are taken before the queue lock below and
        // released before it is acquired -- never held together.
        auto cb_guard = cb_node->WriteLock();
        for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
            auto secondary_guard = secondary_cmd_buffer->WriteLock();
            secondary_cmd_buffer->IncrementResources();
        }
        cb_node->IncrementResources();
        // increment use count for all bound objects including secondary cbs
        cb_node->BeginUse();
        cb_node->Submit(submission.perf_submit_pass);
    }
    // Lock required for queue / semaphore operations, but not for command buffer
    // processing above.
    auto guard = WriteLock();
    // seq_ counts already-retired submissions; pending ones live in
    // submissions_, so this is the sequence number this submission will get.
    const uint64_t next_seq = seq_ + submissions_.size() + 1;
    bool retire_early = false;
    for (auto &wait : submission.wait_semaphores) {
        wait.semaphore->EnqueueWait(this, next_seq, wait.payload);
        wait.semaphore->BeginUse();
    }

    for (auto &signal : submission.signal_semaphores) {
        // EnqueueSignal() returns true for externally-scoped semaphores,
        // which cannot be tracked and force early retirement.
        if (signal.semaphore->EnqueueSignal(this, next_seq, signal.payload)) {
            retire_early = true;
        }
        signal.semaphore->BeginUse();
    }

    if (submission.fence) {
        // Same early-retire convention as semaphores (see FENCE_STATE::EnqueueSignal).
        if (submission.fence->EnqueueSignal(this, next_seq)) {
            retire_early = true;
        }
        submission.fence->BeginUse();
    }

    submissions_.emplace_back(std::move(submission));
    return retire_early ? next_seq : 0;
}
72
ziga-lunarg81b56d32022-03-21 22:22:05 +010073bool QUEUE_STATE::HasWait(VkSemaphore semaphore, VkFence fence) const {
74 auto guard = ReadLock();
75 for (const auto &submission : submissions_) {
76 if (fence != VK_NULL_HANDLE && submission.fence && submission.fence->Handle().Cast<VkFence>() == fence) {
77 return true;
78 }
79 for (const auto &wait_semaphore : submission.wait_semaphores) {
80 if (wait_semaphore.semaphore->Handle().Cast<VkSemaphore>() == semaphore) {
81 return true;
82 }
83 }
84 }
85 return false;
86}
87
Jeremy Gebben15332642021-12-15 19:33:15 -070088static void MergeResults(SEMAPHORE_STATE::RetireResult &results, const SEMAPHORE_STATE::RetireResult &sem_result) {
89 for (auto &entry : sem_result) {
90 auto &last_seq = results[entry.first];
91 last_seq = std::max(last_seq, entry.second);
92 }
93}
94
95layer_data::optional<CB_SUBMISSION> QUEUE_STATE::NextSubmission(uint64_t until_seq) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -070096 // Pop the next submission off of the queue so that Retire() doesn't need to worry
97 // about locking.
98 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -070099 layer_data::optional<CB_SUBMISSION> result;
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700100 if (seq_ < until_seq && !submissions_.empty()) {
101 result.emplace(std::move(submissions_.front()));
102 submissions_.pop_front();
Jeremy Gebben15332642021-12-15 19:33:15 -0700103 seq_++;
104 }
105 return result;
106}
107
// Retires submissions on this queue up to until_seq: resolves semaphore
// waits/signals, retires command buffers and the optional fence, then rolls
// any other queues forward to the highest sequence implied by the retired
// semaphore operations.
void QUEUE_STATE::Retire(uint64_t until_seq) {
    // queue -> highest seq proven complete on that queue by retired semaphores
    SEMAPHORE_STATE::RetireResult other_queue_seqs;

    layer_data::optional<CB_SUBMISSION> submission;

    // Roll this queue forward, one submission at a time.
    // NextSubmission() takes and releases the queue lock internally, so no
    // lock is held while the submission is processed here.
    while ((submission = NextSubmission(until_seq))) {
        for (auto &wait : submission->wait_semaphores) {
            auto result = wait.semaphore->Retire(this, wait.payload);
            MergeResults(other_queue_seqs, result);
            wait.semaphore->EndUse();
        }
        for (auto &signal : submission->signal_semaphores) {
            auto result = signal.semaphore->Retire(this, signal.payload);
            // in the case of timeline semaphores, signaling at payload == N
            // may unblock waiting queues for payload <= N so we need to
            // process them
            MergeResults(other_queue_seqs, result);
            signal.semaphore->EndUse();
        }
        // Handle updates to how far the current queue has progressed
        // without going recursive when we call Retire on other_queue_seqs
        // below.
        auto self_update = other_queue_seqs.find(this);
        if (self_update != other_queue_seqs.end()) {
            until_seq = std::max(until_seq, self_update->second);
            other_queue_seqs.erase(self_update);
        }

        // Predicate used by command-buffer retirement to tell whether a query
        // is written again by a still-pending submission on this queue.
        // NOTE(review): this reads submissions_ without holding the queue
        // lock (only command-buffer locks are held at the call sites below)
        // -- confirm this is safe against concurrent Submit().
        auto is_query_updated_after = [this](const QueryObject &query_object) {
            for (const auto &submission : submissions_) {
                for (uint32_t j = 0; j < submission.cbs.size(); ++j) {
                    const auto &next_cb_node = submission.cbs[j];
                    if (!next_cb_node) {
                        continue;
                    }
                    if (next_cb_node->updatedQueries.find(query_object) != next_cb_node->updatedQueries.end()) {
                        return true;
                    }
                }
            }
            return false;
        };

        for (auto &cb_node : submission->cbs) {
            auto cb_guard = cb_node->WriteLock();
            for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                auto secondary_guard = secondary_cmd_buffer->WriteLock();
                secondary_cmd_buffer->Retire(submission->perf_submit_pass, is_query_updated_after);
            }
            cb_node->Retire(submission->perf_submit_pass, is_query_updated_after);
            cb_node->EndUse();
        }

        if (submission->fence) {
            // false: don't notify the queue -- we're already retiring it here.
            submission->fence->Retire(false);
            submission->fence->EndUse();
        }
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (const auto &qs : other_queue_seqs) {
        qs.first->Retire(qs.second);
    }
}
173
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700174bool FENCE_STATE::EnqueueSignal(QUEUE_STATE *queue_state, uint64_t next_seq) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700175 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700176 if (scope_ != kSyncScopeInternal) {
177 return true;
178 }
179 // Mark fence in use
180 state_ = FENCE_INFLIGHT;
181 queue_ = queue_state;
182 seq_ = next_seq;
183 return false;
184}
185
186void FENCE_STATE::Retire(bool notify_queue) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700187 QUEUE_STATE *q = nullptr;
188 uint64_t seq = 0;
189 {
190 // Hold the lock only while updating members, but not
191 // while calling QUEUE_STATE::Retire()
192 auto guard = WriteLock();
193 if (scope_ == kSyncScopeInternal) {
194 q = queue_;
195 seq = seq_;
Jeremy Gebben4af0aa82021-09-08 09:35:16 -0600196 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700197 queue_ = nullptr;
198 seq_ = 0;
199 state_ = FENCE_RETIRED;
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700200 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700201 if (q && notify_queue) {
202 q->Retire(seq);
203 }
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700204}
205
206void FENCE_STATE::Reset() {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700207 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700208 if (scope_ == kSyncScopeInternal) {
209 state_ = FENCE_UNSIGNALED;
210 } else if (scope_ == kSyncScopeExternalTemporary) {
211 scope_ = kSyncScopeInternal;
212 }
213}
214
215void FENCE_STATE::Import(VkExternalFenceHandleTypeFlagBits handle_type, VkFenceImportFlags flags) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700216 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700217 if (scope_ != kSyncScopeExternalPermanent) {
218 if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_FENCE_IMPORT_TEMPORARY_BIT) &&
219 scope_ == kSyncScopeInternal) {
220 scope_ = kSyncScopeExternalTemporary;
221 } else {
222 scope_ = kSyncScopeExternalPermanent;
223 }
224 }
225}
226
227void FENCE_STATE::Export(VkExternalFenceHandleTypeFlagBits handle_type) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700228 auto guard = WriteLock();
Jeremy Gebben140a0a52021-12-15 18:22:34 -0700229 if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT) {
230 // Export with reference transference becomes external
231 scope_ = kSyncScopeExternalPermanent;
232 } else if (scope_ == kSyncScopeInternal) {
233 // Export with copy transference has a side effect of resetting the fence
234 state_ = FENCE_UNSIGNALED;
Jeremy Gebben4af0aa82021-09-08 09:35:16 -0600235 }
236}
237
Jeremy Gebben15332642021-12-15 19:33:15 -0700238bool SEMAPHORE_STATE::EnqueueSignal(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700239 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700240 if (scope_ != kSyncScopeInternal) {
241 return true; // retire early
242 }
243 if (type == VK_SEMAPHORE_TYPE_BINARY) {
244 payload = next_payload_++;
245 }
246 operations_.emplace(SemOp{kSignal, queue, queue_seq, payload});
247 return false;
248}
249
250void SEMAPHORE_STATE::EnqueueWait(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700251 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700252 switch (scope_) {
253 case kSyncScopeExternalTemporary:
254 scope_ = kSyncScopeInternal;
255 break;
256 default:
257 break;
258 }
259 if (type == VK_SEMAPHORE_TYPE_BINARY) {
260 payload = next_payload_++;
261 }
262 operations_.emplace(SemOp{kWait, queue, queue_seq, payload});
263}
264
// Records a swapchain image acquire; acquires are not associated with any
// queue, so the op carries a null queue and zero sequence.
void SEMAPHORE_STATE::EnqueueAcquire() {
    auto guard = WriteLock();
    assert(type == VK_SEMAPHORE_TYPE_BINARY);
    const uint64_t acquire_payload = next_payload_++;
    operations_.emplace(SemOp{kBinaryAcquire, nullptr, 0, acquire_payload});
}
270
// Records a queue-present operation on this (binary) semaphore.
void SEMAPHORE_STATE::EnqueuePresent(QUEUE_STATE *queue) {
    auto guard = WriteLock();
    assert(type == VK_SEMAPHORE_TYPE_BINARY);
    const uint64_t present_payload = next_payload_++;
    operations_.emplace(SemOp{kBinaryPresent, queue, 0, present_payload});
}
276
// Returns the most recent pending operation matching the filter (or simply
// the newest one when no filter is given); empty if nothing matches.
layer_data::optional<SemOp> SEMAPHORE_STATE::LastOp(std::function<bool(const SemOp &)> filter) const {
    auto guard = ReadLock();
    layer_data::optional<SemOp> found;

    // Walk newest-to-oldest; the first match wins.
    auto pos = operations_.rbegin();
    while (pos != operations_.rend() && !found) {
        if (!filter || filter(*pos)) {
            found.emplace(*pos);
        }
        ++pos;
    }
    return found;
}
289
290bool SEMAPHORE_STATE::CanBeSignaled() const {
291 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
292 return true;
293 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700294 // both LastOp() and Completed() lock, so no locking needed in this method.
Jeremy Gebben15332642021-12-15 19:33:15 -0700295 auto op = LastOp();
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700296 if (op) {
297 return op->CanBeSignaled();
298 }
299 auto comp = Completed();
300 return comp.CanBeSignaled();
Jeremy Gebben15332642021-12-15 19:33:15 -0700301}
302
303bool SEMAPHORE_STATE::CanBeWaited() const {
304 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
305 return true;
306 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700307 // both LastOp() and Completed() lock, so no locking needed in this method.
Jeremy Gebben15332642021-12-15 19:33:15 -0700308 auto op = LastOp();
309 if (op) {
310 return op->op_type == kSignal || op->op_type == kBinaryAcquire;
311 }
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700312 auto comp = Completed();
313 return comp.op_type == kSignal || comp.op_type == kBinaryAcquire;
Jeremy Gebben15332642021-12-15 19:33:15 -0700314}
315
ziga-lunarga635db52022-04-14 19:24:08 +0200316VkQueue SEMAPHORE_STATE::AnotherQueueWaitsBinary(VkQueue queue) const {
317 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
318 return VK_NULL_HANDLE;
319 }
320 auto guard = ReadLock();
321
322 for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) {
323 if (pos->op_type == kWait && pos->queue->Queue() != queue) {
324 return pos->queue->Queue();
325 }
326 }
327 return VK_NULL_HANDLE;
328}
329
// Completes all pending operations whose payload is <= the given payload,
// recording the most recently completed op in completed_. Returns a map of
// queue -> highest sequence number whose completion is implied by the
// retired operations.
// NOTE(review): the 'queue' parameter is unused in this body -- confirm
// whether it is still needed by the declaration/callers.
SEMAPHORE_STATE::RetireResult SEMAPHORE_STATE::Retire(QUEUE_STATE *queue, uint64_t payload) {
    auto guard = WriteLock();
    RetireResult result;

    // operations_ is ordered; consume from the front while payloads qualify.
    while (!operations_.empty() && operations_.begin()->payload <= payload) {
        completed_ = *operations_.begin();
        operations_.erase(operations_.begin());
        // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work,
        // so QP (and its semaphore waits) /never/ participate in any completion proof. Likewise, Acquire is not associated
        // with a queue.
        if (completed_.op_type != kBinaryAcquire && completed_.op_type != kBinaryPresent) {
            auto &last_seq = result[completed_.queue];
            last_seq = std::max(last_seq, completed_.seq);
        }
    }
    return result;
}
347
348void SEMAPHORE_STATE::RetireTimeline(uint64_t payload) {
349 if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
350 auto results = Retire(nullptr, payload);
351 for (auto &entry : results) {
352 entry.first->Retire(entry.second);
353 }
354 }
355}
356
357void SEMAPHORE_STATE::Import(VkExternalSemaphoreHandleTypeFlagBits handle_type, VkSemaphoreImportFlags flags) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700358 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700359 if (scope_ != kSyncScopeExternalPermanent) {
360 if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) &&
361 scope_ == kSyncScopeInternal) {
362 scope_ = kSyncScopeExternalTemporary;
363 } else {
364 scope_ = kSyncScopeExternalPermanent;
365 }
366 }
367}
368
369void SEMAPHORE_STATE::Export(VkExternalSemaphoreHandleTypeFlagBits handle_type) {
Jeremy Gebbenc6ccdc52022-01-01 12:29:19 -0700370 auto guard = WriteLock();
Jeremy Gebben15332642021-12-15 19:33:15 -0700371 if (handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
372 // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
373 scope_ = kSyncScopeExternalPermanent;
374 }
Jeremy Gebben4af0aa82021-09-08 09:35:16 -0600375}