Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 1 | /* Copyright (c) 2015-2022 The Khronos Group Inc. |
| 2 | * Copyright (c) 2015-2022 Valve Corporation |
| 3 | * Copyright (c) 2015-2022 LunarG, Inc. |
| 4 | * Copyright (C) 2015-2022 Google Inc. |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 5 | * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. |
| 6 | * |
| 7 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 8 | * you may not use this file except in compliance with the License. |
| 9 | * You may obtain a copy of the License at |
| 10 | * |
| 11 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | * |
| 13 | * Unless required by applicable law or agreed to in writing, software |
| 14 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | * See the License for the specific language governing permissions and |
| 17 | * limitations under the License. |
| 18 | * |
| 19 | * Author: Courtney Goeltzenleuchter <courtneygo@google.com> |
| 20 | * Author: Tobin Ehlis <tobine@google.com> |
| 21 | * Author: Chris Forbes <chrisf@ijw.co.nz> |
| 22 | * Author: Mark Lobodzinski <mark@lunarg.com> |
| 23 | * Author: Dave Houlton <daveh@lunarg.com> |
| 24 | * Author: John Zulauf <jzulauf@lunarg.com> |
| 25 | * Author: Tobias Hector <tobias.hector@amd.com> |
| 26 | */ |
| 27 | #include "queue_state.h" |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 28 | #include "cmd_buffer_state.h" |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 29 | #include "state_tracker.h" |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 30 | |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 31 | using SemOp = SEMAPHORE_STATE::SemOp; |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 32 | |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 33 | uint64_t QUEUE_STATE::Submit(CB_SUBMISSION &&submission) { |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 34 | for (auto &cb_node : submission.cbs) { |
Jeremy Gebben | 332d4dd | 2022-01-01 12:40:02 -0700 | [diff] [blame] | 35 | auto cb_guard = cb_node->WriteLock(); |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 36 | for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) { |
Jeremy Gebben | 332d4dd | 2022-01-01 12:40:02 -0700 | [diff] [blame] | 37 | auto secondary_guard = secondary_cmd_buffer->WriteLock(); |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 38 | secondary_cmd_buffer->IncrementResources(); |
| 39 | } |
| 40 | cb_node->IncrementResources(); |
| 41 | // increment use count for all bound objects including secondary cbs |
| 42 | cb_node->BeginUse(); |
| 43 | cb_node->Submit(submission.perf_submit_pass); |
| 44 | } |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 45 | // Lock required for queue / semaphore operations, but not for command buffer |
| 46 | // processing above. |
| 47 | auto guard = WriteLock(); |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 48 | submission.seq = seq_ + submissions_.size() + 1; |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 49 | bool retire_early = false; |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 50 | for (auto &wait : submission.wait_semaphores) { |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 51 | wait.semaphore->EnqueueWait(this, submission.seq, wait.payload); |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 52 | wait.semaphore->BeginUse(); |
| 53 | } |
| 54 | |
| 55 | for (auto &signal : submission.signal_semaphores) { |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 56 | if (signal.semaphore->EnqueueSignal(this, submission.seq, signal.payload)) { |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 57 | retire_early = true; |
| 58 | } |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 59 | signal.semaphore->BeginUse(); |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 60 | } |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 61 | |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 62 | if (submission.fence) { |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 63 | if (submission.fence->EnqueueSignal(this, submission.seq)) { |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 64 | retire_early = true; |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 65 | } |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 66 | submission.fence->BeginUse(); |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 67 | } |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 68 | submissions_.emplace_back(std::move(submission)); |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 69 | return retire_early ? submission.seq : 0; |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 70 | } |
| 71 | |
ziga-lunarg | 81b56d3 | 2022-03-21 22:22:05 +0100 | [diff] [blame] | 72 | bool QUEUE_STATE::HasWait(VkSemaphore semaphore, VkFence fence) const { |
| 73 | auto guard = ReadLock(); |
| 74 | for (const auto &submission : submissions_) { |
| 75 | if (fence != VK_NULL_HANDLE && submission.fence && submission.fence->Handle().Cast<VkFence>() == fence) { |
| 76 | return true; |
| 77 | } |
| 78 | for (const auto &wait_semaphore : submission.wait_semaphores) { |
| 79 | if (wait_semaphore.semaphore->Handle().Cast<VkSemaphore>() == semaphore) { |
| 80 | return true; |
| 81 | } |
| 82 | } |
| 83 | } |
| 84 | return false; |
| 85 | } |
| 86 | |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 87 | static void MergeResults(SEMAPHORE_STATE::RetireResult &results, const SEMAPHORE_STATE::RetireResult &sem_result) { |
| 88 | for (auto &entry : sem_result) { |
| 89 | auto &last_seq = results[entry.first]; |
| 90 | last_seq = std::max(last_seq, entry.second); |
| 91 | } |
| 92 | } |
| 93 | |
| 94 | layer_data::optional<CB_SUBMISSION> QUEUE_STATE::NextSubmission(uint64_t until_seq) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 95 | // Pop the next submission off of the queue so that Retire() doesn't need to worry |
| 96 | // about locking. |
| 97 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 98 | layer_data::optional<CB_SUBMISSION> result; |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 99 | if (seq_ < until_seq && !submissions_.empty()) { |
| 100 | result.emplace(std::move(submissions_.front())); |
| 101 | submissions_.pop_front(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 102 | seq_++; |
| 103 | } |
| 104 | return result; |
| 105 | } |
| 106 | |
// Process this queue's pending submissions up to until_seq: retire the
// semaphore waits/signals, command buffers, and fence of each submission in
// order, then roll forward any other queues whose progress was proven by the
// retired semaphore operations.
void QUEUE_STATE::Retire(uint64_t until_seq) {
    SEMAPHORE_STATE::RetireResult other_queue_seqs;

    layer_data::optional<CB_SUBMISSION> submission;

    // Roll this queue forward, one submission at a time.
    while ((submission = NextSubmission(until_seq))) {
        for (auto &wait : submission->wait_semaphores) {
            auto result = wait.semaphore->Retire(this, wait.payload);
            MergeResults(other_queue_seqs, result);
            wait.semaphore->EndUse();
        }
        for (auto &signal : submission->signal_semaphores) {
            auto result = signal.semaphore->Retire(this, signal.payload);
            // in the case of timeline semaphores, signaling at payload == N
            // may unblock waiting queues for payload <= N so we need to
            // process them
            MergeResults(other_queue_seqs, result);
            signal.semaphore->EndUse();
        }
        // Handle updates to how far the current queue has progressed
        // without going recursive when we call Retire on other_queue_seqs
        // below.
        auto self_update = other_queue_seqs.find(this);
        if (self_update != other_queue_seqs.end()) {
            until_seq = std::max(until_seq, self_update->second);
            other_queue_seqs.erase(self_update);
        }

        // Predicate handed to CMD_BUFFER_STATE::Retire: true if the given
        // query is updated again by any command buffer in a submission that
        // is still pending on this queue.
        auto is_query_updated_after = [this](const QueryObject &query_object) {
            for (const auto &submission : submissions_) {
                for (uint32_t j = 0; j < submission.cbs.size(); ++j) {
                    const auto &next_cb_node = submission.cbs[j];
                    if (!next_cb_node) {
                        continue;
                    }
                    if (next_cb_node->updatedQueries.find(query_object) != next_cb_node->updatedQueries.end()) {
                        return true;
                    }
                }
            }
            return false;
        };

        // Retire secondaries before their primary, mirroring the order used
        // in Submit(); EndUse() releases the BeginUse() taken at submit time.
        for (auto &cb_node : submission->cbs) {
            auto cb_guard = cb_node->WriteLock();
            for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) {
                auto secondary_guard = secondary_cmd_buffer->WriteLock();
                secondary_cmd_buffer->Retire(submission->perf_submit_pass, is_query_updated_after);
            }
            cb_node->Retire(submission->perf_submit_pass, is_query_updated_after);
            cb_node->EndUse();
        }

        // Retire the fence only if it is still tied to this queue and this
        // submission's sequence number (FENCE_STATE::Retire re-checks both).
        if (submission->fence) {
            submission->fence->Retire(this, submission->seq);
            submission->fence->EndUse();
        }
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (const auto &qs : other_queue_seqs) {
        qs.first->Retire(qs.second);
    }
}
| 172 | |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 173 | bool FENCE_STATE::EnqueueSignal(QUEUE_STATE *queue_state, uint64_t next_seq) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 174 | auto guard = WriteLock(); |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 175 | if (scope_ != kSyncScopeInternal) { |
| 176 | return true; |
| 177 | } |
| 178 | // Mark fence in use |
| 179 | state_ = FENCE_INFLIGHT; |
| 180 | queue_ = queue_state; |
| 181 | seq_ = next_seq; |
| 182 | return false; |
| 183 | } |
| 184 | |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 185 | // Retire from a non-queue operation, such as vkWaitForFences() |
| 186 | void FENCE_STATE::Retire() { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 187 | QUEUE_STATE *q = nullptr; |
| 188 | uint64_t seq = 0; |
| 189 | { |
| 190 | // Hold the lock only while updating members, but not |
| 191 | // while calling QUEUE_STATE::Retire() |
| 192 | auto guard = WriteLock(); |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 193 | if (state_ == FENCE_INFLIGHT) { |
| 194 | if (scope_ == kSyncScopeInternal) { |
| 195 | q = queue_; |
| 196 | seq = seq_; |
| 197 | } |
| 198 | queue_ = nullptr; |
| 199 | seq_ = 0; |
| 200 | state_ = FENCE_RETIRED; |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 201 | } |
Jeremy Gebben | 98faf72 | 2022-07-06 15:44:29 -0600 | [diff] [blame^] | 202 | } |
| 203 | if (q) { |
| 204 | q->Retire(seq); |
| 205 | } |
| 206 | } |
| 207 | |
| 208 | // Retire from a queue operation |
| 209 | void FENCE_STATE::Retire(const QUEUE_STATE *queue_state, uint64_t seq) { |
| 210 | auto guard = WriteLock(); |
| 211 | if (state_ == FENCE_INFLIGHT && queue_ != nullptr && queue_ == queue_state && seq_ == seq) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 212 | queue_ = nullptr; |
| 213 | seq_ = 0; |
| 214 | state_ = FENCE_RETIRED; |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 215 | } |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 216 | } |
| 217 | |
| 218 | void FENCE_STATE::Reset() { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 219 | auto guard = WriteLock(); |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 220 | if (scope_ == kSyncScopeInternal) { |
| 221 | state_ = FENCE_UNSIGNALED; |
| 222 | } else if (scope_ == kSyncScopeExternalTemporary) { |
| 223 | scope_ = kSyncScopeInternal; |
| 224 | } |
| 225 | } |
| 226 | |
| 227 | void FENCE_STATE::Import(VkExternalFenceHandleTypeFlagBits handle_type, VkFenceImportFlags flags) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 228 | auto guard = WriteLock(); |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 229 | if (scope_ != kSyncScopeExternalPermanent) { |
| 230 | if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_FENCE_IMPORT_TEMPORARY_BIT) && |
| 231 | scope_ == kSyncScopeInternal) { |
| 232 | scope_ = kSyncScopeExternalTemporary; |
| 233 | } else { |
| 234 | scope_ = kSyncScopeExternalPermanent; |
| 235 | } |
| 236 | } |
| 237 | } |
| 238 | |
| 239 | void FENCE_STATE::Export(VkExternalFenceHandleTypeFlagBits handle_type) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 240 | auto guard = WriteLock(); |
Jeremy Gebben | 140a0a5 | 2021-12-15 18:22:34 -0700 | [diff] [blame] | 241 | if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT) { |
| 242 | // Export with reference transference becomes external |
| 243 | scope_ = kSyncScopeExternalPermanent; |
| 244 | } else if (scope_ == kSyncScopeInternal) { |
| 245 | // Export with copy transference has a side effect of resetting the fence |
| 246 | state_ = FENCE_UNSIGNALED; |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 247 | } |
| 248 | } |
| 249 | |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 250 | bool SEMAPHORE_STATE::EnqueueSignal(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 251 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 252 | if (scope_ != kSyncScopeInternal) { |
| 253 | return true; // retire early |
| 254 | } |
| 255 | if (type == VK_SEMAPHORE_TYPE_BINARY) { |
| 256 | payload = next_payload_++; |
| 257 | } |
| 258 | operations_.emplace(SemOp{kSignal, queue, queue_seq, payload}); |
| 259 | return false; |
| 260 | } |
| 261 | |
| 262 | void SEMAPHORE_STATE::EnqueueWait(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 263 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 264 | switch (scope_) { |
| 265 | case kSyncScopeExternalTemporary: |
| 266 | scope_ = kSyncScopeInternal; |
| 267 | break; |
| 268 | default: |
| 269 | break; |
| 270 | } |
| 271 | if (type == VK_SEMAPHORE_TYPE_BINARY) { |
| 272 | payload = next_payload_++; |
| 273 | } |
| 274 | operations_.emplace(SemOp{kWait, queue, queue_seq, payload}); |
| 275 | } |
| 276 | |
| 277 | void SEMAPHORE_STATE::EnqueueAcquire() { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 278 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 279 | assert(type == VK_SEMAPHORE_TYPE_BINARY); |
| 280 | operations_.emplace(SemOp{kBinaryAcquire, nullptr, 0, next_payload_++}); |
| 281 | } |
| 282 | |
| 283 | void SEMAPHORE_STATE::EnqueuePresent(QUEUE_STATE *queue) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 284 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 285 | assert(type == VK_SEMAPHORE_TYPE_BINARY); |
| 286 | operations_.emplace(SemOp{kBinaryPresent, queue, 0, next_payload_++}); |
| 287 | } |
| 288 | |
| 289 | layer_data::optional<SemOp> SEMAPHORE_STATE::LastOp(std::function<bool(const SemOp &)> filter) const { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 290 | auto guard = ReadLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 291 | layer_data::optional<SemOp> result; |
| 292 | |
| 293 | for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) { |
| 294 | if (!filter || filter(*pos)) { |
| 295 | result.emplace(*pos); |
| 296 | break; |
Jeremy Gebben | 5764298 | 2021-09-14 14:14:55 -0600 | [diff] [blame] | 297 | } |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 298 | } |
| 299 | return result; |
| 300 | } |
| 301 | |
| 302 | bool SEMAPHORE_STATE::CanBeSignaled() const { |
| 303 | if (type == VK_SEMAPHORE_TYPE_TIMELINE) { |
| 304 | return true; |
| 305 | } |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 306 | // both LastOp() and Completed() lock, so no locking needed in this method. |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 307 | auto op = LastOp(); |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 308 | if (op) { |
| 309 | return op->CanBeSignaled(); |
| 310 | } |
| 311 | auto comp = Completed(); |
| 312 | return comp.CanBeSignaled(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 313 | } |
| 314 | |
| 315 | bool SEMAPHORE_STATE::CanBeWaited() const { |
| 316 | if (type == VK_SEMAPHORE_TYPE_TIMELINE) { |
| 317 | return true; |
| 318 | } |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 319 | // both LastOp() and Completed() lock, so no locking needed in this method. |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 320 | auto op = LastOp(); |
| 321 | if (op) { |
| 322 | return op->op_type == kSignal || op->op_type == kBinaryAcquire; |
| 323 | } |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 324 | auto comp = Completed(); |
| 325 | return comp.op_type == kSignal || comp.op_type == kBinaryAcquire; |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 326 | } |
| 327 | |
ziga-lunarg | a635db5 | 2022-04-14 19:24:08 +0200 | [diff] [blame] | 328 | VkQueue SEMAPHORE_STATE::AnotherQueueWaitsBinary(VkQueue queue) const { |
| 329 | if (type == VK_SEMAPHORE_TYPE_TIMELINE) { |
| 330 | return VK_NULL_HANDLE; |
| 331 | } |
| 332 | auto guard = ReadLock(); |
| 333 | |
| 334 | for (auto pos = operations_.rbegin(); pos != operations_.rend(); ++pos) { |
| 335 | if (pos->op_type == kWait && pos->queue->Queue() != queue) { |
| 336 | return pos->queue->Queue(); |
| 337 | } |
| 338 | } |
| 339 | return VK_NULL_HANDLE; |
| 340 | } |
| 341 | |
// Retire all pending operations with payload <= the given payload, advancing
// completed_ to the last one removed. Returns, per queue, the highest
// submission sequence number the retired ops prove complete so callers can
// roll those queues forward.
// NOTE(review): the queue parameter is unused in this body — callers (e.g.
// RetireTimeline) may pass nullptr; confirm whether it is vestigial.
SEMAPHORE_STATE::RetireResult SEMAPHORE_STATE::Retire(QUEUE_STATE *queue, uint64_t payload) {
    auto guard = WriteLock();
    RetireResult result;

    // Assumes operations_ orders ops so the smallest payload is at begin()
    // — TODO confirm SemOp's comparison.
    while (!operations_.empty() && operations_.begin()->payload <= payload) {
        completed_ = *operations_.begin();
        operations_.erase(operations_.begin());
        // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work,
        // so QP (and its semaphore waits) /never/ participate in any completion proof. Likewise, Acquire is not associated
        // with a queue.
        if (completed_.op_type != kBinaryAcquire && completed_.op_type != kBinaryPresent) {
            auto &last_seq = result[completed_.queue];
            last_seq = std::max(last_seq, completed_.seq);
        }
    }
    return result;
}
| 359 | |
| 360 | void SEMAPHORE_STATE::RetireTimeline(uint64_t payload) { |
| 361 | if (type == VK_SEMAPHORE_TYPE_TIMELINE) { |
| 362 | auto results = Retire(nullptr, payload); |
| 363 | for (auto &entry : results) { |
| 364 | entry.first->Retire(entry.second); |
| 365 | } |
| 366 | } |
| 367 | } |
| 368 | |
| 369 | void SEMAPHORE_STATE::Import(VkExternalSemaphoreHandleTypeFlagBits handle_type, VkSemaphoreImportFlags flags) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 370 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 371 | if (scope_ != kSyncScopeExternalPermanent) { |
| 372 | if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) && |
| 373 | scope_ == kSyncScopeInternal) { |
| 374 | scope_ = kSyncScopeExternalTemporary; |
| 375 | } else { |
| 376 | scope_ = kSyncScopeExternalPermanent; |
| 377 | } |
| 378 | } |
| 379 | } |
| 380 | |
| 381 | void SEMAPHORE_STATE::Export(VkExternalSemaphoreHandleTypeFlagBits handle_type) { |
Jeremy Gebben | c6ccdc5 | 2022-01-01 12:29:19 -0700 | [diff] [blame] | 382 | auto guard = WriteLock(); |
Jeremy Gebben | 1533264 | 2021-12-15 19:33:15 -0700 | [diff] [blame] | 383 | if (handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) { |
| 384 | // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference |
| 385 | scope_ = kSyncScopeExternalPermanent; |
| 386 | } |
Jeremy Gebben | 4af0aa8 | 2021-09-08 09:35:16 -0600 | [diff] [blame] | 387 | } |