/* Copyright (c) 2015-2022 The Khronos Group Inc.
 * Copyright (c) 2015-2022 Valve Corporation
 * Copyright (c) 2015-2022 LunarG, Inc.
 * Copyright (C) 2015-2022 Google Inc.
 * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Tobias Hector <tobias.hector@amd.com>
 */
#pragma once
#include "base_node.h"
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <set>
#include <vector>
#include "vk_layer_utils.h"

class CMD_BUFFER_STATE;
class QUEUE_STATE;

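// Ownership scope of a fence or semaphore payload: owned by this device (internal), or shared
// through an external handle with temporary or permanent import permanence.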
enum SyncScope {
    kSyncScopeInternal,
    kSyncScopeExternalTemporary,
    kSyncScopeExternalPermanent,
};

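// Fence lifecycle as tracked by the validation layers: unsignaled, submitted with queue work that
// has not yet completed (in flight), or signaled/retired.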
enum FENCE_STATUS { FENCE_UNSIGNALED, FENCE_INFLIGHT, FENCE_RETIRED };

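// State tracker for a VkFence. Records the fence's create flags, export handle types, current
// status, sync scope, and the queue submission (queue + sequence number) that will signal it.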
class FENCE_STATE : public REFCOUNTED_NODE {
  public:
    FENCE_STATE(VkFence f, const VkFenceCreateInfo *pCreateInfo)
        : REFCOUNTED_NODE(f, kVulkanObjectTypeFence),
          flags(pCreateInfo->flags),
          exportHandleTypes(GetExportHandleTypes(pCreateInfo)),
          state_((pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED),
          scope_(kSyncScopeInternal) {}

    VkFence fence() const { return handle_.Cast<VkFence>(); }

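    // Track the queue submission that will signal this fence (EnqueueSignal), mark the fence
    // signaled once that work or an external signal completes (Retire), and return it to the
    // unsignaled state (Reset).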
    bool EnqueueSignal(QUEUE_STATE *queue_state, uint64_t next_seq);

    void Retire();

    void Retire(const QUEUE_STATE *queue_state, uint64_t seq);

    void Reset();

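    // Importing or exporting an external fence handle can move the fence out of the internal
    // sync scope (see SyncScope above).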
    void Import(VkExternalFenceHandleTypeFlagBits handle_type, VkFenceImportFlags flags);

    void Export(VkExternalFenceHandleTypeFlagBits handle_type);

    const VkFenceCreateFlags flags;
    const VkExternalFenceHandleTypeFlags exportHandleTypes;

    SyncScope Scope() const { return scope_; }
    FENCE_STATUS State() const { return state_; }
    QUEUE_STATE *Queue() const { return queue_; }
    uint64_t QueueSeq() const { return seq_; }

  private:
    static VkExternalFenceHandleTypeFlags GetExportHandleTypes(const VkFenceCreateInfo *info) {
        auto export_info = LvlFindInChain<VkExportFenceCreateInfo>(info->pNext);
        return export_info ? export_info->handleTypes : 0;
    }
    ReadLockGuard ReadLock() const { return ReadLockGuard(lock_); }
    WriteLockGuard WriteLock() { return WriteLockGuard(lock_); }

    QUEUE_STATE *queue_{nullptr};
    uint64_t seq_{0};
    FENCE_STATUS state_;
    SyncScope scope_{kSyncScopeInternal};
    mutable ReadWriteLock lock_;
};

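// State tracker for a VkSemaphore (binary or timeline). Pending and completed operations are
// recorded as SemOp entries ordered by payload value; for binary semaphores the payload is a
// monotonically increasing counter generated by this class.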
class SEMAPHORE_STATE : public REFCOUNTED_NODE {
  public:
    // Possible types of semaphore operations
    enum OpType {
        kNone,
        kWait,
        kSignal,
        kBinaryAcquire,
        kBinaryPresent,
    };
    static inline const char *OpTypeName(OpType t) {
        switch (t) {
            case kWait:
                return "wait";
            case kSignal:
                return "signal";
            case kBinaryAcquire:
                return "acquire";
            case kBinaryPresent:
                return "present";
            case kNone:
            default:
                return "NONE";
        }
    }

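    // A single semaphore operation (wait or signal) together with the queue submission that
    // issued it. Operations are ordered by payload: the timeline value for timeline semaphores,
    // or a generated counter for binary semaphores.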
    struct SemOp {
        // NOTE: C++11 doesn't allow aggregate initialization and default member
        // initializers in the same struct. This limitation was removed in C++14.
        OpType op_type;
        QUEUE_STATE *queue;
        uint64_t seq;
        uint64_t payload;

        bool operator<(const SemOp &rhs) const { return payload < rhs.payload; }

        bool IsWait() const { return op_type == kWait || op_type == kBinaryPresent; }
        bool IsSignal() const { return op_type == kSignal; }

        // NOTE: Present semaphores are waited on by the implementation, not by queue operations. We do not yet
        // have a good way to figure out when this wait completes, so we must assume they are safe to reuse.
        bool CanBeSignaled() const { return op_type == kNone || op_type == kWait || op_type == kBinaryPresent; }
        bool CanBeWaited() const { return op_type == kSignal || op_type == kBinaryAcquire; }
    };

#ifdef VK_USE_PLATFORM_METAL_EXT
    static bool GetMetalExport(const VkSemaphoreCreateInfo *info) {
        bool retval = false;
        auto export_metal_object_info = LvlFindInChain<VkExportMetalObjectCreateInfoEXT>(info->pNext);
        while (export_metal_object_info) {
            if (export_metal_object_info->exportObjectType == VK_EXPORT_METAL_OBJECT_TYPE_METAL_SHARED_EVENT_BIT_EXT) {
                retval = true;
                break;
            }
            export_metal_object_info = LvlFindInChain<VkExportMetalObjectCreateInfoEXT>(export_metal_object_info->pNext);
        }
        return retval;
    }
#endif  // VK_USE_PLATFORM_METAL_EXT
    static VkExternalSemaphoreHandleTypeFlags GetExportHandleTypes(const VkSemaphoreCreateInfo *pCreateInfo) {
        auto export_info = LvlFindInChain<VkExportSemaphoreCreateInfo>(pCreateInfo->pNext);
        return export_info ? export_info->handleTypes : 0;
    }

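    // The delegating constructor pulls the optional VkSemaphoreTypeCreateInfo out of the pNext
    // chain; when it is absent the semaphore type defaults to VK_SEMAPHORE_TYPE_BINARY.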
    SEMAPHORE_STATE(VkSemaphore sem, const VkSemaphoreCreateInfo *pCreateInfo)
        : SEMAPHORE_STATE(sem, LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext), pCreateInfo) {}

    SEMAPHORE_STATE(VkSemaphore sem, const VkSemaphoreTypeCreateInfo *type_create_info,
                    const VkSemaphoreCreateInfo *pCreateInfo)
        : REFCOUNTED_NODE(sem, kVulkanObjectTypeSemaphore),
#ifdef VK_USE_PLATFORM_METAL_EXT
          metal_semaphore_export(GetMetalExport(pCreateInfo)),
#endif  // VK_USE_PLATFORM_METAL_EXT
          type(type_create_info ? type_create_info->semaphoreType : VK_SEMAPHORE_TYPE_BINARY),
          exportHandleTypes(GetExportHandleTypes(pCreateInfo)),
          completed_{kNone, nullptr, 0, type_create_info ? type_create_info->initialValue : 0},
          next_payload_(completed_.payload + 1) {}

    VkSemaphore semaphore() const { return handle_.Cast<VkSemaphore>(); }
    SyncScope Scope() const {
        auto guard = ReadLock();
        return scope_;
    }
    // This is the most recently completed operation. It is returned by value so that the caller
    // has a correct copy even if something else completes on this semaphore in a different thread.
    SemOp Completed() const {
        auto guard = ReadLock();
        return completed_;
    }

    // Enqueue a semaphore operation. For binary semaphores, the payload value is generated and
    // returned, so that every semaphore operation has a unique value.
    bool EnqueueSignal(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload);
    void EnqueueWait(QUEUE_STATE *queue, uint64_t queue_seq, uint64_t &payload);

    // Enqueue functions for the binary-semaphore-only special cases: swapchain acquire and present
    void EnqueueAcquire();
    void EnqueuePresent(QUEUE_STATE *queue);

    // Remove completed operations and return the highest sequence numbers for all affected queues
    using RetireResult = layer_data::unordered_map<QUEUE_STATE *, uint64_t>;
    RetireResult Retire(QUEUE_STATE *queue, uint64_t payload);

    // Helper for retiring timeline semaphores and then retiring all queues using the semaphore
    void RetireTimeline(uint64_t payload);

    // Look for the most recent (highest payload) operation that matches the filter
    layer_data::optional<SemOp> LastOp(std::function<bool(const SemOp &)> filter = nullptr) const;

    bool CanBeSignaled() const;
    bool CanBeWaited() const;
    VkQueue AnotherQueueWaitsBinary(VkQueue queue) const;
    bool HasPendingOps() const {
        auto guard = ReadLock();
        return !operations_.empty();
    }

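    // Importing or exporting an external semaphore handle can move the semaphore out of the
    // internal sync scope (see SyncScope above).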
    void Import(VkExternalSemaphoreHandleTypeFlagBits handle_type, VkSemaphoreImportFlags flags);
    void Export(VkExternalSemaphoreHandleTypeFlagBits handle_type);
#ifdef VK_USE_PLATFORM_METAL_EXT
    const bool metal_semaphore_export;
#endif  // VK_USE_PLATFORM_METAL_EXT
    const VkSemaphoreType type;
    const VkExternalSemaphoreHandleTypeFlags exportHandleTypes;

  private:
    ReadLockGuard ReadLock() const { return ReadLockGuard(lock_); }
    WriteLockGuard WriteLock() { return WriteLockGuard(lock_); }

    SyncScope scope_{kSyncScopeInternal};
    // the most recently completed operation
    SemOp completed_{};
    // next payload value for binary semaphore operations
    uint64_t next_payload_;

    std::vector<std::shared_ptr<std::function<void()>>> waiting_functions_;

    // Set of pending operations ordered by payload. This must be a multiset because
    // timeline operations can be added in any order and multiple operations
    // can use the same payload value.
    std::multiset<SemOp> operations_;
    mutable ReadWriteLock lock_;
};

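// A single queue submission: the command buffers to execute, the semaphores to wait on and
// signal (with their payload values), and an optional fence to signal on completion.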
struct CB_SUBMISSION {
    struct SemaphoreInfo {
        std::shared_ptr<SEMAPHORE_STATE> semaphore;
        uint64_t payload{0};
    };

    std::vector<std::shared_ptr<CMD_BUFFER_STATE>> cbs;
    std::vector<SemaphoreInfo> wait_semaphores;
    std::vector<SemaphoreInfo> signal_semaphores;
    std::shared_ptr<FENCE_STATE> fence;
    uint64_t seq{0};
    uint32_t perf_submit_pass{0};

    void AddCommandBuffer(std::shared_ptr<CMD_BUFFER_STATE> &&cb_node) { cbs.emplace_back(std::move(cb_node)); }

    void AddSignalSemaphore(std::shared_ptr<SEMAPHORE_STATE> &&semaphore_state, uint64_t value) {
        SemaphoreInfo signal;
        signal.semaphore = std::move(semaphore_state);
        signal.payload = value;
        signal_semaphores.emplace_back(std::move(signal));
    }

    void AddWaitSemaphore(std::shared_ptr<SEMAPHORE_STATE> &&semaphore_state, uint64_t value) {
        SemaphoreInfo wait;
        wait.semaphore = std::move(semaphore_state);
        wait.payload = value;
        wait_semaphores.emplace_back(std::move(wait));
    }

    void AddFence(std::shared_ptr<FENCE_STATE> &&fence_state) { fence = std::move(fence_state); }
};

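// State tracker for a VkQueue. Keeps an ordered history of submissions so that fence and
// semaphore waits can retire command buffers and semaphore operations in submission order.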
class QUEUE_STATE : public BASE_NODE {
  public:
    QUEUE_STATE(VkQueue q, uint32_t index, VkDeviceQueueCreateFlags flags, const VkQueueFamilyProperties &queueFamilyProperties)
        : BASE_NODE(q, kVulkanObjectTypeQueue),
          queueFamilyIndex(index),
          flags(flags),
          queueFamilyProperties(queueFamilyProperties) {}

    VkQueue Queue() const { return handle_.Cast<VkQueue>(); }

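    // Record a new submission on this queue; submissions are tracked by sequence number and
    // retired in order once their work completes.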
    uint64_t Submit(CB_SUBMISSION &&submission);

    bool HasWait(VkSemaphore semaphore, VkFence fence) const;

    void Retire(uint64_t until_seq = UINT64_MAX);

    const uint32_t queueFamilyIndex;
    const VkDeviceQueueCreateFlags flags;
    const VkQueueFamilyProperties queueFamilyProperties;

  private:
    layer_data::optional<CB_SUBMISSION> NextSubmission(uint64_t until_seq);
    ReadLockGuard ReadLock() const { return ReadLockGuard(lock_); }
    WriteLockGuard WriteLock() { return WriteLockGuard(lock_); }

    std::deque<CB_SUBMISSION> submissions_;
    uint64_t seq_ = 0;
    mutable ReadWriteLock lock_;
};