/* Copyright (c) 2019-2022 The Khronos Group Inc.
 * Copyright (c) 2019-2022 Valve Corporation
 * Copyright (c) 2019-2022 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"

// Utilities to DRY up Get... calls
template <typename Map, typename Key = typename Map::key_type, typename RetVal = layer_data::optional<typename Map::mapped_type>>
RetVal GetMappedOptional(const Map &map, const Key &key) {
    RetVal ret_val;
    auto it = map.find(key);
    if (it != map.cend()) {
        ret_val.emplace(it->second);
    }
    return ret_val;
}
template <typename Map, typename Fn>
typename Map::mapped_type GetMapped(const Map &map, const typename Map::key_type &key, Fn &&default_factory) {
    auto value = GetMappedOptional(map, key);
    return (value) ? *value : default_factory();
}

template <typename Map, typename Fn>
typename Map::mapped_type GetMappedInsert(Map &map, const typename Map::key_type &key, Fn &&emplace_factory) {
    auto value = GetMappedOptional(map, key);
    if (value) {
        return *value;
    }
    auto insert_it = map.emplace(std::make_pair(key, emplace_factory()));
    assert(insert_it.second);

    return insert_it.first->second;
}

template <typename Map, typename Key = typename Map::key_type, typename Mapped = typename Map::mapped_type,
          typename Value = typename Mapped::element_type>
Value *GetMappedPlainFromShared(const Map &map, const Key &key) {
    auto value = GetMappedOptional<Map, Key>(map, key);
    if (value) return value->get();
    return nullptr;
}
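// Illustrative usage sketch (hypothetical container and values, not part of the validator):
//   std::map<VkQueue, uint32_t> queue_indices;
//   // Returns the mapped value, or the factory's fallback when the key is absent.
//   uint32_t index = GetMapped(queue_indices, queue, []() { return 0U; });
//   // Returns the mapped value, inserting the factory's result first when the key is absent.
//   uint32_t index_after_insert = GetMappedInsert(queue_indices, queue, []() { return 0U; });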

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.Binding(); }

static bool SimpleBinding(const IMAGE_STATE &image_state) {
    bool simple =
        SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.IsSwapchainImage() || image_state.bind_swapchain;

    // If it's not simple we must have an encoder.
    assert(!simple || image_state.fragment_encoder.get());
    return simple;
}

static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
static const std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; };
static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
    return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
}

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::READ_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_READ:
            return true;
            break;
        default:
            assert(0);
    }
    return false;
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}
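// Illustrative output sketch (the access names below are hypothetical examples of table entries):
// a mask with two bits set yields a separated list such as
//   "SYNC_VERTEX_SHADER_SHADER_STORAGE_READ|SYNC_FRAGMENT_SHADER_UNIFORM_READ"
// while an empty mask yields "0".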

std::ostream &operator<<(std::ostream &out, const ResourceUsageRecord &record) {
    out << "command: " << CommandTypeString(record.command);
    out << ", seq_no: " << record.seq_num;
    if (record.sub_command != 0) {
        out << ", subcmd: " << record.sub_command;
    }
    return out;
}

static std::string string_UsageIndex(SyncStageAccessIndex usage_index) {
    const char *stage_access_name = "INVALID_STAGE_ACCESS";
    if (usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size())) {
        stage_access_name = syncStageAccessInfoByStageAccessIndex[usage_index].name;
    }
    return std::string(stage_access_name);
}

struct SyncNodeFormatter {
    const debug_report_data *report_data;
    const BASE_NODE *node;
    const char *label;

    SyncNodeFormatter(const SyncValidator &sync_state, const CMD_BUFFER_STATE *cb_state)
        : report_data(sync_state.report_data), node(cb_state), label("command_buffer") {}
    SyncNodeFormatter(const SyncValidator &sync_state, const QUEUE_STATE *q_state)
        : report_data(sync_state.report_data), node(q_state), label("queue") {}
};

std::ostream &operator<<(std::ostream &out, const SyncNodeFormatter &formatter) {
    if (formatter.node) {
        out << ", " << formatter.label << ": " << formatter.report_data->FormatHandle(formatter.node->Handle()).c_str();
        if (formatter.node->Destroyed()) {
            out << " (destroyed)";
        }
    } else {
        out << ", " << formatter.label << ": null handle";
    }
    return out;
}

std::ostream &operator<<(std::ostream &out, const HazardResult &hazard) {
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(";
    if (!hazard.recorded_access.get()) {
        // If we have a recorded usage, the usage is reported from the recorded context's point of view
        out << "usage: " << usage_info.name << ", ";
    }
    out << "prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }
    return out;
}

struct NoopBarrierAction {
    explicit NoopBarrierAction() {}
    void operator()(ResourceAccessState *access) const {}
    const bool layout_transition = false;
};

// NOTE: Make sure the proxy doesn't outlive "from", as the proxy is pointing directly to access contexts owned by "from".
CommandBufferAccessContext::CommandBufferAccessContext(const CommandBufferAccessContext &from, AsProxyContext dummy)
    : CommandBufferAccessContext(from.sync_state_) {
    // Copy only the needed fields out of "from" for a temporary, proxy command buffer context
    cb_state_ = from.cb_state_;
    queue_flags_ = from.queue_flags_;
    destroyed_ = from.destroyed_;
    access_log_ = from.access_log_;  // potentially large, but no choice given tagging lookup.
    command_number_ = from.command_number_;
    subcommand_number_ = from.subcommand_number_;
    reset_count_ = from.reset_count_;

    const auto *from_context = from.GetCurrentAccessContext();
    assert(from_context);

    // Construct a fully resolved single access context out of "from"
    const NoopBarrierAction noop_barrier;
    for (AccessAddressType address_type : kAddressTypes) {
        from_context->ResolveAccessRange(address_type, kFullRange, noop_barrier,
                                         &cb_access_context_.GetAccessStateMap(address_type), nullptr);
    }
    // The proxy has flattened the current render pass context (if any), but the async contexts are needed for hazard detection
    cb_access_context_.ImportAsyncContexts(*from_context);

    events_context_ = from.events_context_;

    // We don't want to copy the full render_pass_context_ history just for the proxy.
}

std::string CommandBufferAccessContext::FormatUsage(const ResourceUsageTag tag) const {
    if (tag >= access_log_.size()) return std::string();

    std::stringstream out;
    assert(tag < access_log_.size());
    const auto &record = access_log_[tag];
    out << record;
    if (cb_state_.get() != record.cb_state) {
        out << SyncNodeFormatter(*sync_state_, record.cb_state);
    }
    out << ", reset_no: " << std::to_string(record.reset_count);
    return out.str();
}

std::string CommandBufferAccessContext::FormatUsage(const ResourceFirstAccess &access) const {
    std::stringstream out;
    out << "(recorded_usage: " << string_UsageIndex(access.usage_index);
    out << ", " << FormatUsage(access.tag) << ")";
    return out.str();
}

std::string CommandExecutionContext::FormatHazard(const HazardResult &hazard) const {
    std::stringstream out;
    out << hazard;
    out << ", " << FormatUsage(hazard.tag) << ")";
    return out.str();
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};

// Sometimes we have an internal access conflict, and we use kInvalidTag to set and detect it in temporary/proxy contexts
static const ResourceUsageTag kInvalidTag(ResourceUsageRecord::kMaxIndex);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) { return bindable.GetFakeBaseAddress(); }

VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
//  Constructor() -- initializes the generator to point to the begin of the space declared.
//  *             -- the current range of the generator; an empty range signifies end
//  ++            -- advance to the next non-empty range (or end)

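// Illustrative walk sketch (hypothetical map and range variables):
//   EventSimpleRangeGenerator range_gen(event_scope_map, target_range);
//   for (; range_gen->non_empty(); ++range_gen) {
//       const ResourceAccessRange &current = *range_gen;  // a non-empty intersected range
//       ... process current ...
//   }
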
// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
  public:
    SingleRangeGenerator(const KeyType &range) : current_(range) {}
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    SingleRangeGenerator &operator++() {
        current_ = KeyType();  // just one real range
        return *this;
    }

    bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }

  private:
    SingleRangeGenerator() = default;
    const KeyType range_;
    KeyType current_;
};

// Generate the ranges that are the intersection of range and the entries in the RangeMap
template <typename RangeMap, typename KeyType = typename RangeMap::key_type>
class MapRangesRangeGenerator {
  public:
    // A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
    MapRangesRangeGenerator() : range_(), map_(nullptr), map_pos_(), current_() {
        // Default construction for KeyType *must* be an empty range
        assert(current_.empty());
    }
    MapRangesRangeGenerator(const RangeMap &filter, const KeyType &range) : range_(range), map_(&filter), map_pos_(), current_() {
        SeekBegin();
    }
    MapRangesRangeGenerator(const MapRangesRangeGenerator &from) = default;

    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    MapRangesRangeGenerator &operator++() {
        ++map_pos_;
        UpdateCurrent();
        return *this;
    }

    bool operator==(const MapRangesRangeGenerator &other) const { return current_ == other.current_; }

  protected:
    void UpdateCurrent() {
        if (map_pos_ != map_->cend()) {
            current_ = range_ & map_pos_->first;
        } else {
            current_ = KeyType();
        }
    }
    void SeekBegin() {
        map_pos_ = map_->lower_bound(range_);
        UpdateCurrent();
    }

    // Adding this functionality here, to avoid gratuitous Base:: qualifiers in the derived class
    // Note: Not exposed in this class's public interface to encourage using a consistent ++/empty generator semantic
    template <typename Pred>
    MapRangesRangeGenerator &PredicatedIncrement(Pred &pred) {
        do {
            ++map_pos_;
        } while (map_pos_ != map_->cend() && map_pos_->first.intersects(range_) && !pred(map_pos_));
        UpdateCurrent();
        return *this;
    }

    const KeyType range_;
    const RangeMap *map_;
    typename RangeMap::const_iterator map_pos_;
    KeyType current_;
};
using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
using EventSimpleRangeGenerator = MapRangesRangeGenerator<SyncEventState::ScopeMap>;

// Generate the ranges for entries meeting the predicate that are the intersection of range and the entries in the RangeMap
template <typename RangeMap, typename Predicate, typename KeyType = typename RangeMap::key_type>
class PredicatedMapRangesRangeGenerator : public MapRangesRangeGenerator<RangeMap, KeyType> {
  public:
    using Base = MapRangesRangeGenerator<RangeMap, KeyType>;
    // A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
    PredicatedMapRangesRangeGenerator() : Base(), pred_() {}
    PredicatedMapRangesRangeGenerator(const RangeMap &filter, const KeyType &range, Predicate pred)
        : Base(filter, range), pred_(pred) {}
    PredicatedMapRangesRangeGenerator(const PredicatedMapRangesRangeGenerator &from) = default;

    PredicatedMapRangesRangeGenerator &operator++() {
        Base::PredicatedIncrement(pred_);
        return *this;
    }

  protected:
    Predicate pred_;
};

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
// Templated to allow for different Range generators or map sources...
template <typename RangeMap, typename RangeGen, typename KeyType = typename RangeMap::key_type>
class FilteredGeneratorGenerator {
  public:
    // A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
    FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
        // Default construction for KeyType *must* be an empty range
        assert(current_.empty());
    }
    FilteredGeneratorGenerator(const RangeMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++gen_;
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *gen_; }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++gen_;
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    const RangeMap *filter_;
    RangeGen gen_;
    typename RangeMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;
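
// Illustrative intersection sketch (hypothetical values): with filter entries [0, 10) and
// [20, 30) and a generator producing [5, 25), the filtered generator yields [5, 10) and
// then [20, 25).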


ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}
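
// Worked example sketch (hypothetical values): a buffer region bound at offset 256 with
// stride 16, first_index 10, and count 20 yields
//   range_start = 256 + 10 * 16 = 416, range_size = 20 * 16 = 320 -> [416, 736)
// With count == UINT32_MAX the size instead extends to buf_whole_size - range_start.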

SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
    // Because if a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard is impossible.
    if (descriptor_data.is_writable) {
        return stage_access->second.storage_write;
    }
    // TODO: sampled_read
    return stage_access->second.storage_read;
}
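
// Illustrative mapping sketch: a writable storage buffer visible to a given stage resolves to
// that stage's storage_write index, a read-only one to storage_read, and a uniform buffer
// always maps to uniform_read.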

bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address, false);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}

// Traverse the attachment resolves for a specific subpass, and apply action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
                      uint32_t subpass) {
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an inuse color attachment and a matching inuse resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ,
                       SyncOrdering::kColorAttachment);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
                       SyncOrdering::kColorAttachment);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        AttachmentViewGen::Gen gen_type = AttachmentViewGen::Gen::kRenderArea;
        if (resolve_depth && resolve_stencil) {
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            gen_type = AttachmentViewGen::Gen::kDepthOnlyRenderArea;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate all stencil only
            gen_type = AttachmentViewGen::Gen::kStencilOnlyRenderArea;
            aspect_string = "stencil";
        }

        if (aspect_string) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at], gen_type,
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at], gen_type,
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandExecutionContext &exec_context, CMD_TYPE cmd_type)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          exec_context_(exec_context),
          cmd_type_(cmd_type),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage,
                    SyncOrdering ordering_rule) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view_gen, gen_type, current_usage, ordering_rule);
        if (hazard.hazard) {
            skip_ |= exec_context_.GetSyncState().LogError(
                render_pass_, string_SyncHazardVUID(hazard.hazard),
                "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32 " to resolve attachment %" PRIu32
                ". Access info %s.",
                CommandTypeString(cmd_type_), string_SyncHazard(hazard.hazard), subpass_, aspect_name, attachment_name, src_at,
                dst_at, exec_context_.FormatHazard(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandExecutionContext &exec_context_;
    CMD_TYPE cmd_type_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, ResourceUsageTag tag) : context_(context), tag_(tag) {}
    void operator()(const char *, const char *, uint32_t, uint32_t, const AttachmentViewGen &view_gen,
                    AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view_gen, gen_type, current_usage, ordering_rule, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag tag_) {
    access_state = layer_data::make_unique<const ResourceAccessState>(*access_state_);
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

void HazardResult::AddRecordedAccess(const ResourceFirstAccess &first_access) {
    recorded_access = layer_data::make_unique<const ResourceFirstAccess>(first_access);
}

void AccessContext::DeleteAccess(const AddressRange &address) { GetAccessStateMap(address.type).erase_range(address.range); }

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
    prev_.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (has_barrier_from_external) {
        // Store the barrier from external with the rest, but save a pointer for "by subpass" lookups.
        prev_.emplace_back(external_context, queue_flags, subpass_dep.barrier_from_external);
        src_external_ = &prev_.back();
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (auto &access : accesses) {
            action(address_type, access);
        }
    }
}

// A recursive range walker for hazard detection, checking the current context first and then recurring (via
// DetectPreviousHazard) to walk the DAG of prior contexts (for example, subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto the_end = accesses.cend();  // End is not invalidated
    auto pos = accesses.lower_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    while (pos != the_end && pos->first.begin < range.end) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
        ++pos;
    }

    if (detect_prev) {
        // Detect in the trailing empty as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    auto pos = accesses.lower_bound(range);
    const auto the_end = accesses.end();

    HazardResult hazard;
    while (pos != the_end && pos->first.begin < range.end) {
        hazard = detector.DetectAsync(pos, start_tag_);
        if (hazard.hazard) break;
        ++pos;
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct QueueTagOffsetBarrierAction {
    QueueTagOffsetBarrierAction(QueueId qid, ResourceUsageTag offset) : queue_id(qid), tag_offset(offset) {}
    void operator()(ResourceAccessState *access) const {
        access->OffsetTag(tag_offset);
        access->SetQueueId(queue_id);
    };
    QueueId queue_id;
    ResourceUsageTag tag_offset;
};

struct ApplyTrackbackStackAction {
    explicit ApplyTrackbackStackAction(const std::vector<SyncBarrier> &barriers_,
                                       const ResourceAccessStateFunction *previous_barrier_ = nullptr)
        : barriers(barriers_), previous_barrier(previous_barrier_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        // NOTE: We can use the invalid tag, as these barriers do not include layout transitions (would assert in SetWrite)
        access->ApplyPendingBarriers(kInvalidTag);
        if (previous_barrier) {
            assert(bool(*previous_barrier));
            (*previous_barrier)(access);
        }
    }
    const std::vector<SyncBarrier> &barriers;
    const ResourceAccessStateFunction *previous_barrier;
};
954
955// Splits a single map entry into piece matching the entries in [first, last) the total range over [first, last) must be
956// contained with entry. Entry must be an iterator pointing to dest, first and last must be iterators pointing to a
957// *different* map from dest.
958// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
959// range [first, last)
960template <typename BarrierAction>
John Zulauf355e49b2020-04-24 15:11:15 -0600961static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
962 ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
John Zulaufb02c1eb2020-10-06 16:33:36 -0600963 BarrierAction &barrier_action) {
John Zulauf355e49b2020-04-24 15:11:15 -0600964 auto at = entry;
965 for (auto pos = first; pos != last; ++pos) {
966 // Every member of the input iterator range must fit within the remaining portion of entry
967 assert(at->first.includes(pos->first));
968 assert(at != dest->end());
969 // Trim up at to the same size as the entry to resolve
970 at = sparse_container::split(at, *dest, pos->first);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600971 auto access = pos->second; // intentional copy
972 barrier_action(&access);
John Zulauf355e49b2020-04-24 15:11:15 -0600973 at->second.Resolve(access);
974 ++at; // Go to the remaining unused section of entry
975 }
976}
977
John Zulaufa0a98292020-09-18 09:30:10 -0600978static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
979 SyncBarrier merged = {};
980 for (const auto &barrier : barriers) {
981 merged.Merge(barrier);
982 }
983 return merged;
984}
985
John Zulaufb02c1eb2020-10-06 16:33:36 -0600986template <typename BarrierAction>
John Zulauf43cc7462020-12-03 12:33:12 -0700987void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
John Zulauf355e49b2020-04-24 15:11:15 -0600988 ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
989 bool recur_to_infill) const {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600990 if (!range.non_empty()) return;
991
John Zulauf355e49b2020-04-24 15:11:15 -0600992 ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
993 while (current->range.non_empty() && range.includes(current->range.begin)) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600994 const auto current_range = current->range & range;
John Zulauf16adfc92020-04-08 10:28:33 -0600995 if (current->pos_B->valid) {
996 const auto &src_pos = current->pos_B->lower_bound;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600997 auto access = src_pos->second; // intentional copy
998 barrier_action(&access);
999
John Zulauf16adfc92020-04-08 10:28:33 -06001000 if (current->pos_A->valid) {
John Zulauf3bcab5e2020-06-19 14:42:32 -06001001 const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
1002 trimmed->second.Resolve(access);
1003 current.invalidate_A(trimmed);
John Zulauf5f13a792020-03-10 07:31:21 -06001004 } else {
John Zulauf3bcab5e2020-06-19 14:42:32 -06001005 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
John Zulauf355e49b2020-04-24 15:11:15 -06001006 current.invalidate_A(inserted); // Update the parallel iterator to point at the insert segment
John Zulauf5f13a792020-03-10 07:31:21 -06001007 }
John Zulauf16adfc92020-04-08 10:28:33 -06001008 } else {
1009 // we have to descend to fill this gap
1010 if (recur_to_infill) {
John Zulauf22aefed2021-03-11 18:14:35 -07001011 ResourceAccessRange recurrence_range = current_range;
1012 // The current context is empty for the current range, so recur to fill the gap.
1013 // Since we will be recurring back up the DAG, expand the gap descent to cover the full range for which B
1014 // is not valid, to minimize that recurrence
1015 if (current->pos_B.at_end()) {
1016 // Do the remainder here....
1017 recurrence_range.end = range.end;
John Zulauf355e49b2020-04-24 15:11:15 -06001018 } else {
John Zulauf22aefed2021-03-11 18:14:35 -07001019 // Recur only over the range until B becomes valid (within the limits of range).
1020 recurrence_range.end = std::min(range.end, current->pos_B->lower_bound->first.begin);
John Zulauf355e49b2020-04-24 15:11:15 -06001021 }
John Zulauf22aefed2021-03-11 18:14:35 -07001022 ResolvePreviousAccessStack(type, recurrence_range, resolve_map, infill_state, barrier_action);
1023
John Zulauf355e49b2020-04-24 15:11:15 -06001024 // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
1025 // iterator of the outer while.
1026
1027 // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
1028 // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
1029 // we stepped on the dest map
John Zulauf22aefed2021-03-11 18:14:35 -07001030 const auto seek_to = recurrence_range.end - 1; // The subtraction is safe as range can't be empty (loop condition)
locke-lunarg88dbb542020-06-23 22:05:42 -06001031 current.invalidate_A(); // Changes current->range
John Zulauf355e49b2020-04-24 15:11:15 -06001032 current.seek(seek_to);
1033 } else if (!current->pos_A->valid && infill_state) {
1034 // If we didn't find anything in the current range, and we aren't reccuring... we infill if required
1035 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
1036 current.invalidate_A(inserted); // Update the parallel iterator to point at the correct segment after insert
John Zulauf16adfc92020-04-08 10:28:33 -06001037 }
John Zulauf5f13a792020-03-10 07:31:21 -06001038 }
ziga-lunargf0e27ad2022-03-28 00:44:12 +02001039 if (current->range.non_empty()) {
1040 ++current;
1041 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001042 }
John Zulauf1a224292020-06-30 14:52:13 -06001043
1044    // Infill if range goes past both the current and resolve map prior contents
1045 if (recur_to_infill && (current->range.end < range.end)) {
1046 ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
John Zulauf22aefed2021-03-11 18:14:35 -07001047 ResolvePreviousAccessStack<BarrierAction>(type, trailing_fill_range, resolve_map, infill_state, barrier_action);
John Zulauf1a224292020-06-30 14:52:13 -06001048 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001049}
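// Illustrative walk-through (descriptive comment, not part of the original source): given a
// source (B) map of { [0,10): A1, [20,30): A2 } and an empty resolve (A) map, resolving the
// range [0,30) copies A1 and A2 into resolve_map with barrier_action applied, and, when
// recur_to_infill is set, recurs up the context DAG to fill the gap [10,20), falling back
// to infill_state only where no prior context has an entry.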
1050
John Zulauf22aefed2021-03-11 18:14:35 -07001051template <typename BarrierAction>
1052void AccessContext::ResolvePreviousAccessStack(AccessAddressType type, const ResourceAccessRange &range,
1053 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1054 const BarrierAction &previous_barrier) const {
1055 ResourceAccessStateFunction stacked_barrier(std::ref(previous_barrier));
1056 ResolvePreviousAccess(type, range, descent_map, infill_state, &stacked_barrier);
1057}
1058
John Zulauf43cc7462020-12-03 12:33:12 -07001059void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
John Zulauf22aefed2021-03-11 18:14:35 -07001060 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1061 const ResourceAccessStateFunction *previous_barrier) const {
1062 if (prev_.size() == 0) {
John Zulauf5f13a792020-03-10 07:31:21 -06001063 if (range.non_empty() && infill_state) {
John Zulauf22aefed2021-03-11 18:14:35 -07001064            // Fill the empty portions of descent_map with the default_state, with the barrier function applied (iff present)
1065 ResourceAccessState state_copy;
1066 if (previous_barrier) {
1067 assert(bool(*previous_barrier));
1068 state_copy = *infill_state;
1069 (*previous_barrier)(&state_copy);
1070 infill_state = &state_copy;
1071 }
1072 sparse_container::update_range_value(*descent_map, range, *infill_state,
1073 sparse_container::value_precedence::prefer_dest);
John Zulauf5f13a792020-03-10 07:31:21 -06001074 }
1075 } else {
1076 // Look for something to fill the gap further along.
1077 for (const auto &prev_dep : prev_) {
John Zulauf22aefed2021-03-11 18:14:35 -07001078 const ApplyTrackbackStackAction barrier_action(prev_dep.barriers, previous_barrier);
John Zulaufbb890452021-12-14 11:30:18 -07001079 prev_dep.source_subpass->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001080 }
John Zulauf5f13a792020-03-10 07:31:21 -06001081 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001082}
1083
John Zulauf4a6105a2020-11-17 15:11:05 -07001084// Non-lazy import of all accesses; WaitEvents needs this.
1085void AccessContext::ResolvePreviousAccesses() {
1086 ResourceAccessState default_state;
John Zulauf22aefed2021-03-11 18:14:35 -07001087 if (!prev_.size()) return; // If no previous contexts, nothing to do
1088
John Zulauf4a6105a2020-11-17 15:11:05 -07001089 for (const auto address_type : kAddressTypes) {
1090 ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
1091 }
1092}
1093
John Zulauf43cc7462020-12-03 12:33:12 -07001094AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
1095 return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
John Zulauf16adfc92020-04-08 10:28:33 -06001096}
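// Descriptive note (not part of the original source): linearly-tiled images share the
// kLinear address space with buffer memory, while opaquely-tiled images are tracked in
// the synthetic kIdealized space produced by the image's fragment encoder.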
1097
John Zulauf1507ee42020-05-18 11:33:09 -06001098static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001099 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1100 ? SYNC_ACCESS_INDEX_NONE
1101 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
1102 : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001103 return stage_access;
1104}
1105static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001106 const auto stage_access =
1107 (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1108 ? SYNC_ACCESS_INDEX_NONE
1109 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
1110 : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001111 return stage_access;
1112}
1113
John Zulauf7635de32020-05-29 17:14:15 -06001114// Caller must manage returned pointer
1115static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001116 uint32_t subpass, const AttachmentViewGenVector &attachment_views) {
John Zulauf7635de32020-05-29 17:14:15 -06001117 auto *proxy = new AccessContext(context);
John Zulaufee984022022-04-13 16:39:50 -06001118 proxy->UpdateAttachmentResolveAccess(rp_state, attachment_views, subpass, kInvalidTag);
1119 proxy->UpdateAttachmentStoreAccess(rp_state, attachment_views, subpass, kInvalidTag);
John Zulauf7635de32020-05-29 17:14:15 -06001120 return proxy;
1121}
1122
John Zulaufb02c1eb2020-10-06 16:33:36 -06001123template <typename BarrierAction>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001124void AccessContext::ResolveAccessRange(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1125 BarrierAction &barrier_action, ResourceAccessRangeMap *descent_map,
1126 const ResourceAccessState *infill_state) const {
1127 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1128 if (!attachment_gen) return;
1129
1130 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1131 const AccessAddressType address_type = view_gen.GetAddressType();
1132 for (; range_gen->non_empty(); ++range_gen) {
1133 ResolveAccessRange(address_type, *range_gen, barrier_action, descent_map, infill_state);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001134 }
John Zulauf62f10592020-04-03 12:20:02 -06001135}
1136
John Zulauf1d5f9c12022-05-13 14:51:08 -06001137template <typename ResolveOp>
1138void AccessContext::ResolveFromContext(ResolveOp &&resolve_op, const AccessContext &from_context,
1139 const ResourceAccessState *infill_state, bool recur_to_infill) {
1140 for (auto address_type : kAddressTypes) {
1141 from_context.ResolveAccessRange(address_type, kFullRange, resolve_op, &GetAccessStateMap(address_type), infill_state,
1142 recur_to_infill);
1143 }
1144}
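// Descriptive note (not part of the original source): since kFullRange spans the entire
// address space of each address type, the calling context ends up with a complete copy of
// from_context's accesses as transformed by resolve_op.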
1145
John Zulauf7635de32020-05-29 17:14:15 -06001146// Layout transitions are handled as if they were occurring at the beginning of the next subpass
John Zulaufbb890452021-12-14 11:30:18 -07001147bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001148 const VkRect2D &render_area, uint32_t subpass,
sjfricke0bea06e2022-06-05 09:22:26 +09001149 const AttachmentViewGenVector &attachment_views, CMD_TYPE cmd_type) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001150 bool skip = false;
John Zulauf7635de32020-05-29 17:14:15 -06001151    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
1152    // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
1153    // those effects have not been recorded yet.
1154 //
1155 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
1156 // to apply and only copy then, if this proves a hot spot.
1157 std::unique_ptr<AccessContext> proxy_for_prev;
1158 TrackBack proxy_track_back;
1159
John Zulauf355e49b2020-04-24 15:11:15 -06001160 const auto &transitions = rp_state.subpass_transitions[subpass];
1161 for (const auto &transition : transitions) {
John Zulauf7635de32020-05-29 17:14:15 -06001162 const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
1163
1164 const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
John Zulauf22aefed2021-03-11 18:14:35 -07001165 assert(track_back);
John Zulauf7635de32020-05-29 17:14:15 -06001166 if (prev_needs_proxy) {
1167 if (!proxy_for_prev) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001168 proxy_for_prev.reset(
John Zulaufbb890452021-12-14 11:30:18 -07001169 CreateStoreResolveProxyContext(*track_back->source_subpass, rp_state, transition.prev_pass, attachment_views));
John Zulauf7635de32020-05-29 17:14:15 -06001170 proxy_track_back = *track_back;
John Zulaufbb890452021-12-14 11:30:18 -07001171 proxy_track_back.source_subpass = proxy_for_prev.get();
John Zulauf7635de32020-05-29 17:14:15 -06001172 }
1173 track_back = &proxy_track_back;
1174 }
1175 auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
John Zulauf355e49b2020-04-24 15:11:15 -06001176 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09001177 const char *func_name = CommandTypeString(cmd_type);
John Zulaufee984022022-04-13 16:39:50 -06001178 if (hazard.tag == kInvalidTag) {
John Zulaufbb890452021-12-14 11:30:18 -07001179 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001180 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1181 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1182 " image layout transition (old_layout: %s, new_layout: %s) after store/resolve operation in subpass %" PRIu32,
1183 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1184 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout), transition.prev_pass);
1185 } else {
John Zulaufbb890452021-12-14 11:30:18 -07001186 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001187 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1188 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1189 " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
1190 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1191 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf397e68b2022-04-19 11:44:07 -06001192 exec_context.FormatHazard(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06001193 }
John Zulauf355e49b2020-04-24 15:11:15 -06001194 }
1195 }
1196 return skip;
1197}
1198
John Zulaufbb890452021-12-14 11:30:18 -07001199bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001200 const VkRect2D &render_area, uint32_t subpass,
sjfricke0bea06e2022-06-05 09:22:26 +09001201 const AttachmentViewGenVector &attachment_views, CMD_TYPE cmd_type) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001202 bool skip = false;
1203 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufa0a98292020-09-18 09:30:10 -06001204
John Zulauf1507ee42020-05-18 11:33:09 -06001205 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1206 if (subpass == rp_state.attachment_first_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001207 const auto &view_gen = attachment_views[i];
1208 if (!view_gen.IsValid()) continue;
John Zulauf1507ee42020-05-18 11:33:09 -06001209 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001210
1211            // Need to check in the following way:
1212            // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1213 // vs. transition
1214 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1215 // for each aspect loaded.
1216
1217 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001218 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001219 const bool is_color = !(has_depth || has_stencil);
1220
1221 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001222 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001223
John Zulaufaff20662020-06-01 14:07:58 -06001224 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001225 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001226
John Zulaufb02c1eb2020-10-06 16:33:36 -06001227 bool checked_stencil = false;
John Zulauf57261402021-08-13 11:32:06 -06001228 if (is_color && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001229 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea, load_index, SyncOrdering::kColorAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001230 aspect = "color";
1231 } else {
John Zulauf57261402021-08-13 11:32:06 -06001232 if (has_depth && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001233 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_index,
1234 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001235 aspect = "depth";
1236 }
John Zulauf57261402021-08-13 11:32:06 -06001237 if (!hazard.hazard && has_stencil && (stencil_load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001238 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, stencil_load_index,
1239 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001240 aspect = "stencil";
1241 checked_stencil = true;
1242 }
1243 }
1244
1245 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09001246 const char *func_name = CommandTypeString(cmd_type);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001247 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulaufbb890452021-12-14 11:30:18 -07001248 const auto &sync_state = exec_context.GetSyncState();
John Zulaufee984022022-04-13 16:39:50 -06001249 if (hazard.tag == kInvalidTag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001250 // Hazard vs. ILT
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001251 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulaufb02c1eb2020-10-06 16:33:36 -06001252 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1253 " aspect %s during load with loadOp %s.",
1254 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1255 } else {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001256 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauf1507ee42020-05-18 11:33:09 -06001257 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001258 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001259 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauf397e68b2022-04-19 11:44:07 -06001260 exec_context.FormatHazard(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001261 }
1262 }
1263 }
1264 }
1265 return skip;
1266}
1267
John Zulaufaff20662020-06-01 14:07:58 -06001268// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1269// because of the ordering guarantees w.r.t. sample access, and because the resolve validation hasn't altered the state
1270// (store is part of the same Next/End operation).
1271// The latter is handled in layout transition validation directly.
John Zulaufbb890452021-12-14 11:30:18 -07001272bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001273 const VkRect2D &render_area, uint32_t subpass,
sjfricke0bea06e2022-06-05 09:22:26 +09001274 const AttachmentViewGenVector &attachment_views, CMD_TYPE cmd_type) const {
John Zulaufaff20662020-06-01 14:07:58 -06001275 bool skip = false;
1276 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001277
1278 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1279 if (subpass == rp_state.attachment_last_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001280 const AttachmentViewGen &view_gen = attachment_views[i];
1281 if (!view_gen.IsValid()) continue;
John Zulaufaff20662020-06-01 14:07:58 -06001282 const auto &ci = attachment_ci[i];
1283
1284 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1285 // so we assume that an implementation is *free* to write in that case, meaning that for correctness
1286 // sake, we treat DONT_CARE as writing.
1287 const bool has_depth = FormatHasDepth(ci.format);
1288 const bool has_stencil = FormatHasStencil(ci.format);
1289 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001290 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001291 if (!has_stencil && !store_op_stores) continue;
1292
1293 HazardResult hazard;
1294 const char *aspect = nullptr;
1295 bool checked_stencil = false;
1296 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001297 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
1298 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001299 aspect = "color";
1300 } else {
John Zulauf57261402021-08-13 11:32:06 -06001301 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001302 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001303 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1304 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001305 aspect = "depth";
1306 }
1307 if (!hazard.hazard && has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001308 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1309 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001310 aspect = "stencil";
1311 checked_stencil = true;
1312 }
1313 }
1314
1315 if (hazard.hazard) {
1316 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1317 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulauf397e68b2022-04-19 11:44:07 -06001318 skip |= exec_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1319 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1320 " %s aspect during store with %s %s. Access info %s",
sjfricke0bea06e2022-06-05 09:22:26 +09001321 CommandTypeString(cmd_type), string_SyncHazard(hazard.hazard), subpass,
1322 i, aspect, op_type_string, store_op_string,
John Zulauf397e68b2022-04-19 11:44:07 -06001323 exec_context.FormatHazard(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001324 }
1325 }
1326 }
1327 return skip;
1328}
1329
John Zulaufbb890452021-12-14 11:30:18 -07001330bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001331 const VkRect2D &render_area, const AttachmentViewGenVector &attachment_views,
sjfricke0bea06e2022-06-05 09:22:26 +09001332 CMD_TYPE cmd_type, uint32_t subpass) const {
1333 ValidateResolveAction validate_action(rp_state.renderPass(), subpass, *this, exec_context, cmd_type);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001334 ResolveOperation(validate_action, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001335 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001336}
1337
John Zulauf06f6f1e2022-04-19 15:28:11 -06001338void AccessContext::AddAsyncContext(const AccessContext *context) { async_.emplace_back(context); }
1339
John Zulauf3d84f1b2020-03-09 13:33:25 -06001340class HazardDetector {
1341 SyncStageAccessIndex usage_index_;
1342
1343 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001344 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf14940722021-04-12 15:19:02 -06001345 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001346 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001347 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001348 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001349};
1350
John Zulauf69133422020-05-20 14:55:53 -06001351class HazardDetectorWithOrdering {
1352 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001353 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001354
1355 public:
1356 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulaufec943ec2022-06-29 07:52:56 -06001357 return pos->second.DetectHazard(usage_index_, ordering_rule_, QueueSyncState::kQueueIdInvalid);
John Zulauf69133422020-05-20 14:55:53 -06001358 }
John Zulauf14940722021-04-12 15:19:02 -06001359 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001360 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001361 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001362 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001363};
1364
John Zulauf16adfc92020-04-08 10:28:33 -06001365HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001366 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001367 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001368 const auto base_address = ResourceBaseAddress(buffer);
1369 HazardDetector detector(usage_index);
1370 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001371}
1372
John Zulauf69133422020-05-20 14:55:53 -06001373template <typename Detector>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001374HazardResult AccessContext::DetectHazard(Detector &detector, const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1375 DetectOptions options) const {
1376 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1377 if (!attachment_gen) return HazardResult();
1378
1379 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1380 const auto address_type = view_gen.GetAddressType();
1381 for (; range_gen->non_empty(); ++range_gen) {
1382 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1383 if (hazard.hazard) return hazard;
1384 }
1385
1386 return HazardResult();
1387}
1388
1389template <typename Detector>
John Zulauf69133422020-05-20 14:55:53 -06001390HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1391 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001392 const VkExtent3D &extent, bool is_depth_sliced, DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001393 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001394 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001395 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001396 base_address, is_depth_sliced);
John Zulauf150e5332020-12-03 08:52:52 -07001397 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001398 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001399 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001400 if (hazard.hazard) return hazard;
1401 }
1402 return HazardResult();
1403}
John Zulauf110413c2021-03-20 05:38:38 -06001404template <typename Detector>
1405HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001406 const VkImageSubresourceRange &subresource_range, bool is_depth_sliced,
1407 DetectOptions options) const {
John Zulauf110413c2021-03-20 05:38:38 -06001408 if (!SimpleBinding(image)) return HazardResult();
1409 const auto base_address = ResourceBaseAddress(image);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001410 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address,
1411 is_depth_sliced);
John Zulauf110413c2021-03-20 05:38:38 -06001412 const auto address_type = ImageAddressType(image);
1413 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf110413c2021-03-20 05:38:38 -06001414 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1415 if (hazard.hazard) return hazard;
1416 }
1417 return HazardResult();
1418}
John Zulauf69133422020-05-20 14:55:53 -06001419
John Zulauf540266b2020-04-06 18:54:53 -06001420HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1421 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001422 const VkExtent3D &extent, bool is_depth_sliced) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001423 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1424 subresource.layerCount};
John Zulauf110413c2021-03-20 05:38:38 -06001425 HazardDetector detector(current_usage);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001426 return DetectHazard(detector, image, subresource_range, offset, extent, is_depth_sliced, DetectOptions::kDetectAll);
John Zulauf1507ee42020-05-18 11:33:09 -06001427}
1428
1429HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001430 const VkImageSubresourceRange &subresource_range, bool is_depth_sliced) const {
John Zulauf69133422020-05-20 14:55:53 -06001431 HazardDetector detector(current_usage);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001432 return DetectHazard(detector, image, subresource_range, is_depth_sliced, DetectOptions::kDetectAll);
John Zulauf69133422020-05-20 14:55:53 -06001433}
1434
John Zulaufd0ec59f2021-03-13 14:25:08 -07001435HazardResult AccessContext::DetectHazard(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1436 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) const {
1437 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
1438 return DetectHazard(detector, view_gen, gen_type, DetectOptions::kDetectAll);
1439}
1440
John Zulauf69133422020-05-20 14:55:53 -06001441HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001442 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001443 const VkOffset3D &offset, const VkExtent3D &extent, bool is_depth_sliced) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001444 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001445 return DetectHazard(detector, image, subresource_range, offset, extent, is_depth_sliced, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001446}
1447
John Zulauf3d84f1b2020-03-09 13:33:25 -06001448class BarrierHazardDetector {
1449 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001450 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001451 SyncStageAccessFlags src_access_scope)
1452 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1453
John Zulauf5f13a792020-03-10 07:31:21 -06001454 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulaufec943ec2022-06-29 07:52:56 -06001455 return pos->second.DetectBarrierHazard(usage_index_, QueueSyncState::kQueueIdInvalid, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001456 }
John Zulauf14940722021-04-12 15:19:02 -06001457 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001458 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001459 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001460 }
1461
1462 private:
1463 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001464 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001465 SyncStageAccessFlags src_access_scope_;
1466};
1467
John Zulauf4a6105a2020-11-17 15:11:05 -07001468class EventBarrierHazardDetector {
1469 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001470 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulaufe0757ba2022-06-10 16:51:45 -06001471 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope, QueueId queue_id,
John Zulauf14940722021-04-12 15:19:02 -06001472 ResourceUsageTag scope_tag)
John Zulauf4a6105a2020-11-17 15:11:05 -07001473 : usage_index_(usage_index),
1474 src_exec_scope_(src_exec_scope),
1475 src_access_scope_(src_access_scope),
1476 event_scope_(event_scope),
John Zulaufe0757ba2022-06-10 16:51:45 -06001477 scope_queue_id_(queue_id),
1478 scope_tag_(scope_tag),
John Zulauf4a6105a2020-11-17 15:11:05 -07001479 scope_pos_(event_scope.cbegin()),
John Zulaufe0757ba2022-06-10 16:51:45 -06001480 scope_end_(event_scope.cend()) {}
John Zulauf4a6105a2020-11-17 15:11:05 -07001481
John Zulaufe0757ba2022-06-10 16:51:45 -06001482 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) {
1483 // Need to piece together coverage of pos->first range:
1484 // Copy the range as we'll be chopping it up as needed
1485 ResourceAccessRange range = pos->first;
1486 const ResourceAccessState &access = pos->second;
1487 HazardResult hazard;
1488
1489 bool in_scope = AdvanceScope(range);
1490 bool unscoped_tested = false;
1491 while (in_scope && !hazard.IsHazard()) {
1492 if (range.begin < ScopeBegin()) {
1493 if (!unscoped_tested) {
1494 unscoped_tested = true;
1495 hazard = access.DetectHazard(usage_index_);
1496 }
1497 // Note: don't need to check for in_scope as AdvanceScope true means range and ScopeRange intersect.
1498 // Thus a [ ScopeBegin, range.end ) will be non-empty.
1499 range.begin = ScopeBegin();
1500 } else { // in_scope implied that ScopeRange and range intersect
1501 hazard = access.DetectBarrierHazard(usage_index_, ScopeState(), src_exec_scope_, src_access_scope_, scope_queue_id_,
1502 scope_tag_);
1503 if (!hazard.IsHazard()) {
1504 range.begin = ScopeEnd();
1505 in_scope = AdvanceScope(range); // contains a non_empty check
1506 }
1507 }
John Zulauf4a6105a2020-11-17 15:11:05 -07001508 }
John Zulaufe0757ba2022-06-10 16:51:45 -06001509 if (range.non_empty() && !hazard.IsHazard() && !unscoped_tested) {
1510 hazard = access.DetectHazard(usage_index_);
1511 }
1512 return hazard;
John Zulauf4a6105a2020-11-17 15:11:05 -07001513 }
John Zulaufe0757ba2022-06-10 16:51:45 -06001514
John Zulauf14940722021-04-12 15:19:02 -06001515 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001516 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
1517 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1518 }
1519
1520 private:
John Zulaufe0757ba2022-06-10 16:51:45 -06001521 bool ScopeInvalid() const { return scope_pos_ == scope_end_; }
1522 bool ScopeValid() const { return !ScopeInvalid(); }
1523 void ScopeSeek(const ResourceAccessRange &range) { scope_pos_ = event_scope_.lower_bound(range); }
1524
1525 // Hiding away the std::pair grunge...
1526 ResourceAddress ScopeBegin() const { return scope_pos_->first.begin; }
1527 ResourceAddress ScopeEnd() const { return scope_pos_->first.end; }
1528 const ResourceAccessRange &ScopeRange() const { return scope_pos_->first; }
1529 const ResourceAccessState &ScopeState() const { return scope_pos_->second; }
1530
1531 bool AdvanceScope(const ResourceAccessRange &range) {
1532 // Note: non_empty is (valid && !empty), so don't change !non_empty to empty...
1533 if (!range.non_empty()) return false;
1534 if (ScopeInvalid()) return false;
1535
1536 if (ScopeRange().strictly_less(range)) {
1537 ScopeSeek(range);
1538 }
1539
1540 return ScopeValid() && ScopeRange().intersects(range);
1541 }
1542
John Zulauf4a6105a2020-11-17 15:11:05 -07001543 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001544 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001545 SyncStageAccessFlags src_access_scope_;
1546 const SyncEventState::ScopeMap &event_scope_;
John Zulaufe0757ba2022-06-10 16:51:45 -06001547 QueueId scope_queue_id_;
1548 const ResourceUsageTag scope_tag_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001549 SyncEventState::ScopeMap::const_iterator scope_pos_;
1550 SyncEventState::ScopeMap::const_iterator scope_end_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001551};
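// Descriptive note (not part of the original source): Detect() above clips each access range
// against the event's first-scope ranges; sub-ranges inside the scope use the scope-aware
// barrier test, while sub-ranges outside it fall back to the unscoped hazard check, so a
// single map entry may be tested both ways.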
1552
John Zulaufe0757ba2022-06-10 16:51:45 -06001553HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
1554 VkPipelineStageFlags2KHR src_exec_scope,
1555 const SyncStageAccessFlags &src_access_scope, QueueId queue_id,
1556 const SyncEventState &sync_event, AccessContext::DetectOptions options) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001557 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1558 // first access scope map to use, and there's no easy way to plumb it in below.
1559 const auto address_type = ImageAddressType(image);
1560 const auto &event_scope = sync_event.FirstScope(address_type);
1561
1562 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
John Zulaufe0757ba2022-06-10 16:51:45 -06001563 event_scope, queue_id, sync_event.first_scope_tag);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001564 return DetectHazard(detector, image, subresource_range, false, options);
John Zulauf4a6105a2020-11-17 15:11:05 -07001565}
1566
John Zulaufd0ec59f2021-03-13 14:25:08 -07001567HazardResult AccessContext::DetectImageBarrierHazard(const AttachmentViewGen &view_gen, const SyncBarrier &barrier,
1568 DetectOptions options) const {
1569 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, barrier.src_exec_scope.exec_scope,
1570 barrier.src_access_scope);
1571 return DetectHazard(detector, view_gen, AttachmentViewGen::Gen::kViewSubresource, options);
1572}
1573
Jeremy Gebben40a22942020-12-22 14:22:06 -07001574HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001575 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001576 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001577 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001578 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001579 return DetectHazard(detector, image, subresource_range, false, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001580}
1581
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001582HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001583 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulauf110413c2021-03-20 05:38:38 -06001584 image_barrier.barrier.src_access_scope, image_barrier.range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001585}
John Zulauf355e49b2020-04-24 15:11:15 -06001586
John Zulauf9cb530d2019-09-30 14:14:10 -06001587template <typename Flags, typename Map>
1588SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1589 SyncStageAccessFlags scope = 0;
1590 for (const auto &bit_scope : map) {
1591 if (flag_mask < bit_scope.first) break;
1592
1593 if (flag_mask & bit_scope.first) {
1594 scope |= bit_scope.second;
1595 }
1596 }
1597 return scope;
1598}
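// Worked example (descriptive comment, not part of the original source): for
// flag_mask = (bitA | bitB) and a map keyed by ascending single-bit values, the loop ORs in
// the scopes mapped to bitA and bitB, and stops at the first key greater than the whole
// mask, since no later single-bit key can intersect it.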
1599
Jeremy Gebben40a22942020-12-22 14:22:06 -07001600SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001601 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1602}
1603
Jeremy Gebben40a22942020-12-22 14:22:06 -07001604SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1605 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001606}
1607
Jeremy Gebben40a22942020-12-22 14:22:06 -07001608// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1609SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001610    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1611    // accesses (after factoring out common terms, the union of the per-stage stage/access intersections is the intersection
1612    // of the union of all stage/access types for all the stages with the same union for the access mask)...
John Zulauf9cb530d2019-09-30 14:14:10 -06001613 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1614}
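// Worked example (descriptive comment, not part of the original source): for a fragment
// shader stage mask and a shader-read access mask, AccessScopeByStage yields every access
// the stage can make, AccessScopeByAccess yields every stage that can make the access, and
// the intersection keeps only the fragment-shader read stage/access bits.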
1615
1616template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001617void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001618    // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs. READ usages
1619    // that do incremental updates)
John Zulauf4a6105a2020-11-17 15:11:05 -07001620 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001621 auto pos = accesses->lower_bound(range);
1622 if (pos == accesses->end() || !pos->first.intersects(range)) {
1623 // The range is empty, fill it with a default value.
1624 pos = action.Infill(accesses, pos, range);
1625 } else if (range.begin < pos->first.begin) {
1626 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001627 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001628 } else if (pos->first.begin < range.begin) {
1629 // Trim the beginning if needed
1630 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1631 ++pos;
1632 }
1633
1634 const auto the_end = accesses->end();
1635 while ((pos != the_end) && pos->first.intersects(range)) {
1636 if (pos->first.end > range.end) {
1637 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1638 }
1639
1640 pos = action(accesses, pos);
1641 if (pos == the_end) break;
1642
1643 auto next = pos;
1644 ++next;
1645 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1646 // Need to infill if next is disjoint
1647 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001648 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001649 next = action.Infill(accesses, next, new_range);
1650 }
1651 pos = next;
1652 }
1653}
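// Illustrative walk-through (descriptive comment, not part of the original source): updating
// range [5,15) against a map holding { [0,10): S } splits [0,10) at 5, applies the action to
// [5,10), then infills [10,15) via action.Infill(), so the full requested range is visited
// whether or not prior state exists.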
John Zulaufd5115702021-01-18 12:34:33 -07001654
1655// Give a comparable interface for range generators and ranges
1656template <typename Action>
John Zulaufcb7e1672022-05-04 13:46:08 -06001657void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
John Zulaufd5115702021-01-18 12:34:33 -07001658 assert(range);
1659 UpdateMemoryAccessState(accesses, *range, action);
1660}
1661
John Zulauf4a6105a2020-11-17 15:11:05 -07001662template <typename Action, typename RangeGen>
1663void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1664 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001665    RangeGen &range_gen = *range_gen_arg;  // Non-const references must be passed by * per style requirement, but deref-ing the * iterator everywhere is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001666 for (; range_gen->non_empty(); ++range_gen) {
1667 UpdateMemoryAccessState(accesses, *range_gen, action);
1668 }
1669}
John Zulauf9cb530d2019-09-30 14:14:10 -06001670
John Zulaufd0ec59f2021-03-13 14:25:08 -07001671template <typename Action, typename RangeGen>
1672void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, const RangeGen &range_gen_prebuilt) {
1673 RangeGen range_gen(range_gen_prebuilt); // RangeGenerators can be expensive to create from scratch... initialize from built
1674 for (; range_gen->non_empty(); ++range_gen) {
1675 UpdateMemoryAccessState(accesses, *range_gen, action);
1676 }
1677}
John Zulauf9cb530d2019-09-30 14:14:10 -06001678struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001679 using Iterator = ResourceAccessRangeMap::iterator;
1680 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001681 // this is only called on gaps, and never returns a gap.
1682 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001683 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001684 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001685 }
John Zulauf5f13a792020-03-10 07:31:21 -06001686
John Zulauf5c5e88d2019-12-26 11:22:02 -07001687 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001688 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001689 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001690 return pos;
1691 }
1692
John Zulauf43cc7462020-12-03 12:33:12 -07001693 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf14940722021-04-12 15:19:02 -06001694 SyncOrdering ordering_rule_, ResourceUsageTag tag_)
John Zulauf8e3c3e92021-01-06 11:19:36 -07001695 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001696 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001697 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001698 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001699 const SyncOrdering ordering_rule;
John Zulauf14940722021-04-12 15:19:02 -06001700 const ResourceUsageTag tag;
John Zulauf9cb530d2019-09-30 14:14:10 -06001701};
1702
John Zulauf4a6105a2020-11-17 15:11:05 -07001703// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001704struct PipelineBarrierOp {
1705 SyncBarrier barrier;
1706 bool layout_transition;
John Zulauf00119522022-05-23 19:07:42 -06001707 ResourceAccessState::QueueScopeOps scope;
1708 PipelineBarrierOp(QueueId queue_id, const SyncBarrier &barrier_, bool layout_transition_)
1709 : barrier(barrier_), layout_transition(layout_transition_), scope(queue_id) {}
John Zulaufd5115702021-01-18 12:34:33 -07001710 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf00119522022-05-23 19:07:42 -06001711 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope, barrier, layout_transition); }
John Zulauf1e331ec2020-12-04 18:29:38 -07001712};
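// Usage sketch (hypothetical, not part of the original source): barrier ops are function
// objects applied per touched access state by the functors below, e.g.
//   PipelineBarrierOp op(queue_id, sync_barrier, /*layout_transition=*/false);
//   op(&access_state);  // accumulates pending state, resolved later by ApplyPendingBarriers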
John Zulauf00119522022-05-23 19:07:42 -06001713
John Zulaufecf4ac52022-06-06 10:08:42 -06001714// Batch barrier ops don't modify in place, and thus don't need to hold pending state, and also are *never* layout transitions.
1715struct BatchBarrierOp : public PipelineBarrierOp {
1716 void operator()(ResourceAccessState *access_state) const {
1717 access_state->ApplyBarrier(scope, barrier, layout_transition);
1718 access_state->ApplyPendingBarriers(kInvalidTag); // There can't be any need for this tag
1719 }
1720 BatchBarrierOp(QueueId queue_id, const SyncBarrier &barrier_) : PipelineBarrierOp(queue_id, barrier_, false) {}
1721};
1722
John Zulauf4a6105a2020-11-17 15:11:05 -07001723// The barrier operation for wait events
1724struct WaitEventBarrierOp {
John Zulaufb7578302022-05-19 13:50:18 -06001725 ResourceAccessState::EventScopeOps scope_ops;
John Zulauf4a6105a2020-11-17 15:11:05 -07001726 SyncBarrier barrier;
1727 bool layout_transition;
John Zulaufe0757ba2022-06-10 16:51:45 -06001728
1729 WaitEventBarrierOp(const QueueId scope_queue_, const ResourceUsageTag scope_tag_, const SyncBarrier &barrier_,
John Zulauf00119522022-05-23 19:07:42 -06001730 bool layout_transition_)
John Zulaufe0757ba2022-06-10 16:51:45 -06001731 : scope_ops(scope_queue_, scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
John Zulaufb7578302022-05-19 13:50:18 -06001732 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope_ops, barrier, layout_transition); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001733};
John Zulauf1e331ec2020-12-04 18:29:38 -07001734
John Zulauf4a6105a2020-11-17 15:11:05 -07001735// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1736// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1737// of a collection is known/present.
John Zulauf5c628d02021-05-04 15:46:36 -06001738template <typename BarrierOp, typename OpVector = std::vector<BarrierOp>>
John Zulauf89311b42020-09-29 16:28:47 -06001739class ApplyBarrierOpsFunctor {
1740 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001741 using Iterator = ResourceAccessRangeMap::iterator;
John Zulauf5c628d02021-05-04 15:46:36 -06001742 // Only called with a gap, and pos at the lower_bound(range)
1743 inline Iterator Infill(ResourceAccessRangeMap *accesses, const Iterator &pos, const ResourceAccessRange &range) const {
1744 if (!infill_default_) {
1745 return pos;
1746 }
1747 ResourceAccessState default_state;
1748 auto inserted = accesses->insert(pos, std::make_pair(range, default_state));
1749 return inserted;
1750 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001751
John Zulauf5c628d02021-05-04 15:46:36 -06001752 Iterator operator()(ResourceAccessRangeMap *accesses, const Iterator &pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001753 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001754 for (const auto &op : barrier_ops_) {
1755 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001756 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001757
John Zulauf89311b42020-09-29 16:28:47 -06001758 if (resolve_) {
1759 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1760 // another walk
1761 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001762 }
1763 return pos;
1764 }
1765
John Zulauf89311b42020-09-29 16:28:47 -06001766 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf5c628d02021-05-04 15:46:36 -06001767 ApplyBarrierOpsFunctor(bool resolve, typename OpVector::size_type size_hint, ResourceUsageTag tag)
1768 : resolve_(resolve), infill_default_(false), barrier_ops_(), tag_(tag) {
John Zulaufd5115702021-01-18 12:34:33 -07001769 barrier_ops_.reserve(size_hint);
1770 }
John Zulauf5c628d02021-05-04 15:46:36 -06001771 void EmplaceBack(const BarrierOp &op) {
1772 barrier_ops_.emplace_back(op);
1773 infill_default_ |= op.layout_transition;
1774 }
John Zulauf89311b42020-09-29 16:28:47 -06001775
1776 private:
1777 bool resolve_;
John Zulauf5c628d02021-05-04 15:46:36 -06001778 bool infill_default_;
1779 OpVector barrier_ops_;
John Zulauf14940722021-04-12 15:19:02 -06001780 const ResourceUsageTag tag_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001781};
1782
John Zulauf4a6105a2020-11-17 15:11:05 -07001783// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1784// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1785template <typename BarrierOp>
John Zulauf5c628d02021-05-04 15:46:36 -06001786class ApplyBarrierFunctor : public ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>> {
1787 using Base = ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>>;
1788
John Zulauf4a6105a2020-11-17 15:11:05 -07001789 public:
John Zulaufee984022022-04-13 16:39:50 -06001790 ApplyBarrierFunctor(const BarrierOp &barrier_op) : Base(false, 1, kInvalidTag) { Base::EmplaceBack(barrier_op); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001791};
1792
John Zulauf1e331ec2020-12-04 18:29:38 -07001793// This functor resolves the pending state.
John Zulauf5c628d02021-05-04 15:46:36 -06001794class ResolvePendingBarrierFunctor : public ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>> {
1795 using Base = ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>>;
1796
John Zulauf1e331ec2020-12-04 18:29:38 -07001797 public:
John Zulauf5c628d02021-05-04 15:46:36 -06001798 ResolvePendingBarrierFunctor(ResourceUsageTag tag) : Base(true, 0, tag) {}
John Zulauf9cb530d2019-09-30 14:14:10 -06001799};
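// Composition sketch (hypothetical, not part of the original source): a typical
// pipeline-barrier update applies per-resource barriers with ApplyBarrierFunctor (pending
// state only), then commits them with a ResolvePendingBarrierFunctor pass:
//   UpdateMemoryAccessState(&map, range, ApplyBarrierFunctor<PipelineBarrierOp>(op));
//   UpdateMemoryAccessState(&map, kFullRange, ResolvePendingBarrierFunctor(tag));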
1800
John Zulauf8e3c3e92021-01-06 11:19:36 -07001801void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001802 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001803 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001804 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001805}
1806
John Zulauf8e3c3e92021-01-06 11:19:36 -07001807void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001808 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001809 if (!SimpleBinding(buffer)) return;
1810 const auto base_address = ResourceBaseAddress(buffer);
    UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const VkImageSubresourceRange &subresource_range, const ResourceUsageTag &tag) {
    if (!SimpleBinding(image)) return;
    const auto base_address = ResourceBaseAddress(image);
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address, false);
    const auto address_type = ImageAddressType(image);
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
    UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
}
void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag tag) {
    if (!SimpleBinding(image)) return;
    const auto base_address = ResourceBaseAddress(image);
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
                                                       base_address, false);
    const auto address_type = ImageAddressType(image);
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
    UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
}

void AccessContext::UpdateAccessState(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
                                      SyncStageAccessIndex current_usage, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
    const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
    if (!gen) return;
    subresource_adapter::ImageRangeGenerator range_gen(*gen);
    const auto address_type = view_gen.GetAddressType();
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
    ApplyUpdateAction(address_type, action, &range_gen);
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag tag) {
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
}
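
// Illustrative usage (a sketch, not part of the validation path): a hypothetical caller recording an
// access to the first mip level of an image could use the subresource-range overload above. The
// image/tag values and the specific SyncStageAccessIndex name are assumptions for illustration only.
//
//     const VkImageSubresourceRange first_mip = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     access_context.UpdateAccessState(image_state, SYNC_FRAGMENT_SHADER_SHADER_SAMPLED_READ,
//                                      SyncOrdering::kNonAttachment, first_mip, tag);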

template <typename Action, typename RangeGen>
void AccessContext::ApplyUpdateAction(AccessAddressType address_type, const Action &action, RangeGen *range_gen_arg) {
    assert(range_gen_arg);  // The old Google C++ style guide requires passing non-const objects by pointer rather than
                            // reference, but this isn't an optional arg.
    UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, range_gen_arg);
}

template <typename Action>
void AccessContext::ApplyUpdateAction(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, const Action &action) {
    const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
    if (!gen) return;
    UpdateMemoryAccessState(&GetAccessStateMap(view_gen.GetAddressType()), action, *gen);
}

void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state,
                                                  const AttachmentViewGenVector &attachment_views, uint32_t subpass,
                                                  const ResourceUsageTag tag) {
    UpdateStateResolveAction update(*this, tag);
    ResolveOperation(update, rp_state, attachment_views, subpass);
}

void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
                                                uint32_t subpass, const ResourceUsageTag tag) {
    const auto *attachment_ci = rp_state.createInfo.pAttachments;

    for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
        if (rp_state.attachment_last_subpass[i] == subpass) {
            const auto &view_gen = attachment_views[i];
            if (!view_gen.IsValid()) continue;  // UNUSED

            const auto &ci = attachment_ci[i];
            const bool has_depth = FormatHasDepth(ci.format);
            const bool has_stencil = FormatHasStencil(ci.format);
            const bool is_color = !(has_depth || has_stencil);
            const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;

            if (is_color && store_op_stores) {
                UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
                                  SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
            } else {
                if (has_depth && store_op_stores) {
                    UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
                                      SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
                }
                const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
                if (has_stencil && stencil_op_stores) {
                    UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
                                      SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
                }
            }
        }
    }
}
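
// Sketch of the store-op mapping above, with hypothetical attachment formats (illustrative only):
//   VK_FORMAT_R8G8B8A8_UNORM,    storeOp = STORE        -> color write over Gen::kRenderArea
//   VK_FORMAT_D24_UNORM_S8_UINT, storeOp = STORE        -> depth write over Gen::kDepthOnlyRenderArea
//                                stencilStoreOp = STORE -> stencil write over Gen::kStencilOnlyRenderArea
// An aspect whose store op is VK_ATTACHMENT_STORE_OP_NONE_EXT records no access at all.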

template <typename Action>
void AccessContext::ApplyToContext(const Action &barrier_action) {
    // Note: Barriers do *not* cross context boundaries, applying only to accesses within... (at least for renderpass subpasses)
    for (const auto address_type : kAddressTypes) {
        UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
    }
}

void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
    for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
        auto &context = contexts[subpass_index];
        ApplyTrackbackStackAction barrier_action(context.GetDstExternalTrackBack().barriers);
        for (const auto address_type : kAddressTypes) {
            context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
        }
    }
}

// Caller must ensure that the lifespan of this context is shorter than that of "from"
void AccessContext::ImportAsyncContexts(const AccessContext &from) { async_ = from.async_; }

// Suitable only for *subpass* access contexts
HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const AttachmentViewGen &attach_view) const {
    if (!attach_view.IsValid()) return HazardResult();

    // We should never ask for a transition from a context we don't have
    assert(track_back.source_subpass);

    // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
    // Hazard detection for the transition can be done against the merged barriers (it only uses the src_... fields)
    const auto merged_barrier = MergeBarriers(track_back.barriers);
    HazardResult hazard = track_back.source_subpass->DetectImageBarrierHazard(attach_view, merged_barrier, kDetectPrevious);
    if (!hazard.hazard) {
        // The Async hazard check is against the current context's async set.
        hazard = DetectImageBarrierHazard(attach_view, merged_barrier, kDetectAsync);
    }

    return hazard;
}
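
// Illustrative note on the merged barrier above (assumed MergeBarriers semantics): merging
//   {srcStage = COLOR_ATTACHMENT_OUTPUT, srcAccess = COLOR_ATTACHMENT_WRITE} with
//   {srcStage = LATE_FRAGMENT_TESTS,     srcAccess = DEPTH_STENCIL_ATTACHMENT_WRITE}
// would yield one barrier whose source scope is the union of both, which suffices here because
// only the src_... fields feed the transition hazard check.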

void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
                                            const AttachmentViewGenVector &attachment_views, const ResourceUsageTag tag) {
    const auto &transitions = rp_state.subpass_transitions[subpass];
    const ResourceAccessState empty_infill;
    for (const auto &transition : transitions) {
        const auto prev_pass = transition.prev_pass;
        const auto &view_gen = attachment_views[transition.attachment];
        if (!view_gen.IsValid()) continue;

        const auto *trackback = GetTrackBackFromSubpass(prev_pass);
        assert(trackback);

        // Import the attachments into the current context
        const auto *prev_context = trackback->source_subpass;
        assert(prev_context);
        const auto address_type = view_gen.GetAddressType();
        auto &target_map = GetAccessStateMap(address_type);
        ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
        prev_context->ResolveAccessRange(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action, &target_map,
                                         &empty_infill);
    }

    // If there were no transitions, skip this global map walk
    if (transitions.size()) {
        ResolvePendingBarrierFunctor apply_pending_action(tag);
        ApplyToContext(apply_pending_action);
    }
}

bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint, CMD_TYPE cmd_type) const {
    bool skip = false;
    const PIPELINE_STATE *pipe = nullptr;
    const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
    cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
    if (!pipe || !per_sets) {
        return skip;
    }
    const char *caller_name = CommandTypeString(cmd_type);

    using DescriptorClass = cvdescriptorset::DescriptorClass;
    using BufferDescriptor = cvdescriptorset::BufferDescriptor;
    using ImageDescriptor = cvdescriptorset::ImageDescriptor;
    using TexelDescriptor = cvdescriptorset::TexelDescriptor;

    for (const auto &stage_state : pipe->stage_state) {
        const auto raster_state = pipe->RasterizationState();
        if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
            continue;
        }
        for (const auto &set_binding : stage_state.descriptor_uses) {
            const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
            cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
                                                                                  set_binding.first.binding);
            const auto descriptor_type = binding_it.GetType();
            cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
            auto array_idx = 0;

            if (binding_it.IsVariableDescriptorCount()) {
                index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
            }
            SyncStageAccessIndex sync_index =
                GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);

            for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
                uint32_t index = i - index_range.start;
                const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
                switch (descriptor->GetClass()) {
                    case DescriptorClass::ImageSampler:
                    case DescriptorClass::Image: {
                        if (descriptor->Invalid()) {
                            continue;
                        }

                        // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
                        const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
                        const auto *img_view_state = image_descriptor->GetImageViewState();
                        VkImageLayout image_layout = image_descriptor->GetImageLayout();

                        HazardResult hazard;
                        // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
                        // Descriptors, so we do not have to worry about depth slicing here.
                        // See: VUID 00343
                        assert(!img_view_state->IsDepthSliced());
                        const IMAGE_STATE *img_state = img_view_state->image_state.get();
                        const auto &subresource_range = img_view_state->normalized_subresource_range;

                        if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
                            const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
                            const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
                            // Input attachments are subject to raster ordering rules
                            hazard =
                                current_context_->DetectHazard(*img_state, sync_index, subresource_range, SyncOrdering::kRaster,
                                                               offset, extent, img_view_state->IsDepthSliced());
                        } else {
                            hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
                                                                    img_view_state->IsDepthSliced());
                        }

                        if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
                            skip |= sync_state_->LogError(
                                img_view_state->image_view(), string_SyncHazardVUID(hazard.hazard),
                                "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
                                ", index %" PRIu32 ". Access info %s.",
                                caller_name, string_SyncHazard(hazard.hazard),
                                sync_state_->report_data->FormatHandle(img_view_state->image_view()).c_str(),
                                sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
                                sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
                                sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
                                string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
                                set_binding.first.binding, index, FormatHazard(hazard).c_str());
                        }
                        break;
                    }
                    case DescriptorClass::TexelBuffer: {
                        const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
                        if (texel_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_view_state = texel_descriptor->GetBufferViewState();
                        const auto *buf_state = buf_view_state->buffer_state.get();
                        const ResourceAccessRange range = MakeRange(*buf_view_state);
                        auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
                        if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
                            skip |= sync_state_->LogError(
                                buf_view_state->buffer_view(), string_SyncHazardVUID(hazard.hazard),
                                "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
                                caller_name, string_SyncHazard(hazard.hazard),
                                sync_state_->report_data->FormatHandle(buf_view_state->buffer_view()).c_str(),
                                sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
                                sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
                                sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
                                string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
                                FormatHazard(hazard).c_str());
                        }
                        break;
                    }
                    case DescriptorClass::GeneralBuffer: {
                        const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
                        if (buffer_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_state = buffer_descriptor->GetBufferState();
                        const ResourceAccessRange range =
                            MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
                        auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
                        if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
                            skip |= sync_state_->LogError(
                                buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                                "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
                                caller_name, string_SyncHazard(hazard.hazard),
                                sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
                                sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
                                sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
                                sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
                                string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
                                FormatHazard(hazard).c_str());
                        }
                        break;
                    }
                    // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
                    default:
                        break;
                }
            }
        }
    }
    return skip;
}

void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
                                                                 const ResourceUsageTag tag) {
    const PIPELINE_STATE *pipe = nullptr;
    const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
    cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
    if (!pipe || !per_sets) {
        return;
    }

    using DescriptorClass = cvdescriptorset::DescriptorClass;
    using BufferDescriptor = cvdescriptorset::BufferDescriptor;
    using ImageDescriptor = cvdescriptorset::ImageDescriptor;
    using TexelDescriptor = cvdescriptorset::TexelDescriptor;

    for (const auto &stage_state : pipe->stage_state) {
        const auto raster_state = pipe->RasterizationState();
        if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
            continue;
        }
        for (const auto &set_binding : stage_state.descriptor_uses) {
            const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
            cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
                                                                                  set_binding.first.binding);
            const auto descriptor_type = binding_it.GetType();
            cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
            auto array_idx = 0;

            if (binding_it.IsVariableDescriptorCount()) {
                index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
            }
            SyncStageAccessIndex sync_index =
                GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);

            for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
                const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
                switch (descriptor->GetClass()) {
                    case DescriptorClass::ImageSampler:
                    case DescriptorClass::Image: {
                        // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
                        const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
                        if (image_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *img_view_state = image_descriptor->GetImageViewState();
                        // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
                        // Descriptors, so we do not have to worry about depth slicing here.
                        // See: VUID 00343
                        assert(!img_view_state->IsDepthSliced());
                        const IMAGE_STATE *img_state = img_view_state->image_state.get();
                        if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
                            const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
                            const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
                            current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kRaster,
                                                                img_view_state->normalized_subresource_range, offset, extent, tag);
                        } else {
                            current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kNonAttachment,
                                                                img_view_state->normalized_subresource_range, tag);
                        }
                        break;
                    }
                    case DescriptorClass::TexelBuffer: {
                        const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
                        if (texel_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_view_state = texel_descriptor->GetBufferViewState();
                        const auto *buf_state = buf_view_state->buffer_state.get();
                        const ResourceAccessRange range = MakeRange(*buf_view_state);
                        current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
                        break;
                    }
                    case DescriptorClass::GeneralBuffer: {
                        const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
                        if (buffer_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_state = buffer_descriptor->GetBufferState();
                        const ResourceAccessRange range =
                            MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
                        current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
                        break;
                    }
                    // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
                    default:
                        break;
                }
            }
        }
    }
}
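
// Illustrative call pairing (hypothetical, simplified): a draw command would typically validate
// first and then record with a fresh tag, along the lines of
//
//     skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAW);
//     // ... if not skipped ...
//     const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
//     cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);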

bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return skip;
    }

    const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
    const auto &binding_buffers_size = binding_buffers.size();
    const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();

    for (size_t i = 0; i < binding_descriptions_size; ++i) {
        const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
        if (binding_description.binding < binding_buffers_size) {
            const auto &binding_buffer = binding_buffers[binding_description.binding];
            if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;

            auto *buf_state = binding_buffer.buffer_state.get();
            const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
                                                             vertexCount, binding_description.stride);
            auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
            if (hazard.hazard) {
                skip |= sync_state_->LogError(
                    buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
                    CommandTypeString(cmd_type), string_SyncHazard(hazard.hazard),
                    sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
                    sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatHazard(hazard).c_str());
            }
        }
    }
    return skip;
}
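
// Worked example for the range computation above (assumed GetBufferRange semantics: the byte span
// [offset + firstVertex * stride, offset + (firstVertex + vertexCount) * stride), clamped to the
// buffer size). With hypothetical values offset = 256, stride = 16, firstVertex = 10,
// vertexCount = 4, and buffer size = 1024, the checked range is [416, 480).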

void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag tag) {
    const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return;
    }
    const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
    const auto &binding_buffers_size = binding_buffers.size();
    const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();

    for (size_t i = 0; i < binding_descriptions_size; ++i) {
        const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
        if (binding_description.binding < binding_buffers_size) {
            const auto &binding_buffer = binding_buffers[binding_description.binding];
            if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;

            auto *buf_state = binding_buffer.buffer_state.get();
            const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
                                                             vertexCount, binding_description.stride);
            current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
                                                SyncOrdering::kNonAttachment, range, tag);
        }
    }
}

bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, CMD_TYPE cmd_type) const {
    bool skip = false;
    if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) {
        return skip;
    }

    auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
    const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
    const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
                                                     firstIndex, indexCount, index_size);
    auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
    if (hazard.hazard) {
        skip |= sync_state_->LogError(
            index_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
            CommandTypeString(cmd_type), string_SyncHazard(hazard.hazard),
            sync_state_->report_data->FormatHandle(index_buf_state->buffer()).c_str(),
            sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatHazard(hazard).c_str());
    }

    // TODO: For now, we detect against the whole vertex buffer, since the index buffer contents can change up to QueueSubmit.
    // We will detect a more accurate range in the future.
    skip |= ValidateDrawVertex(UINT32_MAX, 0, cmd_type);
    return skip;
}
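
// Worked example (hypothetical values): with VK_INDEX_TYPE_UINT16 the index size is 2 bytes, so
// firstIndex = 6 and indexCount = 3 at binding offset 0 yield the checked byte range
// [6 * 2, (6 + 3) * 2) = [12, 18).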

void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag tag) {
    if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) return;

    auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
    const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
    const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
                                                     firstIndex, indexCount, index_size);
    current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);

    // TODO: For now, we record against the whole vertex buffer, since the index buffer contents can change up to QueueSubmit.
    // We will detect a more accurate range in the future.
    RecordDrawVertex(UINT32_MAX, 0, tag);
}

bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(CMD_TYPE cmd_type) const {
    bool skip = false;
    if (!current_renderpass_context_) return skip;
    skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), cmd_type);
    return skip;
}

void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag tag) {
    if (current_renderpass_context_) {
        current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
    }
}

QueueId CommandBufferAccessContext::GetQueueId() const { return QueueSyncState::kQueueIdInvalid; }
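
// Note (inferred): at command buffer record time there is no associated queue; a usable QueueId
// only exists for queue-level (submit time) execution contexts, hence the invalid sentinel above.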

ResourceUsageTag CommandBufferAccessContext::RecordBeginRenderPass(CMD_TYPE cmd_type, const RENDER_PASS_STATE &rp_state,
                                                                   const VkRect2D &render_area,
                                                                   const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
    // Create an access context for the current renderpass.
    const auto barrier_tag = NextCommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kSubpassTransition);
    const auto load_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kLoadOp);
    render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
    current_renderpass_context_ = &render_pass_contexts_.back();
    current_renderpass_context_->RecordBeginRenderPass(barrier_tag, load_tag);
    current_context_ = &current_renderpass_context_->CurrentContext();
    return barrier_tag;
}

ResourceUsageTag CommandBufferAccessContext::RecordNextSubpass(const CMD_TYPE cmd_type) {
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return NextCommandTag(cmd_type);

    auto store_tag = NextCommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kStoreOp);
    auto barrier_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kSubpassTransition);
    auto load_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kLoadOp);

    current_renderpass_context_->RecordNextSubpass(store_tag, barrier_tag, load_tag);
    current_context_ = &current_renderpass_context_->CurrentContext();
    return barrier_tag;
}

ResourceUsageTag CommandBufferAccessContext::RecordEndRenderPass(const CMD_TYPE cmd_type) {
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return NextCommandTag(cmd_type);

    auto store_tag = NextCommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kStoreOp);
    auto barrier_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kSubpassTransition);

    current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, store_tag, barrier_tag);
    current_context_ = &cb_access_context_;
    current_renderpass_context_ = nullptr;
    return barrier_tag;
}

void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
    // Erase is okay with the key not being present
    auto event_state = sync_state_->Get<EVENT_STATE>(event);
    if (event_state) {
        GetCurrentEventsContext()->Destroy(event_state.get());
    }
}

// This is called on the recorded cb context
bool CommandBufferAccessContext::ValidateFirstUse(CommandExecutionContext *proxy_context, const char *func_name,
                                                  uint32_t index) const {
    assert(proxy_context);
    SyncEventsContext *const events_context = proxy_context->GetCurrentEventsContext();
    AccessContext *const access_context = proxy_context->GetCurrentAccessContext();
    const QueueId queue_id = proxy_context->GetQueueId();
    const ResourceUsageTag base_tag = proxy_context->GetTagLimit();
    bool skip = false;
    ResourceUsageRange tag_range = {0, 0};
    const AccessContext *recorded_context = GetCurrentAccessContext();
    assert(recorded_context);
    HazardResult hazard;
    auto log_msg = [this](const HazardResult &hazard, const CommandExecutionContext &exec_context, const char *func_name,
                          uint32_t index) {
        const auto handle = exec_context.Handle();
        const auto recorded_handle = cb_state_->commandBuffer();
        const auto *report_data = sync_state_->report_data;
        return sync_state_->LogError(handle, string_SyncHazardVUID(hazard.hazard),
                                     "%s: Hazard %s for entry %" PRIu32 ", %s, Recorded access info %s. Access info %s.", func_name,
                                     string_SyncHazard(hazard.hazard), index, report_data->FormatHandle(recorded_handle).c_str(),
                                     FormatUsage(*hazard.recorded_access).c_str(), exec_context.FormatHazard(hazard).c_str());
    };
    const ReplayTrackbackBarriersAction *replay_context = nullptr;
    for (const auto &sync_op : sync_ops_) {
        // We update the range to include any layout transition first use writes,
        // as they are stored along with the source scope (as an effective barrier) when recorded.
        tag_range.end = sync_op.tag + 1;
        skip |= sync_op.sync_op->ReplayValidate(sync_op.tag, *this, base_tag, proxy_context);

        hazard = recorded_context->DetectFirstUseHazard(queue_id, tag_range, *access_context, replay_context);
        if (hazard.hazard) {
            skip |= log_msg(hazard, *proxy_context, func_name, index);
        }
        // NOTE: Add call to replay validate here when we add support for syncop with non-trivial replay
        // Record the barrier into the proxy context.
        sync_op.sync_op->ReplayRecord(queue_id, base_tag + sync_op.tag, access_context, events_context);
        replay_context = sync_op.sync_op->GetReplayTrackback();
        tag_range.begin = tag_range.end;
    }

    // Renderpasses may not cross command buffer boundaries
    assert(replay_context == nullptr);

    // ...and check anything after the last syncop
    tag_range.end = ResourceUsageRecord::kMaxIndex;
    hazard = recorded_context->DetectFirstUseHazard(queue_id, tag_range, *access_context, replay_context);
    if (hazard.hazard) {
        skip |= log_msg(hazard, *proxy_context, func_name, index);
    }

    return skip;
}
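
// Illustrative call pattern (hypothetical, simplified): when validating vkCmdExecuteCommands, each
// recorded secondary command buffer context might be replayed against the primary's proxy context:
//
//     for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
//         skip |= recorded_contexts[cb_index]->ValidateFirstUse(&proxy_context, "vkCmdExecuteCommands", cb_index);
//     }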

void CommandBufferAccessContext::RecordExecutedCommandBuffer(const CommandBufferAccessContext &recorded_cb_context) {
    SyncEventsContext *const events_context = GetCurrentEventsContext();
    AccessContext *const access_context = GetCurrentAccessContext();
    const QueueId queue_id = GetQueueId();

    const AccessContext *recorded_context = recorded_cb_context.GetCurrentAccessContext();
    assert(recorded_context);

    // Just run through the barriers, ignoring the usage from the recorded context, as Resolve will overwrite outdated state
    const ResourceUsageTag base_tag = GetTagLimit();
    for (const auto &sync_op : recorded_cb_context.GetSyncOps()) {
        // We update the range to include any layout transition first use writes,
        // as they are stored along with the source scope (as an effective barrier) when recorded.
        sync_op.sync_op->ReplayRecord(queue_id, base_tag + sync_op.tag, access_context, events_context);
    }

    ResourceUsageRange tag_range = ImportRecordedAccessLog(recorded_cb_context);
    assert(base_tag == tag_range.begin);  // to ensure the tag offset calculation agrees
    ResolveExecutedCommandBuffer(*recorded_context, tag_range.begin);
}

void CommandBufferAccessContext::ResolveExecutedCommandBuffer(const AccessContext &recorded_context, ResourceUsageTag offset) {
    auto tag_offset = [offset](ResourceAccessState *access) { access->OffsetTag(offset); };
    GetCurrentAccessContext()->ResolveFromContext(tag_offset, recorded_context);
}

ResourceUsageRange CommandExecutionContext::ImportRecordedAccessLog(const CommandBufferAccessContext &recorded_context) {
    // The execution references ensure lifespan for the referenced child CB's...
    ResourceUsageRange tag_range(GetTagLimit(), 0);
    InsertRecordedAccessLogEntries(recorded_context);
    tag_range.end = GetTagLimit();
    return tag_range;
}

void CommandBufferAccessContext::InsertRecordedAccessLogEntries(const CommandBufferAccessContext &recorded_context) {
    cbs_referenced_.emplace(recorded_context.GetCBStateShared());
    access_log_.insert(access_log_.end(), recorded_context.access_log_.cbegin(), recorded_context.access_log_.end());
}

ResourceUsageTag CommandBufferAccessContext::NextSubcommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
    ResourceUsageTag next = access_log_.size();
    access_log_.emplace_back(command, command_number_, subcommand, ++subcommand_number_, cb_state_.get(), reset_count_);
    return next;
}

ResourceUsageTag CommandBufferAccessContext::NextCommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
    command_number_++;
    subcommand_number_ = 0;
    ResourceUsageTag next = access_log_.size();
    access_log_.emplace_back(command, command_number_, subcommand, subcommand_number_, cb_state_.get(), reset_count_);
    return next;
}

ResourceUsageTag CommandBufferAccessContext::NextIndexedCommandTag(CMD_TYPE command, uint32_t index) {
    if (index == 0) {
        return NextCommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
    }
    return NextSubcommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
}
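
// Illustrative tag sequence (hypothetical command type): recording a subpass advance as in
// RecordNextSubpass above yields three consecutive access_log_ entries for one command:
//     NextCommandTag(CMD_NEXTSUBPASS, kStoreOp)              -> tag N   (command_number_ bumped, subcommand 0)
//     NextSubcommandTag(CMD_NEXTSUBPASS, kSubpassTransition) -> tag N+1 (subcommand 1)
//     NextSubcommandTag(CMD_NEXTSUBPASS, kLoadOp)            -> tag N+2 (subcommand 2)
// A tag is just the log index, so it both orders accesses and maps back to command/subcommand for reporting.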

void CommandBufferAccessContext::RecordSyncOp(SyncOpPointer &&sync_op) {
    auto tag = sync_op->Record(this);
    // As renderpass operations can have side effects on the command buffer access context,
    // update the sync operation to record these if any.
    if (current_renderpass_context_) {
        const auto &rpc = *current_renderpass_context_;
        sync_op->SetReplayContext(rpc.GetCurrentSubpass(), rpc.GetReplayContext());
    }
    sync_ops_.emplace_back(tag, std::move(sync_op));
}

class HazardDetectFirstUse {
  public:
    HazardDetectFirstUse(const ResourceAccessState &recorded_use, QueueId queue_id, const ResourceUsageRange &tag_range,
                         const ReplayTrackbackBarriersAction *replay_barrier)
        : recorded_use_(recorded_use), queue_id_(queue_id), tag_range_(tag_range), replay_barrier_(replay_barrier) {}
    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
        if (replay_barrier_) {
            // Intentional copy to apply the replay barrier
            auto access = pos->second;
            (*replay_barrier_)(&access);
            return access.DetectHazard(recorded_use_, queue_id_, tag_range_);
        }
        return pos->second.DetectHazard(recorded_use_, queue_id_, tag_range_);
    }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
        return pos->second.DetectAsyncHazard(recorded_use_, tag_range_, start_tag);
    }

  private:
    const ResourceAccessState &recorded_use_;
    const QueueId queue_id_;
    const ResourceUsageRange &tag_range_;
    const ReplayTrackbackBarriersAction *replay_barrier_;
};

// This is called on the *recorded* command buffer's access context, with the *active* access context passed in, against which
// hazards will be detected
HazardResult AccessContext::DetectFirstUseHazard(QueueId queue_id, const ResourceUsageRange &tag_range,
                                                 const AccessContext &access_context,
                                                 const ReplayTrackbackBarriersAction *replay_barrier) const {
    HazardResult hazard;
    for (const auto address_type : kAddressTypes) {
        const auto &recorded_access_map = GetAccessStateMap(address_type);
        for (const auto &recorded_access : recorded_access_map) {
            // Cull any entries not in the current tag range
            if (!recorded_access.second.FirstAccessInTagRange(tag_range)) continue;
            HazardDetectFirstUse detector(recorded_access.second, queue_id, tag_range, replay_barrier);
            hazard = access_context.DetectHazard(address_type, detector, recorded_access.first, DetectOptions::kDetectAll);
            if (hazard.hazard) break;
        }
    }

    return hazard;
}
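
// Note (inferred): the FirstAccessInTagRange cull above keeps this walk proportional to the number
// of recorded first accesses inside the tag window, rather than the size of the full recorded map.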

bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &exec_context,
                                                            const CMD_BUFFER_STATE &cmd_buffer, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto &sync_state = exec_context.GetSyncState();
    const auto *pipe = cmd_buffer.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return skip;
    }

    const auto raster_state = pipe->RasterizationState();
    if (raster_state && raster_state->rasterizerDiscardEnable) {
        return skip;
    }
    const char *caller_name = CommandTypeString(cmd_type);
    const auto &list = pipe->fragmentShader_writable_output_location_list;
    const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];

    const auto &current_context = CurrentContext();
    // The subpass's input attachments have already been handled in ValidateDispatchDrawDescriptorSet
    if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
        for (const auto location : list) {
            if (location >= subpass.colorAttachmentCount ||
                subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
                continue;
            }
            const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
            if (!view_gen.IsValid()) continue;
            HazardResult hazard =
                current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
                                             SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment);
            if (hazard.hazard) {
                const VkImageView view_handle = view_gen.GetViewState()->image_view();
                skip |= sync_state.LogError(view_handle, string_SyncHazardVUID(hazard.hazard),
                                            "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
                                            caller_name, string_SyncHazard(hazard.hazard),
                                            sync_state.report_data->FormatHandle(view_handle).c_str(),
                                            sync_state.report_data->FormatHandle(cmd_buffer.commandBuffer()).c_str(),
                                            cmd_buffer.activeSubpass, location, exec_context.FormatHazard(hazard).c_str());
            }
        }
    }

    // PHASE1 TODO: Add layout based read/vs. write selection.
    // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
    const auto ds_state = pipe->DepthStencilState();
    const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);

    if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
        const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
        const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
        bool depth_write = false, stencil_write = false;

        // PHASE1 TODO: These validations should be in core_checks.
        if (!FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable && ds_state->depthWriteEnable &&
            IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
            depth_write = true;
        }
        // PHASE1 TODO: It needs to check if stencil is writable.
        // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
        // If the depth test is disabled, it is treated as passing, and then depthFailOp doesn't run.
        // PHASE1 TODO: These validations should be in core_checks.
        if (!FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
            IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
            stencil_write = true;
        }

        // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
        if (depth_write) {
            HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
                                                               SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
                                                               SyncOrdering::kDepthStencilAttachment);
            if (hazard.hazard) {
                skip |= sync_state.LogError(
                    view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
                    "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
                    caller_name, string_SyncHazard(hazard.hazard),
                    sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
                    sync_state.report_data->FormatHandle(cmd_buffer.commandBuffer()).c_str(), cmd_buffer.activeSubpass,
                    exec_context.FormatHazard(hazard).c_str());
            }
        }
        if (stencil_write) {
            HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
                                                               SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
                                                               SyncOrdering::kDepthStencilAttachment);
            if (hazard.hazard) {
                skip |= sync_state.LogError(
                    view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
                    "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
                    caller_name, string_SyncHazard(hazard.hazard),
                    sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
                    sync_state.report_data->FormatHandle(cmd_buffer.commandBuffer()).c_str(), cmd_buffer.activeSubpass,
                    exec_context.FormatHazard(hazard).c_str());
            }
        }
    }
    return skip;
}
2632
sjfricke0bea06e2022-06-05 09:22:26 +09002633void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd_buffer, const ResourceUsageTag tag) {
2634 const auto *pipe = cmd_buffer.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002635 if (!pipe) {
2636 return;
2637 }
2638
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002639 const auto *raster_state = pipe->RasterizationState();
2640 if (raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002641 return;
2642 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002643 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002644 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg61870c22020-06-09 14:51:50 -06002645
John Zulauf1a224292020-06-30 14:52:13 -06002646 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002647 // The subpass's input attachments have already been handled in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002648 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2649 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002650 if (location >= subpass.colorAttachmentCount ||
2651 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002652 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002653 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002654 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2655 current_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
2656 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment,
2657 tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002658 }
2659 }
locke-lunarg37047832020-06-12 13:44:45 -06002660
2661 // PHASE1 TODO: Add layout-based read vs. write selection.
2662 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002663 const auto *ds_state = pipe->DepthStencilState();
2664 const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002665 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2666 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2667 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002668 bool depth_write = false, stencil_write = false;
John Zulaufd0ec59f2021-03-13 14:25:08 -07002669 const bool has_depth = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT);
2670 const bool has_stencil = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002671
2672 // PHASE1 TODO: These validation checks should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002673 if (has_depth && !FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable &&
2674 ds_state->depthWriteEnable && IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
locke-lunarg37047832020-06-12 13:44:45 -06002675 depth_write = true;
2676 }
2677 // PHASE1 TODO: It needs to check whether the stencil is writable.
2678 //              If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2679 //              If the depth test is disabled, it is considered to pass, and then depthFailOp doesn't run.
2680 // PHASE1 TODO: These validation checks should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002681 if (has_stencil && !FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002682 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2683 stencil_write = true;
2684 }
2685
John Zulaufd0ec59f2021-03-13 14:25:08 -07002686 if (depth_write || stencil_write) {
2687 const auto ds_gentype = view_gen.GetDepthStencilRenderAreaGenType(depth_write, stencil_write);
2688 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2689 current_context.UpdateAccessState(view_gen, ds_gentype, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2690 SyncOrdering::kDepthStencilAttachment, tag);
locke-lunarg37047832020-06-12 13:44:45 -06002691 }
locke-lunarg61870c22020-06-09 14:51:50 -06002692 }
2693}
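
// A minimal sketch of how the Validate*/Record* pair above is driven per draw (hypothetical
// caller; the real call sites and exact signatures live elsewhere in this file). Validation is
// const and report-only; recording then commits the attachment accesses under the draw's tag:
//
//   skip |= rp_context->ValidateDrawSubpassAttachment(exec_context, cmd_buffer, cmd_type);
//   ...
//   rp_context->RecordDrawSubpassAttachment(cmd_buffer, tag);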
2694
sjfricke0bea06e2022-06-05 09:22:26 +09002695bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &exec_context, CMD_TYPE cmd_type) const {
John Zulaufaff20662020-06-01 14:07:58 -06002696 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002697 bool skip = false;
sjfricke0bea06e2022-06-05 09:22:26 +09002698 skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, cmd_type,
John Zulaufb027cdb2020-05-21 14:25:22 -06002699 current_subpass_);
John Zulaufbb890452021-12-14 11:30:18 -07002700 skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
sjfricke0bea06e2022-06-05 09:22:26 +09002701 cmd_type);
John Zulaufaff20662020-06-01 14:07:58 -06002702
John Zulauf355e49b2020-04-24 15:11:15 -06002703 const auto next_subpass = current_subpass_ + 1;
ziga-lunarg31a3e772022-03-22 11:48:46 +01002704 if (next_subpass >= subpass_contexts_.size()) {
2705 return skip;
2706 }
John Zulauf1507ee42020-05-18 11:33:09 -06002707 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002708 skip |=
sjfricke0bea06e2022-06-05 09:22:26 +09002709 next_context.ValidateLayoutTransitions(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, cmd_type);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002710 if (!skip) {
2711 // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2712 // on a copy of the (empty) next context.
2713 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2714 AccessContext temp_context(next_context);
John Zulaufee984022022-04-13 16:39:50 -06002715 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kInvalidTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002716 skip |=
sjfricke0bea06e2022-06-05 09:22:26 +09002717 temp_context.ValidateLoadOperation(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, cmd_type);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002718 }
John Zulauf7635de32020-05-29 17:14:15 -06002719 return skip;
2720}
sjfricke0bea06e2022-06-05 09:22:26 +09002721bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &exec_context, CMD_TYPE cmd_type) const {
John Zulaufaff20662020-06-01 14:07:58 -06002722 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002723 bool skip = false;
sjfricke0bea06e2022-06-05 09:22:26 +09002724 skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, cmd_type,
John Zulauf7635de32020-05-29 17:14:15 -06002725 current_subpass_);
sjfricke0bea06e2022-06-05 09:22:26 +09002726 skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
2727 cmd_type);
2728 skip |= ValidateFinalSubpassLayoutTransitions(exec_context, cmd_type);
John Zulauf355e49b2020-04-24 15:11:15 -06002729 return skip;
2730}
2731
John Zulauf64ffe552021-02-06 10:25:07 -07002732AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002733 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002734}
2735
John Zulaufbb890452021-12-14 11:30:18 -07002736bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &exec_context,
sjfricke0bea06e2022-06-05 09:22:26 +09002737 CMD_TYPE cmd_type) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002738 bool skip = false;
2739
John Zulauf7635de32020-05-29 17:14:15 -06002740 // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2741 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2742 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2743 // to apply, and only copying in that case, if this proves a hot spot.
2744 std::unique_ptr<AccessContext> proxy_for_current;
2745
John Zulauf355e49b2020-04-24 15:11:15 -06002746 // Validate the "finalLayout" transitions to external
2747 // Get them from where we're hiding them in the extra entry.
2748 const auto &final_transitions = rp_state_->subpass_transitions.back();
2749 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002750 const auto &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002751 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufbb890452021-12-14 11:30:18 -07002752 assert(trackback.source_subpass);  // Every transition is given an implicit trackback if the StateTracker is working correctly
2753 auto *context = trackback.source_subpass;
John Zulauf7635de32020-05-29 17:14:15 -06002754
2755 if (transition.prev_pass == current_subpass_) {
2756 if (!proxy_for_current) {
2757 // We haven't recorded the resolve/store for the current_subpass, so we need to copy current and update it *as if*
2757 // those operations had been recorded.
John Zulauf64ffe552021-02-06 10:25:07 -07002758 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002759 }
2760 context = proxy_for_current.get();
2761 }
2762
John Zulaufa0a98292020-09-18 09:30:10 -06002763 // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2764 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002765 auto hazard = context->DetectImageBarrierHazard(view_gen, merged_barrier, AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002766 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09002767 const char *func_name = CommandTypeString(cmd_type);
John Zulaufee984022022-04-13 16:39:50 -06002768 if (hazard.tag == kInvalidTag) {
2769 // Hazard vs. ILT
John Zulaufbb890452021-12-14 11:30:18 -07002770 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06002771 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
2772 "%s: Hazard %s vs. store/resolve operations in subpass %" PRIu32 " for attachment %" PRIu32
2773 " final image layout transition (old_layout: %s, new_layout: %s).",
2774 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2775 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout));
2776 } else {
John Zulaufbb890452021-12-14 11:30:18 -07002777 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06002778 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
2779 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2780 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2781 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2782 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf397e68b2022-04-19 11:44:07 -06002783 exec_context.FormatHazard(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06002784 }
John Zulauf355e49b2020-04-24 15:11:15 -06002785 }
2786 }
2787 return skip;
2788}
2789
John Zulauf14940722021-04-12 15:19:02 -06002790void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002791 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002792 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002793}
2794
John Zulauf14940722021-04-12 15:19:02 -06002795void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002796 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2797 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf1507ee42020-05-18 11:33:09 -06002798
2799 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2800 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002801 const AttachmentViewGen &view_gen = attachment_views_[i];
2802 if (!view_gen.IsValid()) continue; // UNUSED
John Zulauf1507ee42020-05-18 11:33:09 -06002803
2804 const auto &ci = attachment_ci[i];
2805 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002806 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002807 const bool is_color = !(has_depth || has_stencil);
2808
2809 if (is_color) {
John Zulauf57261402021-08-13 11:32:06 -06002810 const SyncStageAccessIndex load_op = ColorLoadUsage(ci.loadOp);
2811 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2812 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea, load_op,
2813 SyncOrdering::kColorAttachment, tag);
2814 }
John Zulauf1507ee42020-05-18 11:33:09 -06002815 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06002816 if (has_depth) {
John Zulauf57261402021-08-13 11:32:06 -06002817 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.loadOp);
2818 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2819 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_op,
2820 SyncOrdering::kDepthStencilAttachment, tag);
2821 }
John Zulauf1507ee42020-05-18 11:33:09 -06002822 }
2823 if (has_stencil) {
John Zulauf57261402021-08-13 11:32:06 -06002824 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.stencilLoadOp);
2825 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2826 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, load_op,
2827 SyncOrdering::kDepthStencilAttachment, tag);
2828 }
John Zulauf1507ee42020-05-18 11:33:09 -06002829 }
2830 }
2831 }
2832 }
2833}
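
// Note on the load-op usage mapping above: per the Vulkan spec, VK_ATTACHMENT_LOAD_OP_LOAD acts
// as a read of the attachment, while CLEAR and DONT_CARE act as writes. So, illustratively
// (helper return values sketched here, not quoted from their definitions):
//
//   ColorLoadUsage(VK_ATTACHMENT_LOAD_OP_LOAD)               -> a color-attachment *read* access
//   ColorLoadUsage(VK_ATTACHMENT_LOAD_OP_CLEAR)              -> a color-attachment *write* access
//   DepthStencilLoadUsage(VK_ATTACHMENT_LOAD_OP_DONT_CARE)   -> a depth/stencil *write* access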
John Zulaufd0ec59f2021-03-13 14:25:08 -07002834AttachmentViewGenVector RenderPassAccessContext::CreateAttachmentViewGen(
2835 const VkRect2D &render_area, const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
2836 AttachmentViewGenVector view_gens;
2837 VkExtent3D extent = CastTo3D(render_area.extent);
2838 VkOffset3D offset = CastTo3D(render_area.offset);
2839 view_gens.reserve(attachment_views.size());
2840 for (const auto *view : attachment_views) {
2841 view_gens.emplace_back(view, offset, extent);
2842 }
2843 return view_gens;
2844}
John Zulauf64ffe552021-02-06 10:25:07 -07002845RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2846 VkQueueFlags queue_flags,
2847 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2848 const AccessContext *external_context)
John Zulaufd0ec59f2021-03-13 14:25:08 -07002849 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_() {
John Zulauf355e49b2020-04-24 15:11:15 -06002850 // Add these for all subpasses here so that they exist during subsequent subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002851 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulaufbb890452021-12-14 11:30:18 -07002852 replay_context_ = std::make_shared<ReplayRenderpassContext>();
2853 auto &replay_subpass_contexts = replay_context_->subpass_contexts;
2854 replay_subpass_contexts.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002855 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002856 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulaufbb890452021-12-14 11:30:18 -07002857 replay_subpass_contexts.emplace_back(queue_flags, rp_state_->subpass_dependencies[pass], replay_subpass_contexts);
John Zulauf355e49b2020-04-24 15:11:15 -06002858 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002859 attachment_views_ = CreateAttachmentViewGen(render_area, attachment_views);
John Zulauf64ffe552021-02-06 10:25:07 -07002860}
John Zulauf41a9c7c2021-12-07 15:59:53 -07002861void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag barrier_tag, const ResourceUsageTag load_tag) {
John Zulauf64ffe552021-02-06 10:25:07 -07002862 assert(0 == current_subpass_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002863 subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
2864 RecordLayoutTransitions(barrier_tag);
2865 RecordLoadOperations(load_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002866}
John Zulauf1507ee42020-05-18 11:33:09 -06002867
John Zulauf41a9c7c2021-12-07 15:59:53 -07002868void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag store_tag, const ResourceUsageTag barrier_tag,
2869 const ResourceUsageTag load_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002870 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauf41a9c7c2021-12-07 15:59:53 -07002871 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
2872 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002873
ziga-lunarg31a3e772022-03-22 11:48:46 +01002874 if (current_subpass_ + 1 >= subpass_contexts_.size()) {
2875 return;
2876 }
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002877 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2878 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002879 current_subpass_++;
John Zulauf41a9c7c2021-12-07 15:59:53 -07002880 subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
2881 RecordLayoutTransitions(barrier_tag);
2882 RecordLoadOperations(load_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002883}
2884
John Zulauf41a9c7c2021-12-07 15:59:53 -07002885void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag store_tag,
2886 const ResourceUsageTag barrier_tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002887 // Add the resolve and store accesses
John Zulauf41a9c7c2021-12-07 15:59:53 -07002888 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
2889 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002890
John Zulauf355e49b2020-04-24 15:11:15 -06002891 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002892 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002893
2894 // Add the "finalLayout" transitions to external
2895 // Get them from where we're hiding them in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002896 // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2897 // TODO: For aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2898 // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002899 const auto &final_transitions = rp_state_->subpass_transitions.back();
2900 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002901 const AttachmentViewGen &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002902 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufbb890452021-12-14 11:30:18 -07002903 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.source_subpass);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002904 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), barrier_tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002905 for (const auto &barrier : last_trackback.barriers) {
John Zulauf00119522022-05-23 19:07:42 -06002906 barrier_action.EmplaceBack(PipelineBarrierOp(QueueSyncState::kQueueIdInvalid, barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002907 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002908 external_context->ApplyUpdateAction(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002909 }
2910}
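
// Illustrative tag flow for a two-subpass render pass (a sketch assuming the caller allocates
// monotonically increasing ResourceUsageTags; actual tag allocation lives with the command
// buffer access context):
//
//   RecordBeginRenderPass(barrier_tag /* subpass 0 ILTs */, load_tag /* subpass 0 loadOps */);
//   ... subpass 0 draws ...
//   RecordNextSubpass(store_tag /* subpass 0 stores/resolves */, barrier_tag, load_tag);
//   ... subpass 1 draws ...
//   RecordEndRenderPass(external_context, store_tag, barrier_tag /* finalLayout ILTs */);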
2911
John Zulauf06f6f1e2022-04-19 15:28:11 -06002912SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param,
2913 const VkPipelineStageFlags2KHR disabled_feature_mask) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002914 SyncExecScope result;
2915 result.mask_param = mask_param;
John Zulauf06f6f1e2022-04-19 15:28:11 -06002916 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags, disabled_feature_mask);
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002917 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben87fd0422022-06-08 15:43:47 -06002918 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002919 return result;
2920}
2921
Jeremy Gebben40a22942020-12-22 14:22:06 -07002922SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002923 SyncExecScope result;
2924 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002925 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2926 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben87fd0422022-06-08 15:43:47 -06002927 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002928 return result;
2929}
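
// The expansions above mirror Vulkan's execution-dependency semantics: a source scope implicitly
// includes logically *earlier* pipeline stages, and a destination scope logically *later* ones.
// For example (illustrative):
//
//   auto src = SyncExecScope::MakeSrc(queue_flags, VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR);
//   // src.exec_scope also covers earlier stages such as vertex shading and draw indirect
//   auto dst = SyncExecScope::MakeDst(queue_flags, VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR);
//   // dst.exec_scope also covers later stages such as late fragment tests and color output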
2930
John Zulaufecf4ac52022-06-06 10:08:42 -06002931SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst)
2932 : src_exec_scope(src), src_access_scope(0), dst_exec_scope(dst), dst_access_scope(0) {}
2933
2934SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst, const SyncBarrier::AllAccess &)
2935 : src_exec_scope(src), src_access_scope(src.valid_accesses), dst_exec_scope(dst), dst_access_scope(src.valid_accesses) {}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002936
2937template <typename Barrier>
John Zulaufecf4ac52022-06-06 10:08:42 -06002938SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst)
2939 : src_exec_scope(src),
2940 src_access_scope(SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask)),
2941 dst_exec_scope(dst),
2942 dst_access_scope(SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask)) {}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002943
2944SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002945 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2946 if (barrier) {
2947 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002948 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002949 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002950
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002951 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002952 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002953 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2954
2955 } else {
2956 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002957 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002958 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2959
2960 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002961 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002962 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2963 }
2964}
2965
2966template <typename Barrier>
2967SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2968 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2969 src_exec_scope = src.exec_scope;
2970 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2971
2972 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002973 dst_exec_scope = dst.exec_scope;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002974 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002975}
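
// Minimal usage sketch for the template constructor above (hypothetical values; any type with
// src/dst stage and access mask members, such as VkMemoryBarrier2KHR, satisfies the template):
//
//   VkMemoryBarrier2KHR mb = {VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR};
//   mb.srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR;
//   mb.srcAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT_KHR;
//   mb.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
//   mb.dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT_KHR;
//   SyncBarrier barrier(queue_flags, mb);  // stages expanded, accesses masked to valid scopes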
2976
John Zulaufb02c1eb2020-10-06 16:33:36 -06002977// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2978void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
John Zulauf00119522022-05-23 19:07:42 -06002979 const UntaggedScopeOps scope;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002980 for (const auto &barrier : barriers) {
John Zulauf00119522022-05-23 19:07:42 -06002981 ApplyBarrier(scope, barrier, layout_transition);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002982 }
2983}
2984
John Zulauf89311b42020-09-29 16:28:47 -06002985// ApplyBarriers is design for *fully* inclusive barrier lists without layout tranistions. Designed use was for
2986// inter-subpass barriers for lazy-evaluation of parent context memory ranges. Subpass layout transistions are *not* done
2987// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufbb890452021-12-14 11:30:18 -07002988void ResourceAccessState::ApplyBarriersImmediate(const std::vector<SyncBarrier> &barriers) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06002989 assert(!pending_layout_transition);  // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002990 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002991 assert(!pending_write_dep_chain);
John Zulauf00119522022-05-23 19:07:42 -06002992 const UntaggedScopeOps scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002993 for (const auto &barrier : barriers) {
John Zulauf00119522022-05-23 19:07:42 -06002994 ApplyBarrier(scope, barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002995 }
John Zulaufbb890452021-12-14 11:30:18 -07002996 ApplyPendingBarriers(kInvalidTag); // There can't be any need for this tag
John Zulauf3d84f1b2020-03-09 13:33:25 -06002997}
John Zulauf9cb530d2019-09-30 14:14:10 -06002998HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2999 HazardResult hazard;
3000 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06003001 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06003002 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003003 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06003004 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003005 }
3006 } else {
John Zulauf361fb532020-07-22 10:45:39 -06003007 // Write operation:
3008 // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads)
3009 // If reads exist -- test only against them because either:
3010 // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
3011 // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
3012 // the current write happens after the reads, so just test the write against the reads
3013 // Otherwise test against last_write
3014 //
3015 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07003016 if (last_reads.size()) {
3017 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06003018 if (IsReadHazard(usage_stage, read_access)) {
3019 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3020 break;
3021 }
3022 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003023 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06003024 // Write-After-Write check -- if we have a previous write to test against
3025 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003026 }
3027 }
3028 return hazard;
3029}
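
// Worked example for the simple checks above (illustrative): if last_write records an
// unsynchronized transfer write, a subsequent shader read reports READ_AFTER_WRITE and another
// unsynchronized write reports WRITE_AFTER_WRITE; if unsynchronized reads exist, any write
// reports WRITE_AFTER_READ against the first offending read stage.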
3030
John Zulaufec943ec2022-06-29 07:52:56 -06003031HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering ordering_rule,
3032 QueueId queue_id) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07003033 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulaufec943ec2022-06-29 07:52:56 -06003034 return DetectHazard(usage_index, ordering, queue_id);
John Zulauf4fa68462021-04-26 21:04:22 -06003035}
3036
John Zulaufec943ec2022-06-29 07:52:56 -06003037HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const OrderingBarrier &ordering,
3038 QueueId queue_id) const {
John Zulauf69133422020-05-20 14:55:53 -06003039 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
3040 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06003041 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06003042 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003043 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulaufec943ec2022-06-29 07:52:56 -06003044 const bool last_write_is_ordered = (last_write & ordering.access_scope).any() && (write_queue == queue_id);
John Zulauf4285ee92020-09-23 10:20:52 -06003045 if (IsRead(usage_bit)) {
3046 // Exclude RAW if there is no write, or if the write is not the most recent operation w.r.t. the usage;
3047 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
3048 if (is_raw_hazard) {
3049 // NOTE: we know last_write is non-zero
3050 // See if the ordering rules save us from the simple RAW check above
3051 // First check to see if the current usage is covered by the ordering rules
3052 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
3053 const bool usage_is_ordered =
3054 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
3055 if (usage_is_ordered) {
3056 // Now see of the most recent write (or a subsequent read) are ordered
John Zulaufec943ec2022-06-29 07:52:56 -06003057 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(queue_id, ordering));
John Zulauf4285ee92020-09-23 10:20:52 -06003058 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06003059 }
3060 }
John Zulauf4285ee92020-09-23 10:20:52 -06003061 if (is_raw_hazard) {
3062 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
3063 }
John Zulauf5c628d02021-05-04 15:46:36 -06003064 } else if (usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
3065 // For Image layout transitions, the barrier represents the first synchronization/access scope of the layout transition
John Zulaufec943ec2022-06-29 07:52:56 -06003066 return DetectBarrierHazard(usage_index, queue_id, ordering.exec_scope, ordering.access_scope);
John Zulauf361fb532020-07-22 10:45:39 -06003067 } else {
3068 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003069 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07003070 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06003071 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07003072 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06003073 if (usage_write_is_ordered) {
3074 // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
John Zulaufec943ec2022-06-29 07:52:56 -06003075 ordered_stages = GetOrderedStages(queue_id, ordering);
John Zulauf4285ee92020-09-23 10:20:52 -06003076 }
3077 // If we're tracking any reads that aren't ordered against the current write, got to check 'em all.
3078 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003079 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06003080 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
3081 if (IsReadHazard(usage_stage, read_access)) {
3082 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3083 break;
3084 }
John Zulaufd14743a2020-07-03 09:42:39 -06003085 }
3086 }
John Zulauf2a344ca2021-09-09 17:07:19 -06003087 } else if (last_write.any() && !(last_write_is_ordered && usage_write_is_ordered)) {
3088 bool ilt_ilt_hazard = false;
3089 if ((usage_index == SYNC_IMAGE_LAYOUT_TRANSITION) && (usage_bit == last_write)) {
3090 // ILT after ILT is a special case where we check the 2nd access scope of the first ILT against the first access
3091 // scope of the second ILT, which has been passed (smuggled?) in the ordering barrier
3092 ilt_ilt_hazard = !(write_barriers & ordering.access_scope).any();
3093 }
3094 if (ilt_ilt_hazard || IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003095 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06003096 }
John Zulauf69133422020-05-20 14:55:53 -06003097 }
3098 }
3099 return hazard;
3100}
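
// Example of the ordering suppression above (illustrative): two back-to-back draws writing the
// same color attachment would be WRITE_AFTER_WRITE under the simple check, but both accesses
// fall within the SyncOrdering::kColorAttachment scopes, and rasterization-order guarantees make
// the pair safe, so no hazard is reported.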
3101
John Zulaufec943ec2022-06-29 07:52:56 -06003102HazardResult ResourceAccessState::DetectHazard(const ResourceAccessState &recorded_use, QueueId queue_id,
3103 const ResourceUsageRange &tag_range) const {
John Zulaufae842002021-04-15 18:20:55 -06003104 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06003105 using Size = FirstAccesses::size_type;
3106 const auto &recorded_accesses = recorded_use.first_accesses_;
3107 Size count = recorded_accesses.size();
3108 if (count) {
3109 const auto &last_access = recorded_accesses.back();
3110 bool do_write_last = IsWrite(last_access.usage_index);
3111 if (do_write_last) --count;
John Zulaufae842002021-04-15 18:20:55 -06003112
John Zulauf4fa68462021-04-26 21:04:22 -06003113 for (Size i = 0; i < count; ++i) {
3114 const auto &first = recorded_accesses[i];
3115 // Skip and quit logic
3116 if (first.tag < tag_range.begin) continue;
3117 if (first.tag >= tag_range.end) {
3118 do_write_last = false; // ignore last since we know it can't be in tag_range
3119 break;
3120 }
3121
John Zulaufec943ec2022-06-29 07:52:56 -06003122 hazard = DetectHazard(first.usage_index, first.ordering_rule, queue_id);
John Zulauf4fa68462021-04-26 21:04:22 -06003123 if (hazard.hazard) {
3124 hazard.AddRecordedAccess(first);
3125 break;
3126 }
3127 }
3128
3129 if (do_write_last && tag_range.includes(last_access.tag)) {
3130 // Writes are a bit special... both for the "most recent" access logic, and layout transition specific logic
3131 OrderingBarrier barrier = GetOrderingRules(last_access.ordering_rule);
3132 if (last_access.usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
3133 // Or in the layout first access scope as a barrier... IFF the usage is an ILT
3134 // this was saved off in the "apply barriers" logic to simplify ILT access checks as they straddle
3135 // the barrier that applies them
3136 barrier |= recorded_use.first_write_layout_ordering_;
3137 }
3138 // Any read stages present in the recorded context (recorded_use) are most recent to the write, and thus mask those stages in
3139 // the active context
3140 if (recorded_use.first_read_stages_) {
3141 // we need to ignore the first use read stages in the active context (so we add them to the ordering rule);
3142 // reads in the active context are not "most recent" as all recorded context operations are *after* them.
3143 // This suppresses only RAW checks for stages present in the recorded context, but not those only present in the
3144 // active context.
3145 barrier.exec_scope |= recorded_use.first_read_stages_;
3146 // if there are any first use reads, we suppress WAW by injecting the active context write in the ordering rule
3147 barrier.access_scope |= FlagBit(last_access.usage_index);
3148 }
John Zulaufec943ec2022-06-29 07:52:56 -06003149 hazard = DetectHazard(last_access.usage_index, barrier, queue_id);
John Zulauf4fa68462021-04-26 21:04:22 -06003150 if (hazard.hazard) {
3151 hazard.AddRecordedAccess(last_access);
3152 }
3153 }
John Zulaufae842002021-04-15 18:20:55 -06003154 }
3155 return hazard;
3156}
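
// This overload replays the first accesses of a recorded context (e.g. a secondary command
// buffer) against *this* state, with tag_range restricting the replay window. Only the first
// accesses need rechecking here; later accesses in the recorded context were already validated
// against its own state when it was recorded.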
3157
John Zulauf2f952d22020-02-10 11:34:51 -07003158// Asynchronous Hazards occur between subpasses with no connection through the DAG
John Zulauf14940722021-04-12 15:19:02 -06003159HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07003160 HazardResult hazard;
3161 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003162 // Async checks must not go back further than the start of the subpass, as we only want to find hazards between the async
3163 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
3164 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07003165 if (IsRead(usage)) {
John Zulauf14940722021-04-12 15:19:02 -06003166 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06003167 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07003168 }
3169 } else {
John Zulauf14940722021-04-12 15:19:02 -06003170 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06003171 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07003172 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003173 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07003174 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06003175 if (read_access.tag >= start_tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003176 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003177 break;
3178 }
3179 }
John Zulauf2f952d22020-02-10 11:34:51 -07003180 }
3181 }
3182 return hazard;
3183}
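
// Example (illustrative): two subpasses with no dependency between them are asynchronous for
// validation purposes. If subpass A writes an image at or after start_tag and async subpass B
// reads it, the checks above report READ_RACING_WRITE (or WRITE_RACING_WRITE / WRITE_RACING_READ
// for the other access orders), independent of barriers, since no dependency chain connects them.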
3184
John Zulaufae842002021-04-15 18:20:55 -06003185HazardResult ResourceAccessState::DetectAsyncHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
3186 ResourceUsageTag start_tag) const {
3187 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06003188 for (const auto &first : recorded_use.first_accesses_) {
John Zulaufae842002021-04-15 18:20:55 -06003189 // Skip and quit logic
3190 if (first.tag < tag_range.begin) continue;
3191 if (first.tag >= tag_range.end) break;
John Zulaufae842002021-04-15 18:20:55 -06003192
3193 hazard = DetectAsyncHazard(first.usage_index, start_tag);
John Zulauf4fa68462021-04-26 21:04:22 -06003194 if (hazard.hazard) {
3195 hazard.AddRecordedAccess(first);
3196 break;
3197 }
John Zulaufae842002021-04-15 18:20:55 -06003198 }
3199 return hazard;
3200}
3201
John Zulaufec943ec2022-06-29 07:52:56 -06003202HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, QueueId queue_id,
3203 VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003204 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07003205 // Only supporting image layout transitions for now
3206 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3207 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06003208 // Only test for WAW if there are no intervening read operations.
3209 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07003210 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06003211 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07003212 for (const auto &read_access : last_reads) {
John Zulaufec943ec2022-06-29 07:52:56 -06003213 if (read_access.IsReadBarrierHazard(queue_id, src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06003214 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07003215 break;
3216 }
3217 }
John Zulaufec943ec2022-06-29 07:52:56 -06003218 } else if (last_write.any() && IsWriteBarrierHazard(queue_id, src_exec_scope, src_access_scope)) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003219 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3220 }
3221
3222 return hazard;
3223}
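
// Example (illustrative): an image layout transition acts as a write within the barrier that
// carries it. If a prior read's stage is outside src_exec_scope (and not execution chained to
// it), the transition reports WRITE_AFTER_READ; with no reads, a prior write not covered by
// src_access_scope reports WRITE_AFTER_WRITE.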
3224
John Zulaufe0757ba2022-06-10 16:51:45 -06003225HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, const ResourceAccessState &scope_state,
3226 VkPipelineStageFlags2KHR src_exec_scope,
3227 const SyncStageAccessFlags &src_access_scope, QueueId event_queue,
3228 ResourceUsageTag event_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07003229 // Only supporting image layout transitions for now
3230 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3231 HazardResult hazard;
John Zulauf4a6105a2020-11-17 15:11:05 -07003232
John Zulaufe0757ba2022-06-10 16:51:45 -06003233 if ((write_tag >= event_tag) && last_write.any()) {
3234 // Any write after the event precludes the possibility of being in the first access scope for the layout transition
3235 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3236 } else {
3237 // Only test for WAW if there are no intervening read operations.
3238 // See DetectHazard(SyncStageAccessIndex) above for more details.
3239 if (last_reads.size()) {
3240 // Look at the reads if any... if reads exist, they are either the reason the access is in the event's
3241 // first scope, or they are a hazard.
3242 const ReadStates &scope_reads = scope_state.last_reads;
3243 const ReadStates::size_type scope_read_count = scope_reads.size();
3244 // Since there hasn't been a write:
3245 // * The current read state is a superset of the scoped one
3246 // * The stage order is the same.
3247 assert(last_reads.size() >= scope_read_count);
3248 for (ReadStates::size_type read_idx = 0; read_idx < scope_read_count; ++read_idx) {
3249 const ReadState &scope_read = scope_reads[read_idx];
3250 const ReadState &current_read = last_reads[read_idx];
3251 assert(scope_read.stage == current_read.stage);
3252 if (current_read.tag > event_tag) {
3253 // The read is more recent than the set event scope, thus no barrier from the wait/ILT.
3254 hazard.Set(this, usage_index, WRITE_AFTER_READ, current_read.access, current_read.tag);
3255 } else {
3256 // The read is in the event's first synchronization scope, so we use a barrier hazard check
3257 // If the read stage is not in the src sync scope
3258 // *AND* not execution chained with an existing sync barrier (that's the or)
3259 // then the barrier access is unsafe (R/W after R)
3260 if (scope_read.IsReadBarrierHazard(event_queue, src_exec_scope)) {
3261 hazard.Set(this, usage_index, WRITE_AFTER_READ, scope_read.access, scope_read.tag);
3262 break;
3263 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003264 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003265 }
John Zulaufe0757ba2022-06-10 16:51:45 -06003266 if (!hazard.IsHazard() && (last_reads.size() > scope_read_count)) {
3267 const ReadState &current_read = last_reads[scope_read_count];
3268 hazard.Set(this, usage_index, WRITE_AFTER_READ, current_read.access, current_read.tag);
3269 }
3270 } else if (last_write.any()) {
3271 // If there are no reads, the write is either the reason the access is in the event scope... or it is a hazard.
John Zulauf4a6105a2020-11-17 15:11:05 -07003272 // The write is in the first sync scope of the event (since there aren't any reads to be the reason),
3273 // so do a normal barrier hazard check
John Zulaufe0757ba2022-06-10 16:51:45 -06003274 if (scope_state.IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3275 hazard.Set(&scope_state, usage_index, WRITE_AFTER_WRITE, scope_state.last_write, scope_state.write_tag);
John Zulauf4a6105a2020-11-17 15:11:05 -07003276 }
John Zulauf361fb532020-07-22 10:45:39 -06003277 }
John Zulaufd14743a2020-07-03 09:42:39 -06003278 }
John Zulauf361fb532020-07-22 10:45:39 -06003279
John Zulauf0cb5be22020-01-23 12:18:22 -07003280 return hazard;
3281}
3282
John Zulauf5f13a792020-03-10 07:31:21 -06003283// The logic behind resolves is the same as update, we assume that earlier hazards have be reported, and that no
3284// tranistive hazard can exists with a hazard between the earlier operations. Yes, an early hazard can mask that another
3285// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
3286void ResourceAccessState::Resolve(const ResourceAccessState &other) {
John Zulauf14940722021-04-12 15:19:02 -06003287 if (write_tag < other.write_tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003288 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
3289 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06003290 *this = other;
John Zulauf14940722021-04-12 15:19:02 -06003291 } else if (other.write_tag == write_tag) {
3292 // In the *equals* case for write operations, we merge the write barriers and the read state (but without the
John Zulauf5f13a792020-03-10 07:31:21 -06003293 // dependency chaining logic or any stage expansion)
3294 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003295 pending_write_barriers |= other.pending_write_barriers;
3296 pending_layout_transition |= other.pending_layout_transition;
3297 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf4fa68462021-04-26 21:04:22 -06003298 pending_layout_ordering_ |= other.pending_layout_ordering_;
John Zulauf5f13a792020-03-10 07:31:21 -06003299
John Zulaufd14743a2020-07-03 09:42:39 -06003300 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07003301 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06003302 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07003303 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003304 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06003305 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06003306 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06003307 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
3308 // but we should wait on profiling data for that.
3309 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003310 auto &my_read = last_reads[my_read_index];
3311 if (other_read.stage == my_read.stage) {
John Zulauf14940722021-04-12 15:19:02 -06003312 if (my_read.tag < other_read.tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003313 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06003314 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06003315 my_read.tag = other_read.tag;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003316 my_read.queue = other_read.queue;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003317 my_read.pending_dep_chain = other_read.pending_dep_chain;
3318 // TODO: Phase 2 -- review the state merge logic to avoid false positives from overwriting the barriers
3319 // May require tracking more than one access per stage.
3320 my_read.barriers = other_read.barriers;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003321 my_read.sync_stages = other_read.sync_stages;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003322 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06003323 // Since I'm overwriting the fragment stage read, also update the input attachment info
3324 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06003325 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003326 }
John Zulauf14940722021-04-12 15:19:02 -06003327 } else if (other_read.tag == my_read.tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06003328 // The read tags match so merge the barriers
3329 my_read.barriers |= other_read.barriers;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003330 my_read.sync_stages |= other_read.sync_stages;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003331 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003332 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003333
John Zulauf5f13a792020-03-10 07:31:21 -06003334 break;
3335 }
3336 }
3337 } else {
3338 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07003339 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06003340 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003341 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003342 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003343 }
John Zulauf5f13a792020-03-10 07:31:21 -06003344 }
3345 }
John Zulauf361fb532020-07-22 10:45:39 -06003346 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003347 }  // the else clause would be that the other write is before this write... in which case we supersede the other state and
3348 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07003349
3350 // Merge first access information by making a copy of this first_accesses_ and reconstructing with a shuffle
3351 // of the copy and other into this using the UpdateFirst logic.
3352 // NOTE: All sorts of additional cleverness could be put into short circuits (for example, when back is a write and is
3353 // before the front of the other first_accesses...)
3354 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
3355 FirstAccesses firsts(std::move(first_accesses_));
3356 first_accesses_.clear();
3357 first_read_stages_ = 0U;
3358 auto a = firsts.begin();
3359 auto a_end = firsts.end();
3360 for (auto &b : other.first_accesses_) {
John Zulauf14940722021-04-12 15:19:02 -06003361 // TODO: Determine whether some tag offset will be needed for PHASE II
3362 while ((a != a_end) && (a->tag < b.tag)) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003363 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3364 ++a;
3365 }
3366 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
3367 }
3368 for (; a != a_end; ++a) {
3369 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3370 }
3371 }
John Zulauf5f13a792020-03-10 07:31:21 -06003372}
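
// Worked example of the merge above (illustrative): when two parallel subpass contexts resolve
// into a common parent and both saw the same write (equal write_tag) followed by reads at
// different stages, the result keeps the union of the read states and ORs the barriers together,
// so neither context's synchronization is lost in the parent.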
3373
John Zulauf14940722021-04-12 15:19:02 -06003374void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003375 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource)...
3376 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003377 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003378 // Multiple outstanding reads may be of interest and do dependency chains independently
3379 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3380 const auto usage_stage = PipelineStageBit(usage_index);
3381 if (usage_stage & last_read_stages) {
John Zulaufecf4ac52022-06-06 10:08:42 -06003382 const auto not_usage_stage = ~usage_stage;
John Zulaufab7756b2020-12-29 16:10:16 -07003383 for (auto &read_access : last_reads) {
3384 if (read_access.stage == usage_stage) {
3385 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf1d5f9c12022-05-13 14:51:08 -06003386 } else if (read_access.barriers & usage_stage) {
John Zulaufecf4ac52022-06-06 10:08:42 -06003387 // If the current access is barriered to this stage, mark it as "known to happen after"
John Zulauf1d5f9c12022-05-13 14:51:08 -06003388 read_access.sync_stages |= usage_stage;
John Zulaufecf4ac52022-06-06 10:08:42 -06003389 } else {
3390 // If the current access is *NOT* barriered to this stage it needs to be cleared.
3391 // Note: this is possible because semaphores can *clear* effective barriers, so the assumption
3392 // that sync_stages is a subset of barriers may not apply.
3393 read_access.sync_stages &= not_usage_stage;
John Zulauf9cb530d2019-09-30 14:14:10 -06003394 }
3395 }
3396 } else {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003397 for (auto &read_access : last_reads) {
3398 if (read_access.barriers & usage_stage) {
3399 read_access.sync_stages |= usage_stage;
3400 }
3401 }
John Zulaufab7756b2020-12-29 16:10:16 -07003402 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003403 last_read_stages |= usage_stage;
3404 }
John Zulauf4285ee92020-09-23 10:20:52 -06003405
3406        // Fragment shader reads come in two flavors, and we need to note whether this one is the special input attachment read.
Jeremy Gebben40a22942020-12-22 14:22:06 -07003407 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003408 // TODO Revisit re: multiple reads for a given stage
3409 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003410 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003411 } else {
3412 // Assume write
3413 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003414 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003415 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003416 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003417}
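// Usage sketch (assumed call pattern, simplified): callers record one access at a time with a usage
// index, an ordering rule, and the command's tag, e.g.
//
//     access_state.Update(SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, tag);
//
// Reads accumulate per stage in last_reads; any write collapses the prior state via SetWrite.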
John Zulauf5f13a792020-03-10 07:31:21 -06003418
John Zulauf89311b42020-09-29 16:28:47 -06003419// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3420// If the last_reads/last_write were unsafe, we've reported them; in either case the prior access is irrelevant.
3421// We can overwrite them as *this* write is now after them.
3422//
3423// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
John Zulauf14940722021-04-12 15:19:02 -06003424void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag tag) {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003425 ClearRead();
3426 ClearWrite();
John Zulauf89311b42020-09-29 16:28:47 -06003427 write_tag = tag;
3428 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003429}
3430
John Zulauf1d5f9c12022-05-13 14:51:08 -06003431void ResourceAccessState::ClearWrite() {
3432 read_execution_barriers = VK_PIPELINE_STAGE_2_NONE;
3433 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
3434 write_barriers.reset();
3435 write_dependency_chain = VK_PIPELINE_STAGE_2_NONE;
3436 last_write.reset();
3437
3438 write_tag = 0;
3439 write_queue = QueueSyncState::kQueueIdInvalid;
3440}
3441
3442void ResourceAccessState::ClearRead() {
3443 last_reads.clear();
3444 last_read_stages = VK_PIPELINE_STAGE_2_NONE;
3445}
3446
John Zulauf89311b42020-09-29 16:28:47 -06003447// Apply the memory barrier without updating the existing barriers. The execution barrier
3448// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3449// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3450// replace the current write barriers or add to them, so accumulate to pending as well.
John Zulaufb7578302022-05-19 13:50:18 -06003451template <typename ScopeOps>
3452void ResourceAccessState::ApplyBarrier(ScopeOps &&scope, const SyncBarrier &barrier, bool layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06003453 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3454 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003455    // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
John Zulaufb7578302022-05-19 13:50:18 -06003456    // transition, under the theory of "most recent access". If the resource access *isn't* safe
John Zulauf86356ca2020-10-19 11:46:41 -06003457    // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
3458    // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufb7578302022-05-19 13:50:18 -06003459 if (layout_transition || scope.WriteInScope(barrier, *this)) {
John Zulauf89311b42020-09-29 16:28:47 -06003460 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003461 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003462 if (layout_transition) {
3463 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3464 }
John Zulaufa0a98292020-09-18 09:30:10 -06003465 }
John Zulauf89311b42020-09-29 16:28:47 -06003466    // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3467 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003468
John Zulauf89311b42020-09-29 16:28:47 -06003469 if (!pending_layout_transition) {
John Zulaufb7578302022-05-19 13:50:18 -06003470        // Once we're dealing with a layout transition (which is modeled as a *write*), the last reads/chains
3471        // don't need to be tracked, as we're just going to clear them.
John Zulauf434c4e62022-05-19 16:03:56 -06003472 VkPipelineStageFlags2 stages_in_scope = VK_PIPELINE_STAGE_2_NONE;
3473
John Zulaufab7756b2020-12-29 16:10:16 -07003474 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003475 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufb7578302022-05-19 13:50:18 -06003476 if (scope.ReadInScope(barrier, read_access)) {
John Zulauf434c4e62022-05-19 16:03:56 -06003477                // We'll apply the barrier in the next loop, because it's DRY'r to do it in one place.
3478 stages_in_scope |= read_access.stage;
3479 }
3480 }
3481
3482 for (auto &read_access : last_reads) {
3483 if (0 != ((read_access.stage | read_access.sync_stages) & stages_in_scope)) {
3484 // If this stage, or any stage known to be synchronized after it are in scope, apply the barrier to this read
3485 // NOTE: Forwarding barriers to known prior stages changes the sync_stages from shallow to deep, because the
3486 // barriers used to determine sync_stages have been propagated to all known earlier stages
John Zulaufc523bf62021-02-16 08:20:34 -07003487 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003488 }
3489 }
John Zulaufa0a98292020-09-18 09:30:10 -06003490 }
John Zulaufa0a98292020-09-18 09:30:10 -06003491}
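// Summary of the two-pass read handling above (illustrative pseudocode only):
//
//     in_scope = OR of read.stage for each read where scope.ReadInScope(barrier, read)
//     for each read:
//         if ((read.stage | read.sync_stages) & in_scope)
//             read.pending_dep_chain |= barrier.dst_exec_scope.exec_scope
//
// i.e. a read whose stage is known to execute after an in-scope stage inherits the barrier as well.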
3492
John Zulauf14940722021-04-12 15:19:02 -06003493void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag tag) {
John Zulauf89311b42020-09-29 16:28:47 -06003494 if (pending_layout_transition) {
John Zulauf4fa68462021-04-26 21:04:22 -06003495 // SetWrite clobbers the last_reads array, and thus we don't have to clear the read_state out.
John Zulauf89311b42020-09-29 16:28:47 -06003496 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003497 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf4fa68462021-04-26 21:04:22 -06003498 TouchupFirstForLayoutTransition(tag, pending_layout_ordering_);
3499 pending_layout_ordering_ = OrderingBarrier();
John Zulauf89311b42020-09-29 16:28:47 -06003500 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003501 }
John Zulauf89311b42020-09-29 16:28:47 -06003502
3503    // Apply the accumulated execution barriers (and thus update chaining information)
John Zulauf4fa68462021-04-26 21:04:22 -06003504 // for layout transition, last_reads is reset by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003505 for (auto &read_access : last_reads) {
3506 read_access.barriers |= read_access.pending_dep_chain;
3507 read_execution_barriers |= read_access.barriers;
3508 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003509 }
3510
3511 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3512 write_dependency_chain |= pending_write_dep_chain;
3513 write_barriers |= pending_write_barriers;
3514 pending_write_dep_chain = 0;
3515 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003516}
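// Typical sequence (assumed, simplified): all barriers of a batch are staged first, then committed
// once, which keeps the barriers of the batch independent of one another:
//
//     for (const auto &barrier : barriers) {
//         access.ApplyBarrier(scope, barrier, layout_transition);  // stages pending_* state only
//     }
//     access.ApplyPendingBarriers(tag);  // commits chains/barriers (and any layout transition)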
3517
John Zulaufecf4ac52022-06-06 10:08:42 -06003518// Assumes signal queue != wait queue
3519void ResourceAccessState::ApplySemaphore(const SemaphoreScope &signal, const SemaphoreScope wait) {
3520 // Semaphores only guarantee the first scope of the signal is before the second scope of the wait.
3521 // If any access isn't in the first scope, there are no guarantees, thus those barriers are cleared
3522 assert(signal.queue != wait.queue);
3523 for (auto &read_access : last_reads) {
3524 if (read_access.ReadInQueueScopeOrChain(signal.queue, signal.exec_scope)) {
3525 // Deflects WAR on wait queue
3526 read_access.barriers = wait.exec_scope;
3527 } else {
3528 // Leave sync stages alone. Update method will clear unsynchronized stages on subsequent reads as needed.
3529 read_access.barriers = VK_PIPELINE_STAGE_2_NONE;
3530 }
3531 }
3532 if (WriteInQueueSourceScopeOrChain(signal.queue, signal.exec_scope, signal.valid_accesses)) {
3533        // Will deflect RAW on the wait queue; WAW needs a chained barrier on the wait queue
3534 read_execution_barriers = wait.exec_scope;
3535 write_barriers = wait.valid_accesses;
3536 } else {
3537 read_execution_barriers = VK_PIPELINE_STAGE_2_NONE;
3538 write_barriers.reset();
3539 }
3540 write_dependency_chain = read_execution_barriers;
3541}
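// Worked example (illustrative): for a signal on queue Q0 and a wait on queue Q1, a read in the
// signal's first scope ends up with barriers == wait.exec_scope (deflecting a WAR on Q1), while an
// in-scope write ends up with write_barriers == wait.valid_accesses and read_execution_barriers ==
// wait.exec_scope, so a RAW on Q1 is deflected but a WAW still needs a barrier chained off the wait.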
3542
John Zulauf1d5f9c12022-05-13 14:51:08 -06003543bool ResourceAccessState::QueueTagPredicate::operator()(QueueId usage_queue, ResourceUsageTag usage_tag) {
3544 return (queue == usage_queue) && (tag <= usage_tag);
3545}
3546
3547bool ResourceAccessState::QueuePredicate::operator()(QueueId usage_queue, ResourceUsageTag) { return queue == usage_queue; }
3548
3549bool ResourceAccessState::TagPredicate::operator()(QueueId, ResourceUsageTag usage_tag) { return tag <= usage_tag; }
3550
3551// Returns whether the resulting state is "empty"
3552template <typename Pred>
3553bool ResourceAccessState::ApplyQueueTagWait(Pred &&queue_tag_test) {
3554 VkPipelineStageFlags2KHR sync_reads = VK_PIPELINE_STAGE_2_NONE;
3555
3556    // First pass: use the predicate to build a mask of the read stages we are synchronizing
3557    // (a second pass below uses sync_stages to also catch reads known to be before any synchronized read)
John Zulauf1d5f9c12022-05-13 14:51:08 -06003558 for (auto &read_access : last_reads) {
John Zulauf434c4e62022-05-19 16:03:56 -06003559 if (queue_tag_test(read_access.queue, read_access.tag)) {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003560            // The predicate tells us this read is being waited on, so its stage is synchronized
3561 sync_reads |= read_access.stage;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003562 }
3563 }
3564
John Zulauf434c4e62022-05-19 16:03:56 -06003565    // Now that we know the reads directly in scope, we just need to go over the list again to pick up the "known earlier" stages.
3566 // NOTE: sync_stages is "deep" catching all stages synchronized after it because we forward barriers
3567 uint32_t unsync_count = 0;
3568 for (auto &read_access : last_reads) {
3569 if (0 != ((read_access.stage | read_access.sync_stages) & sync_reads)) {
3570 // This is redundant in the "stage" case, but avoids a second branch to get an accurate count
3571 sync_reads |= read_access.stage;
3572 } else {
3573 ++unsync_count;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003574 }
3575 }
3576
3577 if (unsync_count) {
3578 if (sync_reads) {
3579            // When we have some remaining unsynchronized reads, we have to rewrite the last_reads array.
3580 ReadStates unsync_reads;
3581 unsync_reads.reserve(unsync_count);
3582 VkPipelineStageFlags2KHR unsync_read_stages = VK_PIPELINE_STAGE_2_NONE;
3583 for (auto &read_access : last_reads) {
3584 if (0 == (read_access.stage & sync_reads)) {
3585 unsync_reads.emplace_back(read_access);
3586 unsync_read_stages |= read_access.stage;
3587 }
3588 }
3589 last_read_stages = unsync_read_stages;
3590 last_reads = std::move(unsync_reads);
3591 }
3592 } else {
3593 // Nothing remains (or it was empty to begin with)
3594 ClearRead();
3595 }
3596
3597    bool all_clear = last_reads.empty();
3598 if (last_write.any()) {
3599 if (queue_tag_test(write_queue, write_tag) || sync_reads) {
3600            // Clear any predicated write, or the write from any access with synchronized reads.
3601            // This could drop RAW detection, but only if the synchronized reads were RAW hazards, and given the
3602            // MRR approach to reporting, this is consistent with other drops, especially since fixing the
3603            // RAW with the sync_reads stages would preclude a subsequent RAW.
3604 ClearWrite();
3605 } else {
3606 all_clear = false;
3607 }
3608 }
3609 return all_clear;
3610}
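// Usage sketch (assumed caller, simplified): queue waits are expressed as predicates over
// (queue, tag), e.g. with the QueueTagPredicate defined above:
//
//     ResourceAccessState::QueueTagPredicate pred{queue_id, tag};  // member order assumed
//     const bool empty = access.ApplyQueueTagWait(pred);
//     if (empty) { /* nothing left to track; the entry could be dropped */ }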
3611
John Zulaufae842002021-04-15 18:20:55 -06003612bool ResourceAccessState::FirstAccessInTagRange(const ResourceUsageRange &tag_range) const {
3613    if (first_accesses_.empty()) return false;
3614 const ResourceUsageRange first_access_range = {first_accesses_.front().tag, first_accesses_.back().tag + 1};
3615 return tag_range.intersects(first_access_range);
3616}
3617
John Zulauf1d5f9c12022-05-13 14:51:08 -06003618void ResourceAccessState::OffsetTag(ResourceUsageTag offset) {
3619 if (last_write.any()) write_tag += offset;
3620 for (auto &read_access : last_reads) {
3621 read_access.tag += offset;
3622 }
3623 for (auto &first : first_accesses_) {
3624 first.tag += offset;
3625 }
3626}
3627
3628ResourceAccessState::ResourceAccessState()
3629 : write_barriers(~SyncStageAccessFlags(0)),
3630 write_dependency_chain(0),
3631 write_tag(),
3632 write_queue(QueueSyncState::kQueueIdInvalid),
3633 last_write(0),
3634 input_attachment_read(false),
3635 last_read_stages(0),
3636 read_execution_barriers(0),
3637 pending_write_dep_chain(0),
3638 pending_layout_transition(false),
3639 pending_write_barriers(0),
3640 pending_layout_ordering_(),
3641 first_accesses_(),
3642 first_read_stages_(0U),
3643 first_write_layout_ordering_() {}
3644
John Zulauf59e25072020-07-17 10:55:21 -06003645// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07003646VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
3647 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003648
John Zulaufab7756b2020-12-29 16:10:16 -07003649 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003650 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003651 barriers = read_access.barriers;
3652 break;
John Zulauf59e25072020-07-17 10:55:21 -06003653 }
3654 }
John Zulauf4285ee92020-09-23 10:20:52 -06003655
John Zulauf59e25072020-07-17 10:55:21 -06003656 return barriers;
3657}
3658
John Zulauf1d5f9c12022-05-13 14:51:08 -06003659void ResourceAccessState::SetQueueId(QueueId id) {
3660 for (auto &read_access : last_reads) {
3661 if (read_access.queue == QueueSyncState::kQueueIdInvalid) {
3662 read_access.queue = id;
3663 }
3664 }
3665 if (last_write.any() && (write_queue == QueueSyncState::kQueueIdInvalid)) {
3666 write_queue = id;
3667 }
3668}
3669
John Zulauf00119522022-05-23 19:07:42 -06003670bool ResourceAccessState::WriteInChain(VkPipelineStageFlags2KHR src_exec_scope) const {
3671 return 0 != (write_dependency_chain & src_exec_scope);
3672}
3673
3674bool ResourceAccessState::WriteInScope(const SyncStageAccessFlags &src_access_scope) const {
3675 return (src_access_scope & last_write).any();
3676}
3677
John Zulaufec943ec2022-06-29 07:52:56 -06003678bool ResourceAccessState::WriteBarrierInScope(const SyncStageAccessFlags &src_access_scope) const {
3679 return (write_barriers & src_access_scope).any();
3680}
3681
John Zulaufb7578302022-05-19 13:50:18 -06003682bool ResourceAccessState::WriteInSourceScopeOrChain(VkPipelineStageFlags2KHR src_exec_scope,
3683 SyncStageAccessFlags src_access_scope) const {
John Zulauf00119522022-05-23 19:07:42 -06003684 return WriteInChain(src_exec_scope) || WriteInScope(src_access_scope);
3685}
3686
3687bool ResourceAccessState::WriteInQueueSourceScopeOrChain(QueueId queue, VkPipelineStageFlags2KHR src_exec_scope,
3688 SyncStageAccessFlags src_access_scope) const {
3689 return WriteInChain(src_exec_scope) || ((queue == write_queue) && WriteInScope(src_access_scope));
John Zulaufb7578302022-05-19 13:50:18 -06003690}
3691
John Zulaufe0757ba2022-06-10 16:51:45 -06003692bool ResourceAccessState::WriteInEventScope(VkPipelineStageFlags2KHR src_exec_scope, const SyncStageAccessFlags &src_access_scope,
3693 QueueId scope_queue, ResourceUsageTag scope_tag) const {
John Zulaufb7578302022-05-19 13:50:18 -06003694    // The scope logic for events is: if we're asking, the resource usage was flagged as "in the first execution scope" at
3695    // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
3696    // in order to know if it's in the execution scope
John Zulaufe0757ba2022-06-10 16:51:45 -06003697 return (write_tag < scope_tag) && WriteInQueueSourceScopeOrChain(scope_queue, src_exec_scope, src_access_scope);
John Zulaufb7578302022-05-19 13:50:18 -06003698}
3699
John Zulaufec943ec2022-06-29 07:52:56 -06003700bool ResourceAccessState::WriteInChainedScope(VkPipelineStageFlags2KHR src_exec_scope,
3701 const SyncStageAccessFlags &src_access_scope) const {
3702 return WriteInChain(src_exec_scope) && WriteBarrierInScope(src_access_scope);
3703}
3704
John Zulaufcb7e1672022-05-04 13:46:08 -06003705bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003706 assert(IsRead(usage));
3707 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3708 // * the previous reads are not hazards, and thus last_write must be visible and available to
3709 // any reads that happen after.
3710    // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3711    //   the current read will also not be a hazard; thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003712 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003713}
3714
John Zulaufec943ec2022-06-29 07:52:56 -06003715VkPipelineStageFlags2 ResourceAccessState::GetOrderedStages(QueueId queue_id, const OrderingBarrier &ordering) const {
3716    // Apply queue submission order limits to the effect of ordering
3717 VkPipelineStageFlags2 non_qso_stages = VK_PIPELINE_STAGE_2_NONE;
3718 if (queue_id != QueueSyncState::kQueueIdInvalid) {
3719 for (const auto &read_access : last_reads) {
3720 if (read_access.queue != queue_id) {
3721 non_qso_stages |= read_access.stage;
3722 }
3723 }
3724 }
John Zulauf4285ee92020-09-23 10:20:52 -06003725    // Whether the stages are in the ordering scope only matters if the current write is ordered
John Zulaufec943ec2022-06-29 07:52:56 -06003726 const VkPipelineStageFlags2 read_stages_in_qso = last_read_stages & ~non_qso_stages;
3727 VkPipelineStageFlags2 ordered_stages = read_stages_in_qso & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06003728    // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003729 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003730 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003731        // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07003732 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06003733 }
3734
3735 return ordered_stages;
3736}
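// Example (illustrative): a read recorded from another queue is not in "queue submission order"
// with this command, so its stage lands in non_qso_stages above and is excluded from
// ordered_stages even when the ordering rule's exec_scope would otherwise cover it.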
3737
John Zulauf14940722021-04-12 15:19:02 -06003738void ResourceAccessState::UpdateFirst(const ResourceUsageTag tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003739 // Only record until we record a write.
3740 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003741 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07003742 if (0 == (usage_stage & first_read_stages_)) {
3743 // If this is a read we haven't seen or a write, record.
John Zulauf4fa68462021-04-26 21:04:22 -06003744 // We always need to know what stages were found prior to write
John Zulauffaea0ee2021-01-14 14:01:32 -07003745 first_read_stages_ |= usage_stage;
John Zulauf4fa68462021-04-26 21:04:22 -06003746 if (0 == (read_execution_barriers & usage_stage)) {
3747 // If this stage isn't masked then we add it (since writes map to usage_stage 0, this also records writes)
3748 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3749 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003750 }
3751 }
3752}
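// Example (illustrative, assuming no intervening barriers): for the access sequence
// R(vertex), R(vertex), R(fragment), W the recorded first_accesses_ list is
// [R(vertex), R(fragment), W]; the repeated vertex read is masked by first_read_stages_, and
// recording stops once a write is the most recent entry.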
3753
John Zulauf4fa68462021-04-26 21:04:22 -06003754void ResourceAccessState::TouchupFirstForLayoutTransition(ResourceUsageTag tag, const OrderingBarrier &layout_ordering) {
3755 // Only call this after recording an image layout transition
3756 assert(first_accesses_.size());
3757 if (first_accesses_.back().tag == tag) {
3758        // If this layout transition is the first write, add the additional ordering rules that guard the ILT
Samuel Iglesias Gonsálvez9b4660b2021-10-21 08:50:39 +02003759 assert(first_accesses_.back().usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
John Zulauf4fa68462021-04-26 21:04:22 -06003760 first_write_layout_ordering_ = layout_ordering;
3761 }
3762}
3763
John Zulauf1d5f9c12022-05-13 14:51:08 -06003764ResourceAccessState::ReadState::ReadState(VkPipelineStageFlags2KHR stage_, SyncStageAccessFlags access_,
3765 VkPipelineStageFlags2KHR barriers_, ResourceUsageTag tag_)
3766 : stage(stage_),
3767 access(access_),
3768 barriers(barriers_),
3769 sync_stages(VK_PIPELINE_STAGE_2_NONE),
3770 tag(tag_),
3771 queue(QueueSyncState::kQueueIdInvalid),
3772 pending_dep_chain(VK_PIPELINE_STAGE_2_NONE) {}
3773
John Zulaufee984022022-04-13 16:39:50 -06003774void ResourceAccessState::ReadState::Set(VkPipelineStageFlags2KHR stage_, const SyncStageAccessFlags &access_,
3775 VkPipelineStageFlags2KHR barriers_, ResourceUsageTag tag_) {
3776 stage = stage_;
3777 access = access_;
3778 barriers = barriers_;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003779 sync_stages = VK_PIPELINE_STAGE_2_NONE;
John Zulaufee984022022-04-13 16:39:50 -06003780 tag = tag_;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003781 pending_dep_chain = VK_PIPELINE_STAGE_2_NONE; // If this is a new read, we aren't applying a barrier set.
John Zulaufee984022022-04-13 16:39:50 -06003782}
3783
John Zulauf00119522022-05-23 19:07:42 -06003784// Scope test including "queue submission order" effects. Specifically, accesses from a different queue are not
3785// considered to be in "queue submission order" with barriers, events, or semaphore signalling, but any barriers
3786// that have been applied (via semaphore) to those accesses can be chained off of.
3787bool ResourceAccessState::ReadState::ReadInQueueScopeOrChain(QueueId scope_queue, VkPipelineStageFlags2 exec_scope) const {
3788 VkPipelineStageFlags2 effective_stages = barriers | ((scope_queue == queue) ? stage : VK_PIPELINE_STAGE_2_NONE);
3789 return (exec_scope & effective_stages) != 0;
3790}
3791
John Zulauf697c0e12022-04-19 16:31:12 -06003792ResourceUsageRange SyncValidator::ReserveGlobalTagRange(size_t tag_count) const {
3793 ResourceUsageRange reserve;
3794 reserve.begin = tag_limit_.fetch_add(tag_count);
3795 reserve.end = reserve.begin + tag_count;
3796 return reserve;
3797}
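// Sketch (illustrative): the fetch_add above hands out disjoint, monotonically increasing tag
// ranges even under concurrent submission, e.g.
//
//     const ResourceUsageRange range = validator.ReserveGlobalTagRange(access_count);
//     // range.begin .. range.end - 1 are now exclusively this submission's tags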
3798
John Zulaufbbda4572022-04-19 16:20:45 -06003799const QueueSyncState *SyncValidator::GetQueueSyncState(VkQueue queue) const {
3800 return GetMappedPlainFromShared(queue_sync_states_, queue);
3801}
3802
3803QueueSyncState *SyncValidator::GetQueueSyncState(VkQueue queue) { return GetMappedPlainFromShared(queue_sync_states_, queue); }
3804
3805std::shared_ptr<const QueueSyncState> SyncValidator::GetQueueSyncStateShared(VkQueue queue) const {
3806 return GetMapped(queue_sync_states_, queue, []() { return std::shared_ptr<QueueSyncState>(); });
3807}
3808
3809std::shared_ptr<QueueSyncState> SyncValidator::GetQueueSyncStateShared(VkQueue queue) {
3810 return GetMapped(queue_sync_states_, queue, []() { return std::shared_ptr<QueueSyncState>(); });
3811}
3812
John Zulaufe0757ba2022-06-10 16:51:45 -06003813template <typename T>
3814struct GetBatchTraits {};
3815template <>
3816struct GetBatchTraits<std::shared_ptr<QueueSyncState>> {
3817 using Batch = std::shared_ptr<QueueBatchContext>;
3818 static Batch Get(const std::shared_ptr<QueueSyncState> &qss) { return qss ? qss->LastBatch() : Batch(); }
3819};
3820
3821template <>
3822struct GetBatchTraits<std::shared_ptr<SignaledSemaphores::Signal>> {
3823 using Batch = std::shared_ptr<QueueBatchContext>;
3824 static Batch Get(const std::shared_ptr<SignaledSemaphores::Signal> &sig) { return sig ? sig->batch : Batch(); }
3825};
3826
3827template <typename BatchSet, typename Map, typename Predicate>
3828static BatchSet GetQueueBatchSnapshotImpl(const Map &map, Predicate &&pred) {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003829 BatchSet snapshot;
John Zulaufe0757ba2022-06-10 16:51:45 -06003830 for (auto &entry : map) {
3831 // Intentional copy
3832 auto batch = GetBatchTraits<typename Map::mapped_type>::Get(entry.second);
John Zulauf1d5f9c12022-05-13 14:51:08 -06003833 if (batch && pred(batch)) snapshot.emplace(std::move(batch));
John Zulauf697c0e12022-04-19 16:31:12 -06003834 }
John Zulauf1d5f9c12022-05-13 14:51:08 -06003835 return snapshot;
3836}
3837
3838template <typename Predicate>
3839QueueBatchContext::ConstBatchSet SyncValidator::GetQueueLastBatchSnapshot(Predicate &&pred) const {
John Zulaufe0757ba2022-06-10 16:51:45 -06003840 return GetQueueBatchSnapshotImpl<QueueBatchContext::ConstBatchSet>(queue_sync_states_, std::forward<Predicate>(pred));
John Zulauf1d5f9c12022-05-13 14:51:08 -06003841}
3842
3843template <typename Predicate>
3844QueueBatchContext::BatchSet SyncValidator::GetQueueLastBatchSnapshot(Predicate &&pred) {
John Zulaufe0757ba2022-06-10 16:51:45 -06003845 return GetQueueBatchSnapshotImpl<QueueBatchContext::BatchSet>(queue_sync_states_, std::forward<Predicate>(pred));
3846}
3847
3848QueueBatchContext::BatchSet SyncValidator::GetQueueBatchSnapshot() {
3849 QueueBatchContext::BatchSet snapshot = GetQueueLastBatchSnapshot();
3850 auto append = [&snapshot](const std::shared_ptr<QueueBatchContext> batch) {
3851 if (batch && !layer_data::Contains(snapshot, batch)) {
3852 snapshot.emplace(batch);
3853 }
3854 return false;
3855 };
3856 GetQueueBatchSnapshotImpl<QueueBatchContext::BatchSet>(signaled_semaphores_, append);
3857 return snapshot;
John Zulauf697c0e12022-04-19 16:31:12 -06003858}
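// Design note: GetBatchTraits is a small traits adapter giving queue states and signaled
// semaphores a common "get the relevant batch" accessor, so GetQueueBatchSnapshotImpl can walk
// either map with one loop. A hypothetical third batch source would only need its own
// specialization, e.g.
//
//     template <>
//     struct GetBatchTraits<std::shared_ptr<SomeFutureState>> {  // hypothetical type
//         using Batch = std::shared_ptr<QueueBatchContext>;
//         static Batch Get(const std::shared_ptr<SomeFutureState> &s) { return s ? s->LastBatch() : Batch(); }
//     };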
3859
John Zulaufcb7e1672022-05-04 13:46:08 -06003860bool SignaledSemaphores::SignalSemaphore(const std::shared_ptr<const SEMAPHORE_STATE> &sem_state,
3861 const std::shared_ptr<QueueBatchContext> &batch,
3862 const VkSemaphoreSubmitInfo &signal_info) {
John Zulaufecf4ac52022-06-06 10:08:42 -06003863 assert(batch);
John Zulaufcb7e1672022-05-04 13:46:08 -06003864 const SyncExecScope exec_scope =
3865 SyncExecScope::MakeSrc(batch->GetQueueFlags(), signal_info.stageMask, VK_PIPELINE_STAGE_2_HOST_BIT);
3866 const VkSemaphore sem = sem_state->semaphore();
3867 auto signal_it = signaled_.find(sem);
3868 std::shared_ptr<Signal> insert_signal;
3869 if (signal_it == signaled_.end()) {
3870 if (prev_) {
3871 auto prev_sig = GetMapped(prev_->signaled_, sem_state->semaphore(), []() { return std::shared_ptr<Signal>(); });
3872 if (prev_sig) {
3873                // This is an invalid signal, as this semaphore is already signaled... copy the prev state (as prev_ is const)
3874 insert_signal = std::make_shared<Signal>(*prev_sig);
3875 }
3876 }
3877 auto insert_pair = signaled_.emplace(sem, std::move(insert_signal));
3878 signal_it = insert_pair.first;
John Zulauf697c0e12022-04-19 16:31:12 -06003879 }
John Zulaufcb7e1672022-05-04 13:46:08 -06003880
3881 bool success = false;
3882 if (!signal_it->second) {
3883 signal_it->second = std::make_shared<Signal>(sem_state, batch, exec_scope);
3884 success = true;
3885 }
3886
3887 return success;
3888}
3889
John Zulaufecf4ac52022-06-06 10:08:42 -06003890std::shared_ptr<const SignaledSemaphores::Signal> SignaledSemaphores::Unsignal(VkSemaphore sem) {
3891 std::shared_ptr<const Signal> unsignaled;
John Zulaufcb7e1672022-05-04 13:46:08 -06003892 const auto found_it = signaled_.find(sem);
3893 if (found_it != signaled_.end()) {
3894        // Move the unsignaled signal out from the signaled list, but keep the shared_ptr as the caller needs the contents for
3895 // a bit.
3896 unsignaled = std::move(found_it->second);
3897 if (!prev_) {
3898            // No parent, no need to keep the entry
3899 // IFF (prev_) leave the entry in the leaf table as we use it to export unsignal to prev_ during record phase
3900 signaled_.erase(found_it);
3901 }
3902 } else if (prev_) {
3903 // We can't unsignal prev_ because it's const * by design.
3904 // We put in an empty placeholder
3905 signaled_.emplace(sem, std::shared_ptr<Signal>());
3906 unsignaled = GetPrev(sem);
3907 }
3908    // NOTE: No else clause, because if we didn't find it and there's no previous, this indicates an error
3909    // that CoreChecks should have reported
3910
3911    // If unsignaled is null, there was a missing pending semaphore, and that's also an issue CoreChecks reports
John Zulauf697c0e12022-04-19 16:31:12 -06003912 return unsignaled;
3913}
3914
John Zulaufcb7e1672022-05-04 13:46:08 -06003915void SignaledSemaphores::Import(VkSemaphore sem, std::shared_ptr<Signal> &&from) {
3916 // Overwrite the s tate with the last state from this
3917 if (from) {
3918 assert(sem == from->sem_state->semaphore());
3919 signaled_[sem] = std::move(from);
3920 } else {
3921 signaled_.erase(sem);
3922 }
3923}
3924
3925void SignaledSemaphores::Reset() {
3926 signaled_.clear();
3927 prev_ = nullptr;
3928}
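// Lifetime sketch (assumed usage based on the code above): signaled_ tables are layered, with a
// recording-scope table shadowing a const prev_ table. SignalSemaphore() records the first signal
// and returns false on a double signal, Unsignal() moves an entry out (or records a placeholder
// when only prev_ holds it), and Import()/Reset() reconcile the layers afterwards.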
3929
John Zulaufea943c52022-02-22 11:05:17 -07003930std::shared_ptr<CommandBufferAccessContext> SyncValidator::AccessContextFactory(VkCommandBuffer command_buffer) {
3931 // If we don't have one, make it.
3932 auto cb_state = Get<CMD_BUFFER_STATE>(command_buffer);
3933 assert(cb_state.get());
3934 auto queue_flags = cb_state->GetQueueFlags();
3935 return std::make_shared<CommandBufferAccessContext>(*this, cb_state, queue_flags);
3936}
3937
John Zulaufcb7e1672022-05-04 13:46:08 -06003938std::shared_ptr<CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) {
John Zulaufea943c52022-02-22 11:05:17 -07003939 return GetMappedInsert(cb_access_state, command_buffer,
3940 [this, command_buffer]() { return AccessContextFactory(command_buffer); });
3941}
3942
3943std::shared_ptr<const CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) const {
3944 return GetMapped(cb_access_state, command_buffer, []() { return std::shared_ptr<CommandBufferAccessContext>(); });
3945}
3946
3947const CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) const {
3948 return GetMappedPlainFromShared(cb_access_state, command_buffer);
3949}
3950
3951CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) {
3952 return GetAccessContextShared(command_buffer).get();
3953}
3954
3955CommandBufferAccessContext *SyncValidator::GetAccessContextNoInsert(VkCommandBuffer command_buffer) {
3956 return GetMappedPlainFromShared(cb_access_state, command_buffer);
3957}
3958
John Zulaufd1f85d42020-04-15 12:23:15 -06003959void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003960 auto *access_context = GetAccessContextNoInsert(command_buffer);
3961 if (access_context) {
3962 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003963 }
3964}
3965
John Zulaufd1f85d42020-04-15 12:23:15 -06003966void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3967 auto access_found = cb_access_state.find(command_buffer);
3968 if (access_found != cb_access_state.end()) {
3969 access_found->second->Reset();
John Zulauf4fa68462021-04-26 21:04:22 -06003970 access_found->second->MarkDestroyed();
John Zulaufd1f85d42020-04-15 12:23:15 -06003971 cb_access_state.erase(access_found);
3972 }
3973}
3974
John Zulauf9cb530d2019-09-30 14:14:10 -06003975bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3976 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3977 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003978 const auto *cb_context = GetAccessContext(commandBuffer);
3979 assert(cb_context);
3980 if (!cb_context) return skip;
3981 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003982
John Zulauf3d84f1b2020-03-09 13:33:25 -06003983 // If we have no previous accesses, we have no hazards
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003984 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
3985 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003986
3987 for (uint32_t region = 0; region < regionCount; region++) {
3988 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003989 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003990 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003991 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003992 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003993 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003994 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003995 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06003996 cb_context->FormatHazard(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003997 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003998 }
John Zulauf16adfc92020-04-08 10:28:33 -06003999 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004000 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004001 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004002 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004003 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004004 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004005 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004006 cb_context->FormatHazard(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06004007 }
4008 }
4009 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06004010 }
4011 return skip;
4012}
4013
4014void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
4015 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06004016 auto *cb_context = GetAccessContext(commandBuffer);
4017 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06004018 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004019 auto *context = cb_context->GetCurrentAccessContext();
4020
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004021 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
4022 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06004023
4024 for (uint32_t region = 0; region < regionCount; region++) {
4025 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06004026 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004027 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004028 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06004029 }
John Zulauf16adfc92020-04-08 10:28:33 -06004030 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004031 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004032 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07004033 }
4034 }
4035}
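// Pattern note (illustrative): each vkCmd* entry point is split into a PreCallValidate* pass that
// runs DetectHazard over a set of ranges and a PreCallRecord* pass that commits the same ranges
// with UpdateAccessState under a fresh command tag, so validation sees exactly the state the
// recorded command will produce. A minimal sketch of the shared shape for one copy region:
//
//     validate: hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
//     record:   context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ,
//                                          SyncOrdering::kNonAttachment, src_range, tag);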
4036
John Zulauf4a6105a2020-11-17 15:11:05 -07004037void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4038 // Clear out events from the command buffer contexts
4039 for (auto &cb_context : cb_access_state) {
4040 cb_context.second->RecordDestroyEvent(event);
4041 }
4042}
4043
Tony-LunarGef035472021-11-02 10:23:33 -06004044bool SyncValidator::ValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos,
4045 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04004046 bool skip = false;
4047 const auto *cb_context = GetAccessContext(commandBuffer);
4048 assert(cb_context);
4049 if (!cb_context) return skip;
4050 const auto *context = cb_context->GetCurrentAccessContext();
4051
4052 // If we have no previous accesses, we have no hazards
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004053 auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
4054 auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
Jeff Leger178b1e52020-10-05 12:22:23 -04004055
4056 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
4057 const auto &copy_region = pCopyBufferInfos->pRegions[region];
4058 if (src_buffer) {
4059 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004060 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04004061 if (hazard.hazard) {
4062 // TODO -- add tag information to log msg when useful.
sjfricke0bea06e2022-06-05 09:22:26 +09004063 skip |=
4064 LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
4065 "%s(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
4066 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
4067 region, cb_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004068 }
4069 }
4070 if (dst_buffer && !skip) {
4071 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004072 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04004073 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09004074 skip |=
4075 LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
4076 "%s(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
4077 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
4078 region, cb_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004079 }
4080 }
4081 if (skip) break;
4082 }
4083 return skip;
4084}
4085
Tony-LunarGef035472021-11-02 10:23:33 -06004086bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
4087 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
4088 return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
4089}
4090
4091bool SyncValidator::PreCallValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) const {
4092 return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
4093}
4094
4095void SyncValidator::RecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos, CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04004096 auto *cb_context = GetAccessContext(commandBuffer);
4097 assert(cb_context);
Tony-LunarGef035472021-11-02 10:23:33 -06004098 const auto tag = cb_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04004099 auto *context = cb_context->GetCurrentAccessContext();
4100
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004101 auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
4102 auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
Jeff Leger178b1e52020-10-05 12:22:23 -04004103
4104 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
4105 const auto &copy_region = pCopyBufferInfos->pRegions[region];
4106 if (src_buffer) {
4107 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004108 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004109 }
4110 if (dst_buffer) {
4111 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004112 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004113 }
4114 }
4115}
4116
Tony-LunarGef035472021-11-02 10:23:33 -06004117void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
4118 RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
4119}
4120
4121void SyncValidator::PreCallRecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) {
4122 RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
4123}
4124
John Zulauf5c5e88d2019-12-26 11:22:02 -07004125bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4126 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4127 const VkImageCopy *pRegions) const {
4128 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06004129 const auto *cb_access_context = GetAccessContext(commandBuffer);
4130 assert(cb_access_context);
4131 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07004132
John Zulauf3d84f1b2020-03-09 13:33:25 -06004133 const auto *context = cb_access_context->GetCurrentAccessContext();
4134 assert(context);
4135 if (!context) return skip;
4136
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004137 auto src_image = Get<IMAGE_STATE>(srcImage);
4138 auto dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004139 for (uint32_t region = 0; region < regionCount; region++) {
4140 const auto &copy_region = pRegions[region];
4141 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004142 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004143 copy_region.srcOffset, copy_region.extent, false);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004144 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004145 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004146 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004147 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004148 cb_access_context->FormatHazard(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07004149 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06004150 }
4151
4152 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004153 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004154 copy_region.dstOffset, copy_region.extent, false);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004155 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004156 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004157 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004158 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004159 cb_access_context->FormatHazard(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07004160 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07004161 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07004162 }
4163 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06004164
John Zulauf5c5e88d2019-12-26 11:22:02 -07004165 return skip;
4166}
4167
4168void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4169 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4170 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06004171 auto *cb_access_context = GetAccessContext(commandBuffer);
4172 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06004173 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004174 auto *context = cb_access_context->GetCurrentAccessContext();
4175 assert(context);
4176
Jeremy Gebben9f537102021-10-05 16:37:12 -06004177 auto src_image = Get<IMAGE_STATE>(srcImage);
4178 auto dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07004179
4180 for (uint32_t region = 0; region < regionCount; region++) {
4181 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06004182 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004183 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004184 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07004185 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06004186 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004187 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
ziga-lunarg73746512022-03-23 23:08:17 +01004188 copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06004189 }
4190 }
4191}
4192
Tony-LunarGb61514a2021-11-02 12:36:51 -06004193bool SyncValidator::ValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo,
4194 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04004195 bool skip = false;
4196 const auto *cb_access_context = GetAccessContext(commandBuffer);
4197 assert(cb_access_context);
4198 if (!cb_access_context) return skip;
4199
4200 const auto *context = cb_access_context->GetCurrentAccessContext();
4201 assert(context);
4202 if (!context) return skip;
4203
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004204 auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
4205 auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
Tony-LunarGb61514a2021-11-02 12:36:51 -06004206
Jeff Leger178b1e52020-10-05 12:22:23 -04004207 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
4208 const auto &copy_region = pCopyImageInfo->pRegions[region];
4209 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004210 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004211 copy_region.srcOffset, copy_region.extent, false);
Jeff Leger178b1e52020-10-05 12:22:23 -04004212 if (hazard.hazard) {
4213 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
sjfricke0bea06e2022-06-05 09:22:26 +09004214 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
Jeff Leger178b1e52020-10-05 12:22:23 -04004215 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06004216 region, cb_access_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004217 }
4218 }
4219
4220 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004221 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004222 copy_region.dstOffset, copy_region.extent, false);
Jeff Leger178b1e52020-10-05 12:22:23 -04004223 if (hazard.hazard) {
4224 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
sjfricke0bea06e2022-06-05 09:22:26 +09004225 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
Jeff Leger178b1e52020-10-05 12:22:23 -04004226 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06004227 region, cb_access_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004228 }
4229 if (skip) break;
4230 }
4231 }
4232
4233 return skip;
4234}
4235
Tony-LunarGb61514a2021-11-02 12:36:51 -06004236bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
4237 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
4238 return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
4239}
4240
4241bool SyncValidator::PreCallValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) const {
4242 return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
4243}
4244
4245void SyncValidator::RecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo, CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04004246 auto *cb_access_context = GetAccessContext(commandBuffer);
4247 assert(cb_access_context);
Tony-LunarGb61514a2021-11-02 12:36:51 -06004248 const auto tag = cb_access_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04004249 auto *context = cb_access_context->GetCurrentAccessContext();
4250 assert(context);
4251
Jeremy Gebben9f537102021-10-05 16:37:12 -06004252 auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
4253 auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04004254
4255 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
4256 const auto &copy_region = pCopyImageInfo->pRegions[region];
4257 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004258 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004259 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004260 }
4261 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004262 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
ziga-lunarg73746512022-03-23 23:08:17 +01004263 copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004264 }
4265 }
4266}
4267
Tony-LunarGb61514a2021-11-02 12:36:51 -06004268void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
4269 RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
4270}
4271
4272void SyncValidator::PreCallRecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) {
    RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
                                           dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
                                           bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                           pImageMemoryBarriers);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                    uint32_t bufferMemoryBarrierCount,
                                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                    uint32_t imageMemoryBarrierCount,
                                                    const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(),
                                                           srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                           pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                           imageMemoryBarrierCount, pImageMemoryBarriers);
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
                                                          const VkDependencyInfoKHR *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier2(VkCommandBuffer commandBuffer,
                                                       const VkDependencyInfo *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(),
                                                           *pDependencyInfo);
}

void SyncValidator::PreCallRecordCmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(),
                                                           *pDependencyInfo);
}

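// Note on the pattern above, repeated for most commands in this file: each
// PreCallValidate* entry builds a SyncOp (here SyncOpPipelineBarrier) and runs
// Validate() against the command buffer's access context without modifying it,
// while the matching PreCallRecord* entry stores the same SyncOp via
// RecordSyncOp<>() so its state update can be applied now and replayed at queue
// submission. As an illustrative application-side sketch (not validator code),
// the barrier recorded here is what separates a transfer write from a later
// transfer read of the same range:
//
//     vkCmdCopyBuffer(cb, src, staging, 1, &region);  // TRANSFER write to staging
//     VkMemoryBarrier barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr,
//                                VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 1, &barrier, 0, nullptr, 0, nullptr);
//     vkCmdCopyBuffer(cb, staging, dst, 1, &region);  // TRANSFER read of staging, now ordered
//
// Without the barrier, the second copy would be reported as READ_AFTER_WRITE
// when its ranges are checked against the recorded access state.
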
void SyncValidator::CreateDevice(const VkDeviceCreateInfo *pCreateInfo) {
    // The state tracker sets up the device state
    StateTracker::CreateDevice(pCreateInfo);

    // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
    // refactor would be messier without.
    // TODO: Find a good way to do this hooklessly.
    SetCommandBufferResetCallback([this](VkCommandBuffer command_buffer) -> void { ResetCommandBufferCallback(command_buffer); });
    SetCommandBufferFreeCallback([this](VkCommandBuffer command_buffer) -> void { FreeCommandBufferCallback(command_buffer); });

    QueueId queue_id = QueueSyncState::kQueueIdBase;
    ForEachShared<QUEUE_STATE>([this, &queue_id](const std::shared_ptr<QUEUE_STATE> &queue_state) {
        auto queue_flags = physical_device_state->queue_family_properties[queue_state->queueFamilyIndex].queueFlags;
        std::shared_ptr<QueueSyncState> queue_sync_state = std::make_shared<QueueSyncState>(queue_state, queue_flags, queue_id++);
        queue_sync_states_.emplace(std::make_pair(queue_state->Queue(), std::move(queue_sync_state)));
    });
}

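// Illustrative example of the queue setup above: a device exposing one graphics
// queue and one dedicated transfer queue would get QueueSyncState ids
// kQueueIdBase and kQueueIdBase + 1. The id values themselves are arbitrary;
// they only need to be unique per VkQueue so that later submit-time validation
// can attribute accesses to the queue that made them.
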
bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                            const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd_type) const {
    bool skip = false;
    auto cb_context = GetAccessContext(commandBuffer);
    if (cb_context) {
        SyncOpBeginRenderPass sync_op(cmd_type, *this, pRenderPassBegin, pSubpassBeginInfo);
        skip = sync_op.Validate(*cb_context);
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      VkSubpassContents contents) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                       const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                          const VkRenderPassBeginInfo *pRenderPassBegin,
                                                          const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
    return skip;
}

void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
                                                     VkResult result) {
    // The state tracker sets up the command buffer state
    StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);

    // Create/initialize the structure that tracks accesses at the command buffer scope.
    auto cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    cb_access_context->Reset();
}

void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd_type) {
    auto cb_context = GetAccessContext(commandBuffer);
    if (cb_context) {
        cb_context->RecordSyncOp<SyncOpBeginRenderPass>(cmd_type, *this, pRenderPassBegin, pSubpassBeginInfo);
    }
}

void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                         const VkRenderPassBeginInfo *pRenderPassBegin,
                                                         const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
}

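// All of the begin-render-pass entry points above (the Vulkan 1.0 form, the core
// "2" form, and the KHR "2" form) funnel into the shared
// ValidateBeginRenderPass/RecordCmdBeginRenderPass helpers; the 1.0 variant is
// promoted by wrapping its VkSubpassContents in a LvlInitStruct'd
// VkSubpassBeginInfo, so only one SyncOpBeginRenderPass code path exists.
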
bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                           const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd_type) const {
    bool skip = false;

    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    SyncOpNextSubpass sync_op(cmd_type, *this, pSubpassBeginInfo, pSubpassEndInfo);
    return sync_op.Validate(*cb_context);
}

bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
    // Convert to a NextSubpass2
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
    skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                      const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
    return skip;
}

bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                   const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
    return skip;
}

void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                         const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd_type) {
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpNextSubpass>(cmd_type, *this, pSubpassBeginInfo, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
}

void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                  const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
}

void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                     const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
}

bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
                                             CMD_TYPE cmd_type) const {
    bool skip = false;

    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpEndRenderPass sync_op(cmd_type, *this, pSubpassEndInfo);
    skip |= sync_op.Validate(*cb_context);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
    skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                        const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
    return skip;
}

void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
                                           CMD_TYPE cmd_type) {
    // Resolve all of the subpass contexts back into the command buffer's context
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpEndRenderPass>(cmd_type, *this, pSubpassEndInfo);
}

// Simple heuristic rule to detect WAW operations representing algorithmically safe or increment
// updates to a resource which do not conflict at the byte level.
// TODO: Revisit this rule to see if it needs to be tighter or looser
// TODO: Add programmatic control over suppression heuristics
bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
    return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
}

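// Illustrative case for the heuristic above (an assumption about intent, not a
// spec guarantee): two back-to-back dispatches that each perform the same kind
// of storage write to the same bytes, e.g. an atomic counter increment, produce
// a WRITE_AFTER_WRITE hazard whose prior_access equals the current usage's
// access bit. That is exactly the
// (FlagBit(hazard.usage_index) == hazard.prior_access) case suppressed here, on
// the theory that same-kind overlapping writes are frequently deliberate.
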
void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
    StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
    StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
    StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}

template <typename RegionType>
bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                 VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
                                                 CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        HazardResult hazard;
        if (dst_image) {
            if (src_buffer) {
                ResourceAccessRange src_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
                hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
                if (hazard.hazard) {
                    // PHASE1 TODO -- add tag information to log msg when useful.
                    skip |=
                        LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
                }
            }

            hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
                                           copy_region.imageOffset, copy_region.imageExtent, false);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (skip) break;
        }
        if (skip) break;
    }
    return skip;
}

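// Worked example for the range math above (illustrative values): a
// VkBufferImageCopy with bufferOffset = 256 copying a tightly packed 64x64
// region of a VK_FORMAT_R8G8B8A8_UNORM image gives GetBufferSizeFromCopyImage a
// footprint of 64 * 64 * 4 = 16384 bytes, so the buffer read is checked over
// [256, 256 + 16384), while the image write is checked per
// subresource/offset/extent. Note the checks run against accesses recorded by
// earlier commands; overlaps between regions of this same copy are not detected
// here, since access state is only updated in the Record phase below.
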
bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                        VkImageLayout dstImageLayout, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
                                        CMD_COPYBUFFERTOIMAGE);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
                                                            const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                                        pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                                        pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
                                                         const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                                        pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                                        pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
}

template <typename RegionType>
void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                               VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
                                               CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);

    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (dst_image) {
            if (src_buffer) {
                ResourceAccessRange src_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
                context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
            }
            context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
        }
    }
}

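// The Record path above deliberately mirrors the Validate loop: the same buffer
// ranges and image subresource/extent values computed during validation are
// written into the access context with UpdateAccessState, stamped with this
// command's ResourceUsageTag so that any later hazard report can name the
// command that made the prior access.
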
void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                      VkImageLayout dstImageLayout, uint32_t regionCount,
                                                      const VkBufferImageCopy *pRegions) {
    StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
    RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, CMD_COPYBUFFERTOIMAGE);
}

void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
                                                          const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
    StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
    RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                               pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                               pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
}

void SyncValidator::PreCallRecordCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
                                                       const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) {
    StateTracker::PreCallRecordCmdCopyBufferToImage2(commandBuffer, pCopyBufferToImageInfo);
    RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                               pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                               pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
}

template <typename RegionType>
bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                 VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
                                                 CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
                                                copy_region.imageOffset, copy_region.imageExtent, false);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (dst_mem) {
                ResourceAccessRange dst_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
                hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
                if (hazard.hazard) {
                    skip |=
                        LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
                }
            }
        }
        if (skip) break;
    }
    return skip;
}

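// Note on the dst_mem check above: for sparsely bound buffers dst_mem is
// VK_NULL_HANDLE, so the buffer-side hazard check is skipped; sparse residency
// means there is no single contiguous VkDeviceMemory range to express the
// access against. This reflects a limitation of the current range-based
// tracking, not an assertion that such copies are hazard-free.
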
bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                        VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
                                        CMD_COPYIMAGETOBUFFER);
}

bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
                                                            const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
    return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                                        pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                                        pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
                                                         const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) const {
    return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                                        pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                                        pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
}

template <typename RegionType>
void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                               VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
                                               CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);

    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
    const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
            if (dst_buffer) {
                ResourceAccessRange dst_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
                context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
            }
        }
    }
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                      VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
    RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, CMD_COPYIMAGETOBUFFER);
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
                                                          const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
    StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
    RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                               pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                               pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
                                                       const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) {
    StateTracker::PreCallRecordCmdCopyImageToBuffer2(commandBuffer, pCopyImageToBufferInfo);
    RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                               pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                               pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
}

template <typename RegionType>
bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                         VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                         const RegionType *pRegions, VkFilter filter, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const char *caller_name = CommandTypeString(cmd_type);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
                                 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
                                 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
            auto hazard =
                context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent, false);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", caller_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
        }

        if (dst_image) {
            VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
                                 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
                                 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
            auto hazard =
                context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent, false);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", caller_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

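// Worked example for the offset/extent normalization above: VkImageBlit permits
// "mirrored" blits where the second offset is smaller than the first on some
// axis. For srcOffsets[0] = (64, 64, 1) and srcOffsets[1] = (0, 0, 0), the code
// computes offset = (0, 0, 0) and extent = (64, 64, 1), i.e. the axis-aligned
// bounding box of the blit; hazard detection only needs to know which texels
// are touched, not the direction in which they are sampled or written.
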
bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageBlit *pRegions, VkFilter filter) const {
    return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
                                CMD_BLITIMAGE);
}

bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
                                                    const VkBlitImageInfo2KHR *pBlitImageInfo) const {
    return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                                pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                                pBlitImageInfo->filter, CMD_BLITIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdBlitImage2(VkCommandBuffer commandBuffer,
                                                 const VkBlitImageInfo2 *pBlitImageInfo) const {
    return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                                pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                                pBlitImageInfo->filter, CMD_BLITIMAGE2);
}

template <typename RegionType>
void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                       VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                       const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
                                 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
                                 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
            context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       blit_region.srcSubresource, offset, extent, tag);
        }
        if (dst_image) {
            VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
                                 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
                                 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
            context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       blit_region.dstSubresource, offset, extent, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageBlit *pRegions, VkFilter filter) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
    StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                            pRegions, filter);
    RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
}

void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
    StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
    RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                       pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                       pBlitImageInfo->filter, tag);
}

void SyncValidator::PreCallRecordCmdBlitImage2(VkCommandBuffer commandBuffer, const VkBlitImageInfo2 *pBlitImageInfo) {
    StateTracker::PreCallRecordCmdBlitImage2(commandBuffer, pBlitImageInfo);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2);
    RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                       pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                       pBlitImageInfo->filter, tag);
}

bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
                                           VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
                                           const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
                                           CMD_TYPE cmd_type) const {
    bool skip = false;
    if (drawCount == 0) return skip;

    const char *caller_name = CommandTypeString(cmd_type);
    auto buf_state = Get<BUFFER_STATE>(buffer);
    VkDeviceSize size = struct_size;
    if (drawCount == 1 || stride == size) {
        if (drawCount > 1) size *= drawCount;
        const ResourceAccessRange range = MakeRange(offset, size);
        auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
        if (hazard.hazard) {
            skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                             "%s: Hazard %s for indirect %s in %s. Access info %s.", caller_name, string_SyncHazard(hazard.hazard),
                             report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
                             cb_context.FormatHazard(hazard).c_str());
        }
    } else {
        for (uint32_t i = 0; i < drawCount; ++i) {
            const ResourceAccessRange range = MakeRange(offset + i * stride, size);
            auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
            if (hazard.hazard) {
                skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for indirect %s in %s. Access info %s.", caller_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(buffer).c_str(),
                                 report_data->FormatHandle(commandBuffer).c_str(), cb_context.FormatHazard(hazard).c_str());
                break;
            }
        }
    }
    return skip;
}

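// Worked example for the two branches above (illustrative numbers): with
// struct_size = sizeof(VkDrawIndirectCommand) = 16 bytes, drawCount = 3 and
// stride = 16, the reads are contiguous, so one range [offset, offset + 48) is
// checked. With stride = 32 the structs are padded, so three separate 16-byte
// ranges at offset, offset + 32, and offset + 64 are checked instead; checking
// one 96-byte span would falsely flag unrelated accesses to the padding bytes.
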
void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag tag, const VkDeviceSize struct_size,
                                         const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
                                         uint32_t stride) {
    auto buf_state = Get<BUFFER_STATE>(buffer);
    VkDeviceSize size = struct_size;
    if (drawCount == 1 || stride == size) {
        if (drawCount > 1) size *= drawCount;
        const ResourceAccessRange range = MakeRange(offset, size);
        context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
    } else {
        for (uint32_t i = 0; i < drawCount; ++i) {
            const ResourceAccessRange range = MakeRange(offset + i * stride, size);
            context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
                                      tag);
        }
    }
}

bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
                                        VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                        CMD_TYPE cmd_type) const {
    bool skip = false;

    auto count_buf_state = Get<BUFFER_STATE>(buffer);
    const ResourceAccessRange range = MakeRange(offset, 4);
    auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
    if (hazard.hazard) {
        skip |= LogError(count_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                         "%s: Hazard %s for countBuffer %s in %s. Access info %s.", CommandTypeString(cmd_type),
                         string_SyncHazard(hazard.hazard), report_data->FormatHandle(buffer).c_str(),
                         report_data->FormatHandle(commandBuffer).c_str(), cb_context.FormatHazard(hazard).c_str());
    }
    return skip;
}

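// The fixed MakeRange(offset, 4) above reflects that the *IndirectCount
// commands read exactly one 32-bit draw count from the count buffer, so only
// those four bytes participate in hazard detection; writes elsewhere in the
// same buffer are deliberately not flagged.
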
void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag tag, VkBuffer buffer, VkDeviceSize offset) {
    auto count_buf_state = Get<BUFFER_STATE>(buffer);
    const ResourceAccessRange range = MakeRange(offset, 4);
    context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
}

bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCH);
    return skip;
}

void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
}

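// The dispatch and draw entry points in this section follow the same split as
// the commands above: a Validate phase that checks descriptor, vertex/index,
// and subpass-attachment accesses against the current access state, and a
// Record phase that tags those same accesses. As an illustrative
// application-side case (assumed, not taken from a specific test): writing an
// image with vkCmdCopyBufferToImage and then sampling it in a dispatch with no
// intervening barrier would surface from the descriptor check as a
// READ_AFTER_WRITE hazard on the image's subresources.
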
5059bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06005060 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06005061 const auto *cb_access_context = GetAccessContext(commandBuffer);
5062 assert(cb_access_context);
5063 if (!cb_access_context) return skip;
5064
5065 const auto *context = cb_access_context->GetCurrentAccessContext();
5066 assert(context);
5067 if (!context) return skip;
5068
sjfricke0bea06e2022-06-05 09:22:26 +09005069 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCHINDIRECT);
John Zulauffaea0ee2021-01-14 14:01:32 -07005070 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
sjfricke0bea06e2022-06-05 09:22:26 +09005071 1, sizeof(VkDispatchIndirectCommand), CMD_DISPATCHINDIRECT);
locke-lunargff255f92020-05-13 18:53:52 -06005072 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005073}
5074
5075void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005076 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06005077 auto *cb_access_context = GetAccessContext(commandBuffer);
5078 assert(cb_access_context);
5079 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
5080 auto *context = cb_access_context->GetCurrentAccessContext();
5081 assert(context);
5082
locke-lunarg61870c22020-06-09 14:51:50 -06005083 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
5084 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06005085}
5086
5087bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
5088 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06005089 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06005090 const auto *cb_access_context = GetAccessContext(commandBuffer);
5091 assert(cb_access_context);
5092 if (!cb_access_context) return skip;
5093
sjfricke0bea06e2022-06-05 09:22:26 +09005094 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAW);
5095 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, CMD_DRAW);
5096 skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAW);
locke-lunarga4d39ea2020-05-22 14:17:29 -06005097 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005098}
5099
5100void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
5101 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005102 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06005103 auto *cb_access_context = GetAccessContext(commandBuffer);
5104 assert(cb_access_context);
5105 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06005106
locke-lunarg61870c22020-06-09 14:51:50 -06005107 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
5108 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
5109 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005110}
5111
5112bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
5113 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06005114 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06005115 const auto *cb_access_context = GetAccessContext(commandBuffer);
5116 assert(cb_access_context);
5117 if (!cb_access_context) return skip;
5118
sjfricke0bea06e2022-06-05 09:22:26 +09005119 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXED);
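    // ValidateDrawVertexIndex narrows the index-buffer check using the draw parameters: for
    // indexCount N and firstIndex F the region of interest is roughly
    // [F * indexSize, (F + N) * indexSize) in the bound index buffer. (A sketch of the intent;
    // the exact clamping lives in the helper.)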
5120 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, CMD_DRAWINDEXED);
5121 skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAWINDEXED);
locke-lunarga4d39ea2020-05-22 14:17:29 -06005122 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005123}
5124
5125void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
5126 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005127 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06005128 auto *cb_access_context = GetAccessContext(commandBuffer);
5129 assert(cb_access_context);
5130 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06005131
locke-lunarg61870c22020-06-09 14:51:50 -06005132 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
5133 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
5134 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005135}
5136
5137bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5138 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005139 bool skip = false;
5140 if (drawCount == 0) return skip;
5141
locke-lunargff255f92020-05-13 18:53:52 -06005142 const auto *cb_access_context = GetAccessContext(commandBuffer);
5143 assert(cb_access_context);
5144 if (!cb_access_context) return skip;
5145
5146 const auto *context = cb_access_context->GetCurrentAccessContext();
5147 assert(context);
5148 if (!context) return skip;
5149
sjfricke0bea06e2022-06-05 09:22:26 +09005150 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDIRECT);
5151 skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAWINDIRECT);
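    // Sketch (an assumption about the helper, not its actual code) of the span the indirect
    // argument check must cover: drawCount records spaced `stride` bytes apart, of which only the
    // last record's sizeof(VkDrawIndirectCommand) bytes are read:
    //   VkDeviceSize span = (drawCount - 1) * stride + sizeof(VkDrawIndirectCommand);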
John Zulauffaea0ee2021-01-14 14:01:32 -07005152 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
sjfricke0bea06e2022-06-05 09:22:26 +09005153 drawCount, stride, CMD_DRAWINDIRECT);
locke-lunargff255f92020-05-13 18:53:52 -06005154
5155    // TODO: For now, we validate the whole vertex buffer. This may cause false positives.
5156    // The VkDrawIndirectCommand buffer contents can change at any time up to queue submission,
5157    // so in the future we will validate the vertex buffer at QueueSubmit time instead.
sjfricke0bea06e2022-06-05 09:22:26 +09005158 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, CMD_DRAWINDIRECT);
locke-lunargff255f92020-05-13 18:53:52 -06005159 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005160}
5161
5162void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5163 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005164 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06005165 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06005166 auto *cb_access_context = GetAccessContext(commandBuffer);
5167 assert(cb_access_context);
5168 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
5169 auto *context = cb_access_context->GetCurrentAccessContext();
5170 assert(context);
5171
locke-lunarg61870c22020-06-09 14:51:50 -06005172 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
5173 cb_access_context->RecordDrawSubpassAttachment(tag);
5174 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06005175
5176    // TODO: For now, we record the whole vertex buffer. This may cause false positives.
5177    // The VkDrawIndirectCommand buffer contents can change at any time up to queue submission,
5178    // so in the future we will record the vertex buffer at QueueSubmit time instead.
locke-lunarg61870c22020-06-09 14:51:50 -06005179 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005180}
5181
5182bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5183 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005184 bool skip = false;
5185 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06005186 const auto *cb_access_context = GetAccessContext(commandBuffer);
5187 assert(cb_access_context);
5188 if (!cb_access_context) return skip;
5189
5190 const auto *context = cb_access_context->GetCurrentAccessContext();
5191 assert(context);
5192 if (!context) return skip;
5193
sjfricke0bea06e2022-06-05 09:22:26 +09005194 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXEDINDIRECT);
5195 skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAWINDEXEDINDIRECT);
John Zulauffaea0ee2021-01-14 14:01:32 -07005196 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
sjfricke0bea06e2022-06-05 09:22:26 +09005197 offset, drawCount, stride, CMD_DRAWINDEXEDINDIRECT);
locke-lunargff255f92020-05-13 18:53:52 -06005198
5199    // TODO: For now, we validate the whole index and vertex buffer. This may cause false positives.
5200    // The VkDrawIndexedIndirectCommand buffer contents can change at any time up to queue submission,
5201    // so in the future we will validate the index and vertex buffer at QueueSubmit time instead.
sjfricke0bea06e2022-06-05 09:22:26 +09005202 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, CMD_DRAWINDEXEDINDIRECT);
locke-lunargff255f92020-05-13 18:53:52 -06005203 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005204}
5205
5206void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5207 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005208 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06005209 auto *cb_access_context = GetAccessContext(commandBuffer);
5210 assert(cb_access_context);
5211 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
5212 auto *context = cb_access_context->GetCurrentAccessContext();
5213 assert(context);
5214
locke-lunarg61870c22020-06-09 14:51:50 -06005215 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
5216 cb_access_context->RecordDrawSubpassAttachment(tag);
5217 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06005218
5219    // TODO: For now, we record the whole index and vertex buffer. This may cause false positives.
5220    // The VkDrawIndexedIndirectCommand buffer contents can change at any time up to queue submission,
5221    // so in the future we will record the index and vertex buffer at QueueSubmit time instead.
locke-lunarg61870c22020-06-09 14:51:50 -06005222 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005223}
5224
5225bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5226 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
sjfricke0bea06e2022-06-05 09:22:26 +09005227 uint32_t stride, CMD_TYPE cmd_type) const {
locke-lunargff255f92020-05-13 18:53:52 -06005228 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06005229 const auto *cb_access_context = GetAccessContext(commandBuffer);
5230 assert(cb_access_context);
5231 if (!cb_access_context) return skip;
5232
5233 const auto *context = cb_access_context->GetCurrentAccessContext();
5234 assert(context);
5235 if (!context) return skip;
5236
sjfricke0bea06e2022-06-05 09:22:26 +09005237 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, cmd_type);
5238 skip |= cb_access_context->ValidateDrawSubpassAttachment(cmd_type);
John Zulauffaea0ee2021-01-14 14:01:32 -07005239 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
sjfricke0bea06e2022-06-05 09:22:26 +09005240 maxDrawCount, stride, cmd_type);
5241 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06005242
5243    // TODO: For now, we validate the whole vertex buffer. This may cause false positives.
5244    // The VkDrawIndirectCommand buffer contents can change at any time up to queue submission,
5245    // so in the future we will validate the vertex buffer at QueueSubmit time instead.
sjfricke0bea06e2022-06-05 09:22:26 +09005246 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06005247 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005248}
5249
5250bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5251 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
5252 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005253 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
sjfricke0bea06e2022-06-05 09:22:26 +09005254 CMD_DRAWINDIRECTCOUNT);
locke-lunarge1a67022020-04-29 00:15:36 -06005255}
5256
sfricke-samsung85584a72021-09-30 21:43:38 -07005257void SyncValidator::RecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5258 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
5259 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06005260 auto *cb_access_context = GetAccessContext(commandBuffer);
5261 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07005262 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06005263 auto *context = cb_access_context->GetCurrentAccessContext();
5264 assert(context);
5265
locke-lunarg61870c22020-06-09 14:51:50 -06005266 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
5267 cb_access_context->RecordDrawSubpassAttachment(tag);
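    // Only one record's worth of the indirect buffer is tagged here (count 1, `stride` apart):
    // the real draw count lives in countBuffer and is unknown until execution. The validate pass
    // does use maxDrawCount as a conservative upper bound.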
5268 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
5269 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06005270
5271    // TODO: For now, we record the whole vertex buffer. This may cause false positives.
5272    // The VkDrawIndirectCommand buffer contents can change at any time up to queue submission,
5273    // so in the future we will record the vertex buffer at QueueSubmit time instead.
locke-lunarg61870c22020-06-09 14:51:50 -06005274 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005275}
5276
sfricke-samsung85584a72021-09-30 21:43:38 -07005277void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5278 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
5279 uint32_t stride) {
5280 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
5281 stride);
5282 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
5283 CMD_DRAWINDIRECTCOUNT);
5284}
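
// The KHR and AMD *Count variants below differ from the core entry points only in the CMD_TYPE
// used for reporting; they share the same Validate/Record helpers.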
locke-lunarge1a67022020-04-29 00:15:36 -06005285bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5286 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5287 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005288 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
sjfricke0bea06e2022-06-05 09:22:26 +09005289 CMD_DRAWINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06005290}
5291
5292void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5293 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5294 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005295 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
5296 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07005297 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
5298 CMD_DRAWINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06005299}
5300
5301bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5302 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5303 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005304 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
sjfricke0bea06e2022-06-05 09:22:26 +09005305 CMD_DRAWINDIRECTCOUNTAMD);
locke-lunarge1a67022020-04-29 00:15:36 -06005306}
5307
5308void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5309 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5310 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005311 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
5312 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07005313 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
5314 CMD_DRAWINDIRECTCOUNTAMD);
locke-lunargff255f92020-05-13 18:53:52 -06005315}
5316
5317bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5318 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
sjfricke0bea06e2022-06-05 09:22:26 +09005319 uint32_t stride, CMD_TYPE cmd_type) const {
locke-lunargff255f92020-05-13 18:53:52 -06005320 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06005321 const auto *cb_access_context = GetAccessContext(commandBuffer);
5322 assert(cb_access_context);
5323 if (!cb_access_context) return skip;
5324
5325 const auto *context = cb_access_context->GetCurrentAccessContext();
5326 assert(context);
5327 if (!context) return skip;
5328
sjfricke0bea06e2022-06-05 09:22:26 +09005329 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, cmd_type);
5330 skip |= cb_access_context->ValidateDrawSubpassAttachment(cmd_type);
John Zulauffaea0ee2021-01-14 14:01:32 -07005331 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
sjfricke0bea06e2022-06-05 09:22:26 +09005332 offset, maxDrawCount, stride, cmd_type);
5333 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06005334
5335    // TODO: For now, we validate the whole index and vertex buffer. This may cause false positives.
5336    // The VkDrawIndexedIndirectCommand buffer contents can change at any time up to queue submission,
5337    // so in the future we will validate the index and vertex buffer at QueueSubmit time instead.
sjfricke0bea06e2022-06-05 09:22:26 +09005338 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06005339 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06005340}
5341
5342bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5343 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5344 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005345 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
sjfricke0bea06e2022-06-05 09:22:26 +09005346 CMD_DRAWINDEXEDINDIRECTCOUNT);
locke-lunarge1a67022020-04-29 00:15:36 -06005347}
5348
sfricke-samsung85584a72021-09-30 21:43:38 -07005349void SyncValidator::RecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5350 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
5351 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06005352 auto *cb_access_context = GetAccessContext(commandBuffer);
5353 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07005354 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06005355 auto *context = cb_access_context->GetCurrentAccessContext();
5356 assert(context);
5357
locke-lunarg61870c22020-06-09 14:51:50 -06005358 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
5359 cb_access_context->RecordDrawSubpassAttachment(tag);
5360 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
5361 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06005362
5363    // TODO: For now, we record the whole index and vertex buffer. This may cause false positives.
5364    // The VkDrawIndexedIndirectCommand buffer contents can change at any time up to queue submission,
locke-lunarg61870c22020-06-09 14:51:50 -06005365    // so in the future we will record the index and vertex buffer at QueueSubmit time instead.
5366 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005367}
5368
sfricke-samsung85584a72021-09-30 21:43:38 -07005369void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5370 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5371 uint32_t maxDrawCount, uint32_t stride) {
5372 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
5373 maxDrawCount, stride);
5374 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
5375 CMD_DRAWINDEXEDINDIRECTCOUNT);
5376}
5377
locke-lunarge1a67022020-04-29 00:15:36 -06005378bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
5379 VkDeviceSize offset, VkBuffer countBuffer,
5380 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
5381 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005382 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
sjfricke0bea06e2022-06-05 09:22:26 +09005383 CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06005384}
5385
5386void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5387 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5388 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005389 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
5390 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07005391 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
5392 CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06005393}
5394
5395bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
5396 VkDeviceSize offset, VkBuffer countBuffer,
5397 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
5398 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06005399 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
sjfricke0bea06e2022-06-05 09:22:26 +09005400 CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
locke-lunarge1a67022020-04-29 00:15:36 -06005401}
5402
5403void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5404 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
5405 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005406 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
5407 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07005408 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
5409 CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
locke-lunarge1a67022020-04-29 00:15:36 -06005410}
5411
5412bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
5413 const VkClearColorValue *pColor, uint32_t rangeCount,
5414 const VkImageSubresourceRange *pRanges) const {
5415 bool skip = false;
5416 const auto *cb_access_context = GetAccessContext(commandBuffer);
5417 assert(cb_access_context);
5418 if (!cb_access_context) return skip;
5419
5420 const auto *context = cb_access_context->GetCurrentAccessContext();
5421 assert(context);
5422 if (!context) return skip;
5423
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005424 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005425
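    // Illustrative usage (not layer code): the classic hazard here is clearing an image a prior
    // draw may still be reading, e.g. with no barrier after a sampled read:
    //   VkImageSubresourceRange range{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //   vkCmdClearColorImage(cmd, image, VK_IMAGE_LAYOUT_GENERAL, &color, 1, &range);
    //   // -> reported as WRITE_AFTER_READ against the earlier fragment-shader access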
5426 for (uint32_t index = 0; index < rangeCount; index++) {
5427 const auto &range = pRanges[index];
5428 if (image_state) {
Aitor Camachoe67f2c72022-06-08 14:41:58 +02005429 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, false);
locke-lunarge1a67022020-04-29 00:15:36 -06005430 if (hazard.hazard) {
5431 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005432 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005433 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauf397e68b2022-04-19 11:44:07 -06005434 cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005435 }
5436 }
5437 }
5438 return skip;
5439}
5440
5441void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
5442 const VkClearColorValue *pColor, uint32_t rangeCount,
5443 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005444 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06005445 auto *cb_access_context = GetAccessContext(commandBuffer);
5446 assert(cb_access_context);
5447 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
5448 auto *context = cb_access_context->GetCurrentAccessContext();
5449 assert(context);
5450
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005451 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005452
5453 for (uint32_t index = 0; index < rangeCount; index++) {
5454 const auto &range = pRanges[index];
5455 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06005456 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005457 }
5458 }
5459}
5460
5461bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
5462 VkImageLayout imageLayout,
5463 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
5464 const VkImageSubresourceRange *pRanges) const {
5465 bool skip = false;
5466 const auto *cb_access_context = GetAccessContext(commandBuffer);
5467 assert(cb_access_context);
5468 if (!cb_access_context) return skip;
5469
5470 const auto *context = cb_access_context->GetCurrentAccessContext();
5471 assert(context);
5472 if (!context) return skip;
5473
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005474 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005475
5476 for (uint32_t index = 0; index < rangeCount; index++) {
5477 const auto &range = pRanges[index];
5478 if (image_state) {
Aitor Camachoe67f2c72022-06-08 14:41:58 +02005479 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, false);
locke-lunarge1a67022020-04-29 00:15:36 -06005480 if (hazard.hazard) {
5481 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005482 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005483 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauf397e68b2022-04-19 11:44:07 -06005484 cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005485 }
5486 }
5487 }
5488 return skip;
5489}
5490
5491void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
5492 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
5493 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005494 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06005495 auto *cb_access_context = GetAccessContext(commandBuffer);
5496 assert(cb_access_context);
5497 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
5498 auto *context = cb_access_context->GetCurrentAccessContext();
5499 assert(context);
5500
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005501 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005502
5503 for (uint32_t index = 0; index < rangeCount; index++) {
5504 const auto &range = pRanges[index];
5505 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06005506 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005507 }
5508 }
5509}
5510
5511bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
5512 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
5513 VkDeviceSize dstOffset, VkDeviceSize stride,
5514 VkQueryResultFlags flags) const {
5515 bool skip = false;
5516 const auto *cb_access_context = GetAccessContext(commandBuffer);
5517 assert(cb_access_context);
5518 if (!cb_access_context) return skip;
5519
5520 const auto *context = cb_access_context->GetCurrentAccessContext();
5521 assert(context);
5522 if (!context) return skip;
5523
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005524 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005525
5526 if (dst_buffer) {
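        // Conservative span: queryCount * stride can over-cover when the final query's payload is
        // smaller than the stride, which errs on the side of reporting.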
John Zulauf3e86bf02020-09-12 10:47:57 -06005527 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005528 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005529 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005530 skip |=
5531 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5532 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauf397e68b2022-04-19 11:44:07 -06005533 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005534 }
5535 }
locke-lunargff255f92020-05-13 18:53:52 -06005536
5537    // TODO: Track accesses to the VkQueryPool itself as well.
locke-lunarge1a67022020-04-29 00:15:36 -06005538 return skip;
5539}
5540
5541void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
5542 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5543 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005544 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
5545 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06005546 auto *cb_access_context = GetAccessContext(commandBuffer);
5547 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06005548 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06005549 auto *context = cb_access_context->GetCurrentAccessContext();
5550 assert(context);
5551
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005552 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005553
5554 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005555 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005556 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005557 }
locke-lunargff255f92020-05-13 18:53:52 -06005558
5559    // TODO: Track accesses to the VkQueryPool itself as well.
locke-lunarge1a67022020-04-29 00:15:36 -06005560}
5561
5562bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5563 VkDeviceSize size, uint32_t data) const {
5564 bool skip = false;
5565 const auto *cb_access_context = GetAccessContext(commandBuffer);
5566 assert(cb_access_context);
5567 if (!cb_access_context) return skip;
5568
5569 const auto *context = cb_access_context->GetCurrentAccessContext();
5570 assert(context);
5571 if (!context) return skip;
5572
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005573 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005574
5575 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005576 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005577 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005578 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005579 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005580 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauf397e68b2022-04-19 11:44:07 -06005581 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005582 }
5583 }
5584 return skip;
5585}
5586
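// MakeRange(*dst_buffer, dstOffset, size) is the overload that takes the buffer state, presumably
// so that a VK_WHOLE_SIZE fill can be resolved against the actual buffer size; vkCmdUpdateBuffer
// further below uses the plain (offset, size) overload because VK_WHOLE_SIZE is not legal there.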
5587void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5588 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005589 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06005590 auto *cb_access_context = GetAccessContext(commandBuffer);
5591 assert(cb_access_context);
5592 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
5593 auto *context = cb_access_context->GetCurrentAccessContext();
5594 assert(context);
5595
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005596 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005597
5598 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005599 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005600 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005601 }
5602}
5603
5604bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5605 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5606 const VkImageResolve *pRegions) const {
5607 bool skip = false;
5608 const auto *cb_access_context = GetAccessContext(commandBuffer);
5609 assert(cb_access_context);
5610 if (!cb_access_context) return skip;
5611
5612 const auto *context = cb_access_context->GetCurrentAccessContext();
5613 assert(context);
5614 if (!context) return skip;
5615
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005616 auto src_image = Get<IMAGE_STATE>(srcImage);
5617 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarge1a67022020-04-29 00:15:36 -06005618
5619 for (uint32_t region = 0; region < regionCount; region++) {
5620 const auto &resolve_region = pRegions[region];
5621 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005622 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02005623 resolve_region.srcOffset, resolve_region.extent, false);
locke-lunarge1a67022020-04-29 00:15:36 -06005624 if (hazard.hazard) {
5625 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005626 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005627 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06005628 cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005629 }
5630 }
5631
5632 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005633 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02005634 resolve_region.dstOffset, resolve_region.extent, false);
locke-lunarge1a67022020-04-29 00:15:36 -06005635 if (hazard.hazard) {
5636 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005637 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005638 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06005639 cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005640 }
5641 if (skip) break;
5642 }
5643 }
5644
5645 return skip;
5646}
5647
5648void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5649 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5650 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005651 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5652 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06005653 auto *cb_access_context = GetAccessContext(commandBuffer);
5654 assert(cb_access_context);
5655 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
5656 auto *context = cb_access_context->GetCurrentAccessContext();
5657 assert(context);
5658
Jeremy Gebben9f537102021-10-05 16:37:12 -06005659 auto src_image = Get<IMAGE_STATE>(srcImage);
5660 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarge1a67022020-04-29 00:15:36 -06005661
5662 for (uint32_t region = 0; region < regionCount; region++) {
5663 const auto &resolve_region = pRegions[region];
5664 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005665 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005666 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005667 }
5668 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005669 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005670 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005671 }
5672 }
5673}
5674
Tony-LunarG562fc102021-11-12 13:58:35 -07005675bool SyncValidator::ValidateCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
5676 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04005677 bool skip = false;
5678 const auto *cb_access_context = GetAccessContext(commandBuffer);
5679 assert(cb_access_context);
5680 if (!cb_access_context) return skip;
5681
5682 const auto *context = cb_access_context->GetCurrentAccessContext();
5683 assert(context);
5684 if (!context) return skip;
5685
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005686 auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5687 auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04005688
5689 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5690 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5691 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005692 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02005693 resolve_region.srcOffset, resolve_region.extent, false);
Jeff Leger178b1e52020-10-05 12:22:23 -04005694 if (hazard.hazard) {
5695 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
sjfricke0bea06e2022-06-05 09:22:26 +09005696 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
Jeff Leger178b1e52020-10-05 12:22:23 -04005697 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06005698 region, cb_access_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005699 }
5700 }
5701
5702 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005703 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02005704 resolve_region.dstOffset, resolve_region.extent, false);
Jeff Leger178b1e52020-10-05 12:22:23 -04005705 if (hazard.hazard) {
5706 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
sjfricke0bea06e2022-06-05 09:22:26 +09005707 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
Jeff Leger178b1e52020-10-05 12:22:23 -04005708 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06005709 region, cb_access_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005710 }
5711 if (skip) break;
5712 }
5713 }
5714
5715 return skip;
5716}
5717
Tony-LunarG562fc102021-11-12 13:58:35 -07005718bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5719 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
5720 return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
5721}
5722
5723bool SyncValidator::PreCallValidateCmdResolveImage2(VkCommandBuffer commandBuffer,
5724 const VkResolveImageInfo2 *pResolveImageInfo) const {
5725 return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
5726}
5727
5728void SyncValidator::RecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
5729 CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04005730 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
5731 auto *cb_access_context = GetAccessContext(commandBuffer);
5732 assert(cb_access_context);
Tony-LunarG562fc102021-11-12 13:58:35 -07005733 const auto tag = cb_access_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04005734 auto *context = cb_access_context->GetCurrentAccessContext();
5735 assert(context);
5736
Jeremy Gebben9f537102021-10-05 16:37:12 -06005737 auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5738 auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04005739
5740 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5741 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5742 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005743 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005744 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005745 }
5746 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005747 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005748 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005749 }
5750 }
5751}
5752
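// Both the core and KHR entry points funnel into the shared helpers above; VkResolveImageInfo2KHR
// is an alias of VkResolveImageInfo2, so a single validate/record path covers both.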
Tony-LunarG562fc102021-11-12 13:58:35 -07005753void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5754 const VkResolveImageInfo2KHR *pResolveImageInfo) {
5755 RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
5756}
5757
5758void SyncValidator::PreCallRecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2 *pResolveImageInfo) {
5759 RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
5760}
5761
locke-lunarge1a67022020-04-29 00:15:36 -06005762bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5763 VkDeviceSize dataSize, const void *pData) const {
5764 bool skip = false;
5765 const auto *cb_access_context = GetAccessContext(commandBuffer);
5766 assert(cb_access_context);
5767 if (!cb_access_context) return skip;
5768
5769 const auto *context = cb_access_context->GetCurrentAccessContext();
5770 assert(context);
5771 if (!context) return skip;
5772
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005773 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005774
5775 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005776 // VK_WHOLE_SIZE not allowed
5777 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005778 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005779 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005780 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005781 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauf397e68b2022-04-19 11:44:07 -06005782 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005783 }
5784 }
5785 return skip;
5786}
5787
5788void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5789 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005790 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06005791 auto *cb_access_context = GetAccessContext(commandBuffer);
5792 assert(cb_access_context);
5793 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
5794 auto *context = cb_access_context->GetCurrentAccessContext();
5795 assert(context);
5796
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005797 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005798
5799 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005800 // VK_WHOLE_SIZE not allowed
5801 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005802 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005803 }
5804}
locke-lunargff255f92020-05-13 18:53:52 -06005805
5806bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5807 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5808 bool skip = false;
5809 const auto *cb_access_context = GetAccessContext(commandBuffer);
5810 assert(cb_access_context);
5811 if (!cb_access_context) return skip;
5812
5813 const auto *context = cb_access_context->GetCurrentAccessContext();
5814 assert(context);
5815 if (!context) return skip;
5816
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005817 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunargff255f92020-05-13 18:53:52 -06005818
5819 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005820 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005821 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunargff255f92020-05-13 18:53:52 -06005822 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005823 skip |=
5824 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5825 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauf397e68b2022-04-19 11:44:07 -06005826 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06005827 }
5828 }
5829 return skip;
5830}
5831
5832void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5833 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005834 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06005835 auto *cb_access_context = GetAccessContext(commandBuffer);
5836 assert(cb_access_context);
5837 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5838 auto *context = cb_access_context->GetCurrentAccessContext();
5839 assert(context);
5840
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005841 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunargff255f92020-05-13 18:53:52 -06005842
5843 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005844 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005845 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005846 }
5847}
John Zulauf49beb112020-11-04 16:06:31 -07005848
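// Buffer markers are single 32-bit writes, hence the fixed MakeRange(dstOffset, 4) above. The
// pipelineStage parameter is not consulted here; the marker write appears to be approximated as a
// transfer write (SYNC_COPY_TRANSFER_WRITE) for hazard-detection purposes.
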
5849bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
5850 bool skip = false;
5851 const auto *cb_context = GetAccessContext(commandBuffer);
5852 assert(cb_context);
5853 if (!cb_context) return skip;
John Zulaufe0757ba2022-06-10 16:51:45 -06005854 const auto *access_context = cb_context->GetCurrentAccessContext();
5855 assert(access_context);
5856 if (!access_context) return skip;
John Zulauf49beb112020-11-04 16:06:31 -07005857
John Zulaufe0757ba2022-06-10 16:51:45 -06005858 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask, nullptr);
John Zulauf6ce24372021-01-30 05:56:25 -07005859 return set_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005860}
5861
5862void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5863 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
5864 auto *cb_context = GetAccessContext(commandBuffer);
5865 assert(cb_context);
5866 if (!cb_context) return;
John Zulaufe0757ba2022-06-10 16:51:45 -06005867
5868 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask,
5869 cb_context->GetCurrentAccessContext());
John Zulauf49beb112020-11-04 16:06:31 -07005870}
5871
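// Event commands are modeled as SyncOp objects: the validate paths construct an op and call its
// Validate() immediately, while the record paths hand the op to RecordSyncOp<>() so it is retained
// with the command buffer state (enabling later replay, e.g. for submit-time validation). The same
// pattern repeats for every set/reset/wait entry point below.
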
John Zulauf4edde622021-02-15 08:54:50 -07005872bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5873 const VkDependencyInfoKHR *pDependencyInfo) const {
5874 bool skip = false;
5875 const auto *cb_context = GetAccessContext(commandBuffer);
5876 assert(cb_context);
5877 if (!cb_context || !pDependencyInfo) return skip;
5878
John Zulaufe0757ba2022-06-10 16:51:45 -06005879 const auto *access_context = cb_context->GetCurrentAccessContext();
5880 assert(access_context);
5881 if (!access_context) return skip;
5882
5883 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo, nullptr);
John Zulauf4edde622021-02-15 08:54:50 -07005884 return set_event_op.Validate(*cb_context);
5885}
5886
Tony-LunarGc43525f2021-11-15 16:12:38 -07005887bool SyncValidator::PreCallValidateCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5888 const VkDependencyInfo *pDependencyInfo) const {
5889 bool skip = false;
5890 const auto *cb_context = GetAccessContext(commandBuffer);
5891 assert(cb_context);
5892 if (!cb_context || !pDependencyInfo) return skip;
5893
John Zulaufe0757ba2022-06-10 16:51:45 -06005894 SyncOpSetEvent set_event_op(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo, nullptr);
Tony-LunarGc43525f2021-11-15 16:12:38 -07005895 return set_event_op.Validate(*cb_context);
5896}
5897
John Zulauf4edde622021-02-15 08:54:50 -07005898void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5899 const VkDependencyInfoKHR *pDependencyInfo) {
5900 StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
5901 auto *cb_context = GetAccessContext(commandBuffer);
5902 assert(cb_context);
5903 if (!cb_context || !pDependencyInfo) return;
5904
John Zulaufe0757ba2022-06-10 16:51:45 -06005905 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo,
5906 cb_context->GetCurrentAccessContext());
John Zulauf4edde622021-02-15 08:54:50 -07005907}
5908
Tony-LunarGc43525f2021-11-15 16:12:38 -07005909void SyncValidator::PostCallRecordCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5910 const VkDependencyInfo *pDependencyInfo) {
5911 StateTracker::PostCallRecordCmdSetEvent2(commandBuffer, event, pDependencyInfo);
5912 auto *cb_context = GetAccessContext(commandBuffer);
5913 assert(cb_context);
5914 if (!cb_context || !pDependencyInfo) return;
5915
John Zulaufe0757ba2022-06-10 16:51:45 -06005916 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo,
5917 cb_context->GetCurrentAccessContext());
Tony-LunarGc43525f2021-11-15 16:12:38 -07005918}
5919
John Zulauf49beb112020-11-04 16:06:31 -07005920bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
5921 VkPipelineStageFlags stageMask) const {
5922 bool skip = false;
5923 const auto *cb_context = GetAccessContext(commandBuffer);
5924 assert(cb_context);
5925 if (!cb_context) return skip;
5926
John Zulauf36ef9282021-02-02 11:47:24 -07005927 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005928 return reset_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005929}
5930
5931void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5932 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
5933 auto *cb_context = GetAccessContext(commandBuffer);
5934 assert(cb_context);
5935 if (!cb_context) return;
5936
John Zulauf1bf30522021-09-03 15:39:06 -06005937 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf49beb112020-11-04 16:06:31 -07005938}
5939
John Zulauf4edde622021-02-15 08:54:50 -07005940bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5941 VkPipelineStageFlags2KHR stageMask) const {
5942 bool skip = false;
5943 const auto *cb_context = GetAccessContext(commandBuffer);
5944 assert(cb_context);
5945 if (!cb_context) return skip;
5946
5947 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
5948 return reset_event_op.Validate(*cb_context);
5949}
5950
Tony-LunarGa2662db2021-11-16 07:26:24 -07005951bool SyncValidator::PreCallValidateCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5952 VkPipelineStageFlags2 stageMask) const {
5953 bool skip = false;
5954 const auto *cb_context = GetAccessContext(commandBuffer);
5955 assert(cb_context);
5956 if (!cb_context) return skip;
5957
5958 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
5959 return reset_event_op.Validate(*cb_context);
5960}
5961
John Zulauf4edde622021-02-15 08:54:50 -07005962void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5963 VkPipelineStageFlags2KHR stageMask) {
5964 StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
5965 auto *cb_context = GetAccessContext(commandBuffer);
5966 assert(cb_context);
5967 if (!cb_context) return;
5968
John Zulauf1bf30522021-09-03 15:39:06 -06005969 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf4edde622021-02-15 08:54:50 -07005970}
5971
Tony-LunarGa2662db2021-11-16 07:26:24 -07005972void SyncValidator::PostCallRecordCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask) {
5973 StateTracker::PostCallRecordCmdResetEvent2(commandBuffer, event, stageMask);
5974 auto *cb_context = GetAccessContext(commandBuffer);
5975 assert(cb_context);
5976 if (!cb_context) return;
5977
5978 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
5979}
5980
John Zulauf49beb112020-11-04 16:06:31 -07005981bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5982 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5983 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5984 uint32_t bufferMemoryBarrierCount,
5985 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5986 uint32_t imageMemoryBarrierCount,
5987 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
5988 bool skip = false;
5989 const auto *cb_context = GetAccessContext(commandBuffer);
5990 assert(cb_context);
5991 if (!cb_context) return skip;
5992
John Zulauf36ef9282021-02-02 11:47:24 -07005993 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
5994 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
5995 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufd5115702021-01-18 12:34:33 -07005996 return wait_events_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005997}
5998
5999void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6000 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
6001 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6002 uint32_t bufferMemoryBarrierCount,
6003 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6004 uint32_t imageMemoryBarrierCount,
6005 const VkImageMemoryBarrier *pImageMemoryBarriers) {
6006 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
6007 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6008 imageMemoryBarrierCount, pImageMemoryBarriers);
6009
6010 auto *cb_context = GetAccessContext(commandBuffer);
6011 assert(cb_context);
6012 if (!cb_context) return;
6013
John Zulauf1bf30522021-09-03 15:39:06 -06006014 cb_context->RecordSyncOp<SyncOpWaitEvents>(
John Zulauf610e28c2021-08-03 17:46:23 -06006015 CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
John Zulauf1bf30522021-09-03 15:39:06 -06006016 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf4a6105a2020-11-17 15:11:05 -07006017}
6018
John Zulauf4edde622021-02-15 08:54:50 -07006019bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6020 const VkDependencyInfoKHR *pDependencyInfos) const {
6021 bool skip = false;
6022 const auto *cb_context = GetAccessContext(commandBuffer);
6023 assert(cb_context);
6024 if (!cb_context) return skip;
6025
6026 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
6027 skip |= wait_events_op.Validate(*cb_context);
6028 return skip;
6029}
6030
6031void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6032 const VkDependencyInfoKHR *pDependencyInfos) {
6033 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
6034
6035 auto *cb_context = GetAccessContext(commandBuffer);
6036 assert(cb_context);
6037 if (!cb_context) return;
6038
John Zulauf1bf30522021-09-03 15:39:06 -06006039 cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
6040 pDependencyInfos);
John Zulauf4edde622021-02-15 08:54:50 -07006041}
6042
Tony-LunarG1364cf52021-11-17 16:10:11 -07006043bool SyncValidator::PreCallValidateCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6044 const VkDependencyInfo *pDependencyInfos) const {
6045 bool skip = false;
6046 const auto *cb_context = GetAccessContext(commandBuffer);
6047 assert(cb_context);
6048 if (!cb_context) return skip;
6049
6050 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
6051 skip |= wait_events_op.Validate(*cb_context);
6052 return skip;
6053}
6054
6055void SyncValidator::PostCallRecordCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6056 const VkDependencyInfo *pDependencyInfos) {
6057    StateTracker::PostCallRecordCmdWaitEvents2(commandBuffer, eventCount, pEvents, pDependencyInfos);
6058
6059 auto *cb_context = GetAccessContext(commandBuffer);
6060 assert(cb_context);
6061 if (!cb_context) return;
6062
6063 cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
6064 pDependencyInfos);
6065}
6066
John Zulauf4a6105a2020-11-17 15:11:05 -07006067void SyncEventState::ResetFirstScope() {
John Zulaufe0757ba2022-06-10 16:51:45 -06006068 first_scope.reset();
Jeremy Gebben9893daf2021-01-04 10:40:50 -07006069 scope = SyncExecScope();
John Zulauf78b1f892021-09-20 15:02:09 -06006070 first_scope_tag = 0;
John Zulauf4a6105a2020-11-17 15:11:05 -07006071}
6072
6073// Keep the "ignore this event" logic in same place for ValidateWait and RecordWait to use
sjfricke0bea06e2022-06-05 09:22:26 +09006074SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd_type, VkPipelineStageFlags2KHR srcStageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07006075 IgnoreReason reason = NotIgnored;
6076
sjfricke0bea06e2022-06-05 09:22:26 +09006077 if ((CMD_WAITEVENTS2KHR == cmd_type || CMD_WAITEVENTS2 == cmd_type) && (CMD_SETEVENT == last_command)) {
John Zulauf4edde622021-02-15 08:54:50 -07006078 reason = SetVsWait2;
6079    } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR || last_command == CMD_RESETEVENT2) &&
               !HasBarrier(0U, 0U)) {
6080 reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
John Zulauf4a6105a2020-11-17 15:11:05 -07006081 } else if (unsynchronized_set) {
6082 reason = SetRace;
John Zulaufe0757ba2022-06-10 16:51:45 -06006083 } else if (first_scope) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07006084 const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
John Zulaufe0757ba2022-06-10 16:51:45 -06006085        // Note that the "no missing bits" path is the only path that results in NotIgnored
John Zulauf4a6105a2020-11-17 15:11:05 -07006086 if (missing_bits) reason = MissingStageBits;
John Zulaufe0757ba2022-06-10 16:51:45 -06006087 } else {
6088 reason = MissingSetEvent;
John Zulauf4a6105a2020-11-17 15:11:05 -07006089 }
6090
6091 return reason;
6092}
6093
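// Returns true when a subsequent wait or reset is synchronized against the event's last command: either there was no
// prior command, an applicable execution barrier has been applied, or ALL_COMMANDS appears in either mask.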
Jeremy Gebben40a22942020-12-22 14:22:06 -07006094bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07006095 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
6096 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
6097 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07006098}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006099
John Zulaufbb890452021-12-14 11:30:18 -07006100void SyncOpBase::SetReplayContext(uint32_t subpass, ReplayContextPtr &&replay) {
6101 subpass_ = subpass;
6102 replay_context_ = std::move(replay);
6103}
6104
6105const ReplayTrackbackBarriersAction *SyncOpBase::GetReplayTrackback() const {
6106 if (replay_context_) {
6107 assert(subpass_ < replay_context_->subpass_contexts.size());
6108 return &replay_context_->subpass_contexts[subpass_];
6109 }
6110 return nullptr;
6111}
6112
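// Sync1 constructor: a single barrier set, with one source/destination execution scope shared by every barrier.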
sjfricke0bea06e2022-06-05 09:22:26 +09006113SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulauf36ef9282021-02-02 11:47:24 -07006114 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
6115 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07006116 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
6117 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
6118 const VkImageMemoryBarrier *pImageMemoryBarriers)
sjfricke0bea06e2022-06-05 09:22:26 +09006119 : SyncOpBase(cmd_type), barriers_(1) {
John Zulauf4edde622021-02-15 08:54:50 -07006120 auto &barrier_set = barriers_[0];
6121 barrier_set.dependency_flags = dependencyFlags;
6122 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
6123 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006124 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
John Zulauf4edde622021-02-15 08:54:50 -07006125 barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
6126 pMemoryBarriers);
6127 barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
6128 bufferMemoryBarrierCount, pBufferMemoryBarriers);
6129 barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
6130 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006131}
6132
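// Sync2 constructor: one barrier set per VkDependencyInfo. Each barrier carries its own stage masks, so the set-wide
// scopes are built from the union reported by GetGlobalStageMasks.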
sjfricke0bea06e2022-06-05 09:22:26 +09006133SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
John Zulauf4edde622021-02-15 08:54:50 -07006134 const VkDependencyInfoKHR *dep_infos)
sjfricke0bea06e2022-06-05 09:22:26 +09006135 : SyncOpBase(cmd_type), barriers_(event_count) {
John Zulauf4edde622021-02-15 08:54:50 -07006136 for (uint32_t i = 0; i < event_count; i++) {
6137 const auto &dep_info = dep_infos[i];
6138 auto &barrier_set = barriers_[i];
6139 barrier_set.dependency_flags = dep_info.dependencyFlags;
6140 auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
6141 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
6142 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
6143 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
6144 barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
6145 dep_info.pMemoryBarriers);
6146 barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
6147 dep_info.pBufferMemoryBarriers);
6148 barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
6149 dep_info.pImageMemoryBarriers);
6150 }
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006151}
6152
sjfricke0bea06e2022-06-05 09:22:26 +09006153SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulaufd5115702021-01-18 12:34:33 -07006154 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
6155 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
6156 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
6157 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
6158 const VkImageMemoryBarrier *pImageMemoryBarriers)
sjfricke0bea06e2022-06-05 09:22:26 +09006159 : SyncOpBarriers(cmd_type, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
6160 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
6161 pImageMemoryBarriers) {}
John Zulaufd5115702021-01-18 12:34:33 -07006162
sjfricke0bea06e2022-06-05 09:22:26 +09006163SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006164 const VkDependencyInfoKHR &dep_info)
sjfricke0bea06e2022-06-05 09:22:26 +09006165 : SyncOpBarriers(cmd_type, sync_state, queue_flags, 1, &dep_info) {}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006166
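// Pipeline barrier validation only needs to check image layout transitions; a transition behaves as an implicit
// read/write of the subresource and can therefore hazard against prior accesses.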
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006167bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
6168 bool skip = false;
6169 const auto *context = cb_context.GetCurrentAccessContext();
6170 assert(context);
6171 if (!context) return skip;
John Zulauf6fdf3d02021-03-05 16:50:47 -07006172 assert(barriers_.size() == 1); // PipelineBarriers only support a single barrier set.
6173
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006174 // Validate Image Layout transitions
John Zulauf6fdf3d02021-03-05 16:50:47 -07006175 const auto &barrier_set = barriers_[0];
6176 for (const auto &image_barrier : barrier_set.image_memory_barriers) {
6177 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
6178 const auto *image_state = image_barrier.image.get();
6179 if (!image_state) continue;
6180 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
6181 if (hazard.hazard) {
6182 // PHASE1 TODO -- add tag information to log msg when useful.
6183 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006184 const auto image_handle = image_state->image();
John Zulauf6fdf3d02021-03-05 16:50:47 -07006185 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
6186 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
6187 string_SyncHazard(hazard.hazard), image_barrier.index,
6188 sync_state.report_data->FormatHandle(image_handle).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06006189 cb_context.FormatHazard(hazard).c_str());
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006190 }
6191 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006192 return skip;
6193}
6194
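// Factory providing the functors and range generators consumed by the ApplyBarriers/ApplyGlobalBarriers templates
// below. The pipeline barrier flavor applies to full resource ranges; the WaitEvents flavor further down filters
// everything through the event's first scope.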
John Zulaufd5115702021-01-18 12:34:33 -07006195struct SyncOpPipelineBarrierFunctorFactory {
6196 using BarrierOpFunctor = PipelineBarrierOp;
6197 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
6198 using GlobalBarrierOpFunctor = PipelineBarrierOp;
6199 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
6200 using BufferRange = ResourceAccessRange;
6201 using ImageRange = subresource_adapter::ImageRangeGenerator;
6202 using GlobalRange = ResourceAccessRange;
6203
John Zulauf00119522022-05-23 19:07:42 -06006204 ApplyFunctor MakeApplyFunctor(QueueId queue_id, const SyncBarrier &barrier, bool layout_transition) const {
6205 return ApplyFunctor(BarrierOpFunctor(queue_id, barrier, layout_transition));
John Zulaufd5115702021-01-18 12:34:33 -07006206 }
John Zulauf14940722021-04-12 15:19:02 -06006207 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07006208 return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
6209 }
John Zulauf00119522022-05-23 19:07:42 -06006210 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(QueueId queue_id, const SyncBarrier &barrier) const {
6211 return GlobalBarrierOpFunctor(queue_id, barrier, false);
John Zulaufd5115702021-01-18 12:34:33 -07006212 }
6213
6214 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
6215 if (!SimpleBinding(buffer)) return ResourceAccessRange();
6216 const auto base_address = ResourceBaseAddress(buffer);
6217 return (range + base_address);
6218 }
John Zulauf110413c2021-03-20 05:38:38 -06006219 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulauf264cce02021-02-05 14:40:47 -07006220 if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
John Zulaufd5115702021-01-18 12:34:33 -07006221
6222 const auto base_address = ResourceBaseAddress(image);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02006223 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address, false);
John Zulaufd5115702021-01-18 12:34:33 -07006224 return range_gen;
6225 }
6226 GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
6227};
6228
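// Apply the buffer/image (resource-scoped) barriers one at a time, each restricted to its barrier's resource range.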
6229template <typename Barriers, typename FunctorFactory>
John Zulauf00119522022-05-23 19:07:42 -06006230void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const QueueId queue_id,
6231 const ResourceUsageTag tag, AccessContext *context) {
John Zulaufd5115702021-01-18 12:34:33 -07006232 for (const auto &barrier : barriers) {
6233 const auto *state = barrier.GetState();
6234 if (state) {
6235 auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
John Zulauf00119522022-05-23 19:07:42 -06006236 auto update_action = factory.MakeApplyFunctor(queue_id, barrier.barrier, barrier.IsLayoutTransition());
John Zulaufd5115702021-01-18 12:34:33 -07006237 auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
6238 UpdateMemoryAccessState(accesses, update_action, &range_gen);
6239 }
6240 }
6241}
6242
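// Apply the global memory barriers as one batched functor across every address type, keeping overlapping barriers
// order-independent within the batch.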
6243template <typename Barriers, typename FunctorFactory>
John Zulauf00119522022-05-23 19:07:42 -06006244void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const QueueId queue_id,
6245 const ResourceUsageTag tag, AccessContext *access_context) {
John Zulaufd5115702021-01-18 12:34:33 -07006246 auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
6247 for (const auto &barrier : barriers) {
John Zulauf00119522022-05-23 19:07:42 -06006248 barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(queue_id, barrier));
John Zulaufd5115702021-01-18 12:34:33 -07006249 }
6250 for (const auto address_type : kAddressTypes) {
6251 auto range_gen = factory.MakeGlobalRangeGen(address_type);
6252 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
6253 }
6254}
6255
John Zulauf8eda1562021-04-13 17:06:41 -06006256ResourceUsageTag SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07006257 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf8eda1562021-04-13 17:06:41 -06006258 auto *events_context = cb_context->GetCurrentEventsContext();
John Zulauf00119522022-05-23 19:07:42 -06006259 const QueueId queue_id = cb_context->GetQueueId();
sjfricke0bea06e2022-06-05 09:22:26 +09006260 const auto tag = cb_context->NextCommandTag(cmd_type_);
John Zulauf00119522022-05-23 19:07:42 -06006261 ReplayRecord(queue_id, tag, access_context, events_context);
John Zulauf4fa68462021-04-26 21:04:22 -06006262 return tag;
6263}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006264
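// Shared by Record (command buffer record time) and by replay at ExecuteCommands/QueueSubmit time; queue_id lets the
// barrier ops note which queue the barrier is applied on.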
John Zulauf00119522022-05-23 19:07:42 -06006265void SyncOpPipelineBarrier::ReplayRecord(QueueId queue_id, const ResourceUsageTag tag, AccessContext *access_context,
John Zulaufbb890452021-12-14 11:30:18 -07006266 SyncEventsContext *events_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006267 SyncOpPipelineBarrierFunctorFactory factory;
John Zulauf4edde622021-02-15 08:54:50 -07006268 // Pipeline barriers only have a single barrier set, unlike WaitEvents2
6269 assert(barriers_.size() == 1);
6270 const auto &barrier_set = barriers_[0];
John Zulauf00119522022-05-23 19:07:42 -06006271 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, queue_id, tag, access_context);
6272 ApplyBarriers(barrier_set.image_memory_barriers, factory, queue_id, tag, access_context);
6273 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, queue_id, tag, access_context);
John Zulauf4edde622021-02-15 08:54:50 -07006274 if (barrier_set.single_exec_scope) {
John Zulaufe0757ba2022-06-10 16:51:45 -06006275 events_context->ApplyBarrier(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, tag);
John Zulauf4edde622021-02-15 08:54:50 -07006276 } else {
6277 for (const auto &barrier : barrier_set.memory_barriers) {
John Zulaufe0757ba2022-06-10 16:51:45 -06006278 events_context->ApplyBarrier(barrier.src_exec_scope, barrier.dst_exec_scope, tag);
John Zulauf4edde622021-02-15 08:54:50 -07006279 }
6280 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006281}
6282
John Zulauf8eda1562021-04-13 17:06:41 -06006283bool SyncOpPipelineBarrier::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006284 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf4fa68462021-04-26 21:04:22 -06006285 // No Validation for replay, as the layout transition accesses are checked directly, and the src*Mask ordering is captured
6286 // with first access information.
John Zulauf8eda1562021-04-13 17:06:41 -06006287 return false;
6288}
6289
John Zulauf4edde622021-02-15 08:54:50 -07006290void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
6291 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
6292 const VkMemoryBarrier *barriers) {
6293 memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006294 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07006295 const auto &barrier = barriers[barrier_index];
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006296 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07006297 memory_barriers.emplace_back(sync_barrier);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006298 }
6299 if (0 == memory_barrier_count) {
6300 // If there are no global memory barriers, force an exec barrier
John Zulauf4edde622021-02-15 08:54:50 -07006301 memory_barriers.emplace_back(SyncBarrier(src, dst));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006302 }
John Zulauf4edde622021-02-15 08:54:50 -07006303 single_exec_scope = true;
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006304}
6305
John Zulauf4edde622021-02-15 08:54:50 -07006306void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
6307 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
6308 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
6309 buffer_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006310 for (uint32_t index = 0; index < barrier_count; index++) {
6311 const auto &barrier = barriers[index];
Jeremy Gebben9f537102021-10-05 16:37:12 -06006312 auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006313 if (buffer) {
6314 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
6315 const auto range = MakeRange(barrier.offset, barrier_size);
6316 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07006317 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006318 } else {
John Zulauf4edde622021-02-15 08:54:50 -07006319 buffer_memory_barriers.emplace_back();
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006320 }
6321 }
6322}
6323
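// Sync2 overloads: each VkMemoryBarrier2 (and the buffer/image variants) carries its own stage masks, so per-barrier
// scopes are built here and single_exec_scope is cleared.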
John Zulauf4edde622021-02-15 08:54:50 -07006324void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07006325 uint32_t memory_barrier_count, const VkMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07006326 memory_barriers.reserve(memory_barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006327 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07006328 const auto &barrier = barriers[barrier_index];
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006329 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
6330 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
6331 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07006332 memory_barriers.emplace_back(sync_barrier);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006333 }
John Zulauf4edde622021-02-15 08:54:50 -07006334 single_exec_scope = false;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006335}
6336
John Zulauf4edde622021-02-15 08:54:50 -07006337void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
6338 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07006339 const VkBufferMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07006340 buffer_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006341 for (uint32_t index = 0; index < barrier_count; index++) {
6342 const auto &barrier = barriers[index];
6343 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
6344 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9f537102021-10-05 16:37:12 -06006345 auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006346 if (buffer) {
6347 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
6348 const auto range = MakeRange(barrier.offset, barrier_size);
6349 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07006350 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006351 } else {
John Zulauf4edde622021-02-15 08:54:50 -07006352 buffer_memory_barriers.emplace_back();
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006353 }
6354 }
6355}
6356
John Zulauf4edde622021-02-15 08:54:50 -07006357void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
6358 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
6359 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
6360 image_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006361 for (uint32_t index = 0; index < barrier_count; index++) {
6362 const auto &barrier = barriers[index];
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006363 auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006364 if (image) {
6365 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
6366 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07006367 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006368 } else {
John Zulauf4edde622021-02-15 08:54:50 -07006369 image_memory_barriers.emplace_back();
6370 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
John Zulaufe7f6a5e2021-01-16 14:31:18 -07006371 }
6372 }
6373}
John Zulaufd5115702021-01-18 12:34:33 -07006374
John Zulauf4edde622021-02-15 08:54:50 -07006375void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
6376 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07006377 const VkImageMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07006378 image_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006379 for (uint32_t index = 0; index < barrier_count; index++) {
6380 const auto &barrier = barriers[index];
6381 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
6382 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006383 auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006384 if (image) {
6385 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
6386 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07006387 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006388 } else {
John Zulauf4edde622021-02-15 08:54:50 -07006389 image_memory_barriers.emplace_back();
6390 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006391 }
6392 }
6393}
6394
sjfricke0bea06e2022-06-05 09:22:26 +09006395SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
6396 uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
6397 VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
6398 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
6399 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
6400 const VkImageMemoryBarrier *pImageMemoryBarriers)
6401 : SyncOpBarriers(cmd_type, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07006402 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
6403 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07006404 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07006405}
6406
sjfricke0bea06e2022-06-05 09:22:26 +09006407SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
6408 uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
6409 : SyncOpBarriers(cmd_type, sync_state, queue_flags, eventCount, pDependencyInfo) {
John Zulauf4edde622021-02-15 08:54:50 -07006410 MakeEventsList(sync_state, eventCount, pEvents);
6411 assert(events_.size() == barriers_.size()); // Just so nobody gets clever and decides to cull the event or barrier arrays
6412}
6413
John Zulauf610e28c2021-08-03 17:46:23 -06006414const char *const SyncOpWaitEvents::kIgnored = "Wait operation is ignored for this event.";
6415
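// Record-time validation entry: host-stage source scopes are flagged as unsupported here, then DoValidate covers the
// checks shared with replay.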
John Zulaufd5115702021-01-18 12:34:33 -07006416bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07006417 bool skip = false;
6418 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006419 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer();
John Zulaufd5115702021-01-18 12:34:33 -07006420
John Zulauf610e28c2021-08-03 17:46:23 -06006421 // This is only interesting at record and not replay (Execute/Submit) time.
John Zulauf4edde622021-02-15 08:54:50 -07006422 for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
6423 const auto &barrier_set = barriers_[barrier_set_index];
6424 if (barrier_set.single_exec_scope) {
6425 if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
6426 const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
6427                skip |= sync_state.LogInfo(command_buffer_handle, vuid,
6428 "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
6429 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
6430 } else {
6431 const auto &barriers = barrier_set.memory_barriers;
6432 for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
6433 const auto &barrier = barriers[barrier_index];
6434 if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
6435 const std::string vuid =
6436 std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
6437                        skip |=
6438 sync_state.LogInfo(command_buffer_handle, vuid,
6439 "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
6440 CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
6441 "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
6442 }
6443 }
6444 }
6445 }
John Zulaufd5115702021-01-18 12:34:33 -07006446 }
6447
John Zulauf610e28c2021-08-03 17:46:23 -06006448 // The rest is common to record time and replay time.
6449 skip |= DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6450 return skip;
6451}
6452
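// Validation common to record and replay. base_tag is ResourceUsageRecord::kMaxIndex at record time; during replay it
// marks the start of the replayed command buffer so events last touched within it are not revalidated.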
John Zulaufbb890452021-12-14 11:30:18 -07006453bool SyncOpWaitEvents::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
John Zulauf610e28c2021-08-03 17:46:23 -06006454 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07006455 const auto &sync_state = exec_context.GetSyncState();
John Zulaufe0757ba2022-06-10 16:51:45 -06006456 const QueueId queue_id = exec_context.GetQueueId();
John Zulauf610e28c2021-08-03 17:46:23 -06006457
Jeremy Gebben40a22942020-12-22 14:22:06 -07006458 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulauf4edde622021-02-15 08:54:50 -07006459 VkPipelineStageFlags2KHR barrier_mask_params = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07006460 bool events_not_found = false;
John Zulaufbb890452021-12-14 11:30:18 -07006461 const auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf669dfd52021-01-27 17:15:28 -07006462 assert(events_context);
John Zulauf4edde622021-02-15 08:54:50 -07006463 size_t barrier_set_index = 0;
6464 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
John Zulauf78394fc2021-07-12 15:41:40 -06006465 for (const auto &event : events_) {
6466 const auto *sync_event = events_context->Get(event.get());
6467 const auto &barrier_set = barriers_[barrier_set_index];
6468 if (!sync_event) {
6469            // NOTE PHASE2: This is where queue submit time validation will need to come back and check the srcStageMask bits,
6470            //  or solve this by having replay create the SyncEventState in the queue context. This will also be a
6471            //  new validation error: wait without a previously submitted set event.
6472 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
John Zulauf4edde622021-02-15 08:54:50 -07006473 barrier_set_index += barrier_set_incr;
John Zulauf78394fc2021-07-12 15:41:40 -06006474 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulaufd5115702021-01-18 12:34:33 -07006475 }
John Zulauf610e28c2021-08-03 17:46:23 -06006476
6477 // For replay calls, don't revalidate "same command buffer" events
6478        if (sync_event->last_command_tag > base_tag) {
            barrier_set_index += barrier_set_incr;  // Keep the event/barrier-set pairing aligned when skipping
            continue;
        }
6479
John Zulauf78394fc2021-07-12 15:41:40 -06006480 const auto event_handle = sync_event->event->event();
6481 // TODO add "destroyed" checks
6482
John Zulaufe0757ba2022-06-10 16:51:45 -06006483 if (sync_event->first_scope) {
John Zulauf78b1f892021-09-20 15:02:09 -06006484 // Only accumulate barrier and event stages if there is a pending set in the current context
6485 barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
6486 event_stage_masks |= sync_event->scope.mask_param;
6487 }
6488
John Zulauf78394fc2021-07-12 15:41:40 -06006489 const auto &src_exec_scope = barrier_set.src_exec_scope;
John Zulauf78b1f892021-09-20 15:02:09 -06006490
sjfricke0bea06e2022-06-05 09:22:26 +09006491 const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_type_, src_exec_scope.mask_param);
John Zulauf78394fc2021-07-12 15:41:40 -06006492 if (ignore_reason) {
6493 switch (ignore_reason) {
6494 case SyncEventState::ResetWaitRace:
6495 case SyncEventState::Reset2WaitRace: {
6496                    // Four permutations of Reset and Wait calls...
6497 const char *vuid =
sjfricke0bea06e2022-06-05 09:22:26 +09006498 (cmd_type_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
John Zulauf78394fc2021-07-12 15:41:40 -06006499 if (ignore_reason == SyncEventState::Reset2WaitRace) {
sjfricke0bea06e2022-06-05 09:22:26 +09006500 vuid = (cmd_type_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2-event-03831"
6501 : "VUID-vkCmdResetEvent2-event-03832";
John Zulauf78394fc2021-07-12 15:41:40 -06006502 }
6503 const char *const message =
6504                        "%s: %s %s operation following %s without intervening execution barrier, may cause a race condition. %s";
6505 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6506 sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
John Zulauf610e28c2021-08-03 17:46:23 -06006507 CommandTypeString(sync_event->last_command), kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006508 break;
6509 }
6510 case SyncEventState::SetRace: {
6511                    // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus ignored for
6512 // this event
6513 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
6514 const char *const message =
6515                        "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
6516 const char *const reason = "First synchronization scope is undefined.";
6517 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6518 sync_state.report_data->FormatHandle(event_handle).c_str(),
John Zulauf610e28c2021-08-03 17:46:23 -06006519 CommandTypeString(sync_event->last_command), reason, kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006520 break;
6521 }
6522 case SyncEventState::MissingStageBits: {
6523 const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
6524 // Issue error message that event waited for is not in wait events scope
6525 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
6526 const char *const message = "%s: %s stageMask %" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
6527 ". Bits missing from srcStageMask %s. %s";
6528 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6529 sync_state.report_data->FormatHandle(event_handle).c_str(),
6530 sync_event->scope.mask_param, src_exec_scope.mask_param,
John Zulauf610e28c2021-08-03 17:46:23 -06006531 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006532 break;
6533 }
6534 case SyncEventState::SetVsWait2: {
Tony-LunarG279601c2021-11-16 10:50:51 -07006535 skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2-pEvents-03837",
John Zulauf78394fc2021-07-12 15:41:40 -06006536 "%s: Follows set of %s by %s. Disallowed.", CmdName(),
6537 sync_state.report_data->FormatHandle(event_handle).c_str(),
6538 CommandTypeString(sync_event->last_command));
6539 break;
6540 }
John Zulaufe0757ba2022-06-10 16:51:45 -06006541 case SyncEventState::MissingSetEvent: {
6542 // TODO: There are conditions at queue submit time where we can definitively say that
6543 // a missing set event is an error. Add those if not captured in CoreChecks
6544 break;
6545 }
John Zulauf78394fc2021-07-12 15:41:40 -06006546 default:
6547 assert(ignore_reason == SyncEventState::NotIgnored);
6548 }
6549 } else if (barrier_set.image_memory_barriers.size()) {
6550 const auto &image_memory_barriers = barrier_set.image_memory_barriers;
John Zulaufbb890452021-12-14 11:30:18 -07006551 const auto *context = exec_context.GetCurrentAccessContext();
John Zulauf78394fc2021-07-12 15:41:40 -06006552 assert(context);
6553 for (const auto &image_memory_barrier : image_memory_barriers) {
6554 if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
6555 const auto *image_state = image_memory_barrier.image.get();
6556 if (!image_state) continue;
6557 const auto &subresource_range = image_memory_barrier.range;
6558 const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
John Zulaufe0757ba2022-06-10 16:51:45 -06006559 const auto hazard = context->DetectImageBarrierHazard(*image_state, subresource_range, sync_event->scope.exec_scope,
6560 src_access_scope, queue_id, *sync_event,
6561 AccessContext::DetectOptions::kDetectAll);
John Zulauf78394fc2021-07-12 15:41:40 -06006562 if (hazard.hazard) {
6563 skip |= sync_state.LogError(image_state->image(), string_SyncHazardVUID(hazard.hazard),
6564 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
6565 string_SyncHazard(hazard.hazard), image_memory_barrier.index,
6566 sync_state.report_data->FormatHandle(image_state->image()).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06006567 exec_context.FormatHazard(hazard).c_str());
John Zulauf78394fc2021-07-12 15:41:40 -06006568 break;
6569 }
6570 }
6571 }
6572        // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2 VUID-vkCmdWaitEvents2KHR-pEvents-03839
6574 barrier_set_index += barrier_set_incr;
6575 }
John Zulaufd5115702021-01-18 12:34:33 -07006576
6577 // Note that we can't check for HOST in pEvents as we don't track that set event type
John Zulauf4edde622021-02-15 08:54:50 -07006578 const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
John Zulaufd5115702021-01-18 12:34:33 -07006579 if (extra_stage_bits) {
6580 // Issue error message that event waited for is not in wait events scope
John Zulauf4edde622021-02-15 08:54:50 -07006581 // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
6582 const char *const vuid =
sjfricke0bea06e2022-06-05 09:22:26 +09006583 (CMD_WAITEVENTS == cmd_type_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2-pEvents-03838";
John Zulaufd5115702021-01-18 12:34:33 -07006584 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07006585 "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
John Zulaufbb890452021-12-14 11:30:18 -07006586 const auto handle = exec_context.Handle();
John Zulaufd5115702021-01-18 12:34:33 -07006587 if (events_not_found) {
John Zulaufbb890452021-12-14 11:30:18 -07006588 skip |= sync_state.LogInfo(handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07006589 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
John Zulaufd5115702021-01-18 12:34:33 -07006590 " vkCmdSetEvent may be in previously submitted command buffer.");
6591 } else {
John Zulaufbb890452021-12-14 11:30:18 -07006592 skip |= sync_state.LogError(handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07006593 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
John Zulaufd5115702021-01-18 12:34:33 -07006594 }
6595 }
6596 return skip;
6597}
6598
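// WaitEvents flavor of the barrier functor factory: scopes and ranges are restricted to the event's first scope, so
// only accesses visible to the matching SetEvent participate in the wait.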
6599struct SyncOpWaitEventsFunctorFactory {
6600 using BarrierOpFunctor = WaitEventBarrierOp;
6601 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
6602 using GlobalBarrierOpFunctor = WaitEventBarrierOp;
6603 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
6604 using BufferRange = EventSimpleRangeGenerator;
6605 using ImageRange = EventImageRangeGenerator;
6606 using GlobalRange = EventSimpleRangeGenerator;
6607
6608 // Need to restrict to only valid exec and access scope for this event
6609 // Pass by value is intentional to get a copy we can change without modifying the passed barrier
6610 SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07006611 barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
John Zulaufd5115702021-01-18 12:34:33 -07006612 barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
6613 return barrier;
6614 }
John Zulauf00119522022-05-23 19:07:42 -06006615 ApplyFunctor MakeApplyFunctor(QueueId queue_id, const SyncBarrier &barrier_arg, bool layout_transition) const {
John Zulaufd5115702021-01-18 12:34:33 -07006616 auto barrier = RestrictToEvent(barrier_arg);
John Zulauf00119522022-05-23 19:07:42 -06006617 return ApplyFunctor(BarrierOpFunctor(queue_id, sync_event->first_scope_tag, barrier, layout_transition));
John Zulaufd5115702021-01-18 12:34:33 -07006618 }
John Zulauf14940722021-04-12 15:19:02 -06006619 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07006620 return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
6621 }
John Zulauf00119522022-05-23 19:07:42 -06006622 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const QueueId queue_id, const SyncBarrier &barrier_arg) const {
John Zulaufd5115702021-01-18 12:34:33 -07006623 auto barrier = RestrictToEvent(barrier_arg);
John Zulauf00119522022-05-23 19:07:42 -06006624 return GlobalBarrierOpFunctor(queue_id, sync_event->first_scope_tag, barrier, false);
John Zulaufd5115702021-01-18 12:34:33 -07006625 }
6626
6627 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
6628 const AccessAddressType address_type = GetAccessAddressType(buffer);
6629 const auto base_address = ResourceBaseAddress(buffer);
6630 ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
6631 EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
6632 return filtered_range_gen;
6633 }
John Zulauf110413c2021-03-20 05:38:38 -06006634 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulaufd5115702021-01-18 12:34:33 -07006635 if (!SimpleBinding(image)) return ImageRange();
6636 const auto address_type = GetAccessAddressType(image);
6637 const auto base_address = ResourceBaseAddress(image);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02006638 subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), subresource_range, base_address,
6639 false);
John Zulaufd5115702021-01-18 12:34:33 -07006640 EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
6641
6642 return filtered_range_gen;
6643 }
6644 GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
6645 return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
6646 }
6647 SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
6648 SyncEventState *sync_event;
6649};
6650
John Zulauf8eda1562021-04-13 17:06:41 -06006651ResourceUsageTag SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
sjfricke0bea06e2022-06-05 09:22:26 +09006652 const auto tag = cb_context->NextCommandTag(cmd_type_);
John Zulaufd5115702021-01-18 12:34:33 -07006653 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf00119522022-05-23 19:07:42 -06006654 const QueueId queue_id = cb_context->GetQueueId();
John Zulaufd5115702021-01-18 12:34:33 -07006655 assert(access_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006656 if (!access_context) return tag;
John Zulauf669dfd52021-01-27 17:15:28 -07006657 auto *events_context = cb_context->GetCurrentEventsContext();
6658 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006659 if (!events_context) return tag;
John Zulaufd5115702021-01-18 12:34:33 -07006660
John Zulauf00119522022-05-23 19:07:42 -06006661 ReplayRecord(queue_id, tag, access_context, events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006662 return tag;
6663}
6664
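// Applies each event's wait using the same IsIgnoredByWait rules as validation, so the recorded state matches what
// was (or wasn't) reported.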
John Zulauf00119522022-05-23 19:07:42 -06006665void SyncOpWaitEvents::ReplayRecord(QueueId queue_id, ResourceUsageTag tag, AccessContext *access_context,
6666 SyncEventsContext *events_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07006667 // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
6668 // all accesses. Can instead import for all first_scopes, or a union of them, if this becomes a performance/memory issue,
6669    // but with no idea of the performance of the union, nor of whether it even matters, take the simplest approach here.
6670 access_context->ResolvePreviousAccesses();
6671
John Zulauf4edde622021-02-15 08:54:50 -07006672 size_t barrier_set_index = 0;
6673 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
6674 assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
John Zulauf669dfd52021-01-27 17:15:28 -07006675 for (auto &event_shared : events_) {
6676 if (!event_shared.get()) continue;
6677 auto *sync_event = events_context->GetFromShared(event_shared);
John Zulaufd5115702021-01-18 12:34:33 -07006678
sjfricke0bea06e2022-06-05 09:22:26 +09006679 sync_event->last_command = cmd_type_;
John Zulauf610e28c2021-08-03 17:46:23 -06006680 sync_event->last_command_tag = tag;
John Zulaufd5115702021-01-18 12:34:33 -07006681
John Zulauf4edde622021-02-15 08:54:50 -07006682 const auto &barrier_set = barriers_[barrier_set_index];
6683 const auto &dst = barrier_set.dst_exec_scope;
sjfricke0bea06e2022-06-05 09:22:26 +09006684 if (!sync_event->IsIgnoredByWait(cmd_type_, barrier_set.src_exec_scope.mask_param)) {
John Zulaufd5115702021-01-18 12:34:33 -07006685            // These apply barriers one at a time, as they are restricted to the resource ranges specified for each barrier,
6686            // but do not update the dependency chain information (they only set the "pending" state), so that the order
6687            // independence of the barriers is maintained.
6688 SyncOpWaitEventsFunctorFactory factory(sync_event);
John Zulauf00119522022-05-23 19:07:42 -06006689 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, queue_id, tag, access_context);
6690 ApplyBarriers(barrier_set.image_memory_barriers, factory, queue_id, tag, access_context);
6691 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, queue_id, tag, access_context);
John Zulaufd5115702021-01-18 12:34:33 -07006692
6693 // Apply the global barrier to the event itself (for race condition tracking)
6694 // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
6695 sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
6696 sync_event->barriers |= dst.exec_scope;
6697 } else {
6698 // We ignored this wait, so we don't have any effective synchronization barriers for it.
6699 sync_event->barriers = 0U;
6700 }
John Zulauf4edde622021-02-15 08:54:50 -07006701 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07006702 }
6703
6704 // Apply the pending barriers
6705 ResolvePendingBarrierFunctor apply_pending_action(tag);
6706 access_context->ApplyToContext(apply_pending_action);
6707}
6708
John Zulauf8eda1562021-04-13 17:06:41 -06006709bool SyncOpWaitEvents::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006710 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6711 return DoValidate(*exec_context, base_tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006712}
6713
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006714bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
6715 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
6716 bool skip = false;
6717 const auto *cb_access_context = GetAccessContext(commandBuffer);
6718 assert(cb_access_context);
6719 if (!cb_access_context) return skip;
6720
6721 const auto *context = cb_access_context->GetCurrentAccessContext();
6722 assert(context);
6723 if (!context) return skip;
6724
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006725 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006726
6727 if (dst_buffer) {
6728 const ResourceAccessRange range = MakeRange(dstOffset, 4);
6729 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
6730 if (hazard.hazard) {
6731 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
6732                             "vkCmdWriteBufferMarker2AMD: Hazard %s for dstBuffer %s. Access info %s.",
6733 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06006734 cb_access_context->FormatHazard(hazard).c_str());
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006735 }
6736 }
6737 return skip;
6738}
6739
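// Dehandle the event list into shared EVENT_STATE references so replay does not depend on live handles.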
John Zulauf669dfd52021-01-27 17:15:28 -07006740void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
John Zulaufd5115702021-01-18 12:34:33 -07006741 events_.reserve(event_count);
6742 for (uint32_t event_index = 0; event_index < event_count; event_index++) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006743 events_.emplace_back(sync_state.Get<EVENT_STATE>(events[event_index]));
John Zulaufd5115702021-01-18 12:34:33 -07006744 }
6745}
John Zulauf6ce24372021-01-30 05:56:25 -07006746
sjfricke0bea06e2022-06-05 09:22:26 +09006747SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07006748 VkPipelineStageFlags2KHR stageMask)
sjfricke0bea06e2022-06-05 09:22:26 +09006749 : SyncOpBase(cmd_type),
6750 event_(sync_state.Get<EVENT_STATE>(event)),
6751 exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07006752
John Zulauf1bf30522021-09-03 15:39:06 -06006753bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
6754 return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6755}
6756
John Zulaufbb890452021-12-14 11:30:18 -07006757bool SyncOpResetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
6758 auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf6ce24372021-01-30 05:56:25 -07006759 assert(events_context);
6760 bool skip = false;
6761 if (!events_context) return skip;
6762
John Zulaufbb890452021-12-14 11:30:18 -07006763 const auto &sync_state = exec_context.GetSyncState();
John Zulauf6ce24372021-01-30 05:56:25 -07006764 const auto *sync_event = events_context->Get(event_);
6765 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6766
John Zulauf1bf30522021-09-03 15:39:06 -06006767 if (sync_event->last_command_tag > base_tag) return skip; // if we validated this in recording of the secondary, don't repeat
6768
John Zulauf6ce24372021-01-30 05:56:25 -07006769 const char *const set_wait =
6770 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
6771 "hazards.";
6772 const char *message = set_wait; // Only one message this call.
6773 if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
6774 const char *vuid = nullptr;
6775 switch (sync_event->last_command) {
6776 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006777 case CMD_SETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006778 case CMD_SETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006779 // Needs a barrier between set and reset
6780 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
6781 break;
John Zulauf4edde622021-02-15 08:54:50 -07006782 case CMD_WAITEVENTS:
Tony-LunarG1364cf52021-11-17 16:10:11 -07006783 case CMD_WAITEVENTS2:
John Zulauf4edde622021-02-15 08:54:50 -07006784 case CMD_WAITEVENTS2KHR: {
John Zulauf6ce24372021-01-30 05:56:25 -07006785 // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask
6786 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
6787 break;
6788 }
6789 default:
6790                // No barrier is needed when there was no prior command, or when the last command was itself a reset.
John Zulauf4edde622021-02-15 08:54:50 -07006791 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
6792                       (sync_event->last_command == CMD_RESETEVENT2KHR) || (sync_event->last_command == CMD_RESETEVENT2));
John Zulauf6ce24372021-01-30 05:56:25 -07006793 break;
6794 }
6795 if (vuid) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006796 skip |= sync_state.LogError(event_->event(), vuid, message, CmdName(),
6797 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006798 CommandTypeString(sync_event->last_command));
6799 }
6800 }
6801 return skip;
6802}
6803
John Zulauf8eda1562021-04-13 17:06:41 -06006804ResourceUsageTag SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
sjfricke0bea06e2022-06-05 09:22:26 +09006805 const auto tag = cb_context->NextCommandTag(cmd_type_);
John Zulauf6ce24372021-01-30 05:56:25 -07006806 auto *events_context = cb_context->GetCurrentEventsContext();
John Zulaufe0757ba2022-06-10 16:51:45 -06006807 auto *access_context = cb_context->GetCurrentAccessContext();
6808 const QueueId queue_id = cb_context->GetQueueId();
John Zulauf6ce24372021-01-30 05:56:25 -07006809 assert(events_context);
John Zulaufe0757ba2022-06-10 16:51:45 -06006810 if (access_context && events_context) {
6811 ReplayRecord(queue_id, tag, access_context, events_context);
6812 }
John Zulauf8eda1562021-04-13 17:06:41 -06006813 return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006814}
6815
John Zulauf8eda1562021-04-13 17:06:41 -06006816bool SyncOpResetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006817 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6818 return DoValidate(*exec_context, base_tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006819}
6820
John Zulauf00119522022-05-23 19:07:42 -06006821void SyncOpResetEvent::ReplayRecord(QueueId queue_id, ResourceUsageTag tag, AccessContext *access_context,
John Zulaufe0757ba2022-06-10 16:51:45 -06006822 SyncEventsContext *events_context) const {
6823 auto *sync_event = events_context->GetFromShared(event_);
6824 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
6825
6826 // Update the event state
6827 sync_event->last_command = cmd_type_;
6828 sync_event->last_command_tag = tag;
6829 sync_event->unsynchronized_set = CMD_NONE;
6830 sync_event->ResetFirstScope();
6831 sync_event->barriers = 0U;
6832}
John Zulauf8eda1562021-04-13 17:06:41 -06006833
sjfricke0bea06e2022-06-05 09:22:26 +09006834SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulaufe0757ba2022-06-10 16:51:45 -06006835 VkPipelineStageFlags2KHR stageMask, const AccessContext *access_context)
sjfricke0bea06e2022-06-05 09:22:26 +09006836 : SyncOpBase(cmd_type),
Jeremy Gebben9f537102021-10-05 16:37:12 -06006837 event_(sync_state.Get<EVENT_STATE>(event)),
John Zulaufe0757ba2022-06-10 16:51:45 -06006838 recorded_context_(),
John Zulauf4edde622021-02-15 08:54:50 -07006839 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
John Zulaufe0757ba2022-06-10 16:51:45 -06006840 dep_info_() {
6841 // Snapshot the current access_context for later inspection at wait time.
6842 // NOTE: This appears brute force, but given that we only save a "first-last" model of access history, the current
6843    // access context (including barrier state for chaining) won't necessarily contain the needed information when
6844    // referenced at Wait or Submit time.
6845 if (access_context) {
6846 recorded_context_ = std::make_shared<const AccessContext>(*access_context);
6847 }
6848}
John Zulauf4edde622021-02-15 08:54:50 -07006849
sjfricke0bea06e2022-06-05 09:22:26 +09006850SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulaufe0757ba2022-06-10 16:51:45 -06006851 const VkDependencyInfoKHR &dep_info, const AccessContext *access_context)
sjfricke0bea06e2022-06-05 09:22:26 +09006852 : SyncOpBase(cmd_type),
Jeremy Gebben9f537102021-10-05 16:37:12 -06006853 event_(sync_state.Get<EVENT_STATE>(event)),
John Zulaufe0757ba2022-06-10 16:51:45 -06006854 recorded_context_(),
John Zulauf4edde622021-02-15 08:54:50 -07006855 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
John Zulaufe0757ba2022-06-10 16:51:45 -06006856 dep_info_(new safe_VkDependencyInfo(&dep_info)) {
6857 if (access_context) {
6858 recorded_context_ = std::make_shared<const AccessContext>(*access_context);
6859 }
6860}
John Zulauf6ce24372021-01-30 05:56:25 -07006861
6862bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulauf610e28c2021-08-03 17:46:23 -06006863 return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6864}
6865bool SyncOpSetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006866 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6867 assert(exec_context);
6868 return DoValidate(*exec_context, base_tag);
John Zulauf610e28c2021-08-03 17:46:23 -06006869}
6870
John Zulaufbb890452021-12-14 11:30:18 -07006871bool SyncOpSetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
John Zulauf6ce24372021-01-30 05:56:25 -07006872 bool skip = false;
6873
John Zulaufbb890452021-12-14 11:30:18 -07006874 const auto &sync_state = exec_context.GetSyncState();
6875 auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf6ce24372021-01-30 05:56:25 -07006876 assert(events_context);
6877 if (!events_context) return skip;
6878
6879 const auto *sync_event = events_context->Get(event_);
6880 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6881
John Zulauf610e28c2021-08-03 17:46:23 -06006882    if (sync_event->last_command_tag >= base_tag) return skip;  // for replay we don't want to revalidate internal "last command"
6883
John Zulauf6ce24372021-01-30 05:56:25 -07006884 const char *const reset_set =
6885        "%s: %s %s operation following %s without an intervening execution barrier is a race condition and may result in data "
6886 "hazards.";
6887 const char *const wait =
6888        "%s: %s %s operation following %s without an intervening vkCmdResetEvent may result in a data hazard and is ignored.";
6889
6890 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
John Zulauf4edde622021-02-15 08:54:50 -07006891 const char *vuid_stem = nullptr;
John Zulauf6ce24372021-01-30 05:56:25 -07006892 const char *message = nullptr;
6893 switch (sync_event->last_command) {
6894 case CMD_RESETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006895 case CMD_RESETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006896 case CMD_RESETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006897 // Needs a barrier between reset and set
John Zulauf4edde622021-02-15 08:54:50 -07006898 vuid_stem = "-missingbarrier-reset";
John Zulauf6ce24372021-01-30 05:56:25 -07006899 message = reset_set;
6900 break;
6901 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006902 case CMD_SETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006903 case CMD_SETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006904 // Needs a barrier between set and set
John Zulauf4edde622021-02-15 08:54:50 -07006905 vuid_stem = "-missingbarrier-set";
John Zulauf6ce24372021-01-30 05:56:25 -07006906 message = reset_set;
6907 break;
6908 case CMD_WAITEVENTS:
Tony-LunarG1364cf52021-11-17 16:10:11 -07006909 case CMD_WAITEVENTS2:
John Zulauf4edde622021-02-15 08:54:50 -07006910 case CMD_WAITEVENTS2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07006911 // Needs a barrier or is in second execution scope
John Zulauf4edde622021-02-15 08:54:50 -07006912 vuid_stem = "-missingbarrier-wait";
John Zulauf6ce24372021-01-30 05:56:25 -07006913 message = wait;
6914 break;
6915 default:
6916                // The only other valid last command is CMD_NONE; no barrier is needed.
6917 assert(sync_event->last_command == CMD_NONE);
6918 break;
6919 }
John Zulauf4edde622021-02-15 08:54:50 -07006920 if (vuid_stem) {
John Zulauf6ce24372021-01-30 05:56:25 -07006921 assert(nullptr != message);
John Zulauf4edde622021-02-15 08:54:50 -07006922 std::string vuid("SYNC-");
6923 vuid.append(CmdName()).append(vuid_stem);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006924 skip |= sync_state.LogError(event_->event(), vuid.c_str(), message, CmdName(),
6925 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006926 CommandTypeString(sync_event->last_command));
6927 }
6928 }
6929
6930 return skip;
6931}
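
// Illustrative sketch (hypothetical, handles and masks assumed): the two messages above map to
//
//   vkCmdSetEvent(cb, event, stages);
//   vkCmdSetEvent(cb, event, stages);      // set-after-set without a barrier -> reset_set message
//
//   vkCmdWaitEvents(cb, 1, &event, ...);
//   vkCmdSetEvent(cb, event, stages);      // set-after-wait without vkCmdResetEvent -> wait message, set is ignored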
6932
John Zulauf8eda1562021-04-13 17:06:41 -06006933ResourceUsageTag SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
sjfricke0bea06e2022-06-05 09:22:26 +09006934 const auto tag = cb_context->NextCommandTag(cmd_type_);
John Zulauf6ce24372021-01-30 05:56:25 -07006935 auto *events_context = cb_context->GetCurrentEventsContext();
John Zulauf00119522022-05-23 19:07:42 -06006936 const QueueId queue_id = cb_context->GetQueueId();
John Zulaufe0757ba2022-06-10 16:51:45 -06006937 assert(recorded_context_);
6938 if (recorded_context_ && events_context) {
6939 DoRecord(queue_id, tag, recorded_context_, events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006940 }
6941 return tag;
6942}
John Zulauf6ce24372021-01-30 05:56:25 -07006943
John Zulauf00119522022-05-23 19:07:42 -06006944void SyncOpSetEvent::ReplayRecord(QueueId queue_id, ResourceUsageTag tag, AccessContext *access_context,
6945 SyncEventsContext *events_context) const {
John Zulaufe0757ba2022-06-10 16:51:45 -06006946 // Create a copy of the current context, and merge in the state snapshot at record set event time
6947    // Note: we mustn't change the recorded context copy, as a given CB could be submitted more than once (in general)
6948 auto merged_context = std::make_shared<AccessContext>(*access_context);
6949 merged_context->ResolveFromContext(QueueTagOffsetBarrierAction(queue_id, tag), *recorded_context_);
6950 DoRecord(queue_id, tag, merged_context, events_context);
6951}
6952
6953void SyncOpSetEvent::DoRecord(QueueId queue_id, ResourceUsageTag tag, const std::shared_ptr<const AccessContext> &access_context,
6954 SyncEventsContext *events_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07006955 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf610e28c2021-08-03 17:46:23 -06006956 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07006957
6958    // NOTE: We simply record the sync scope here, as anything else would be implementation defined/undefined.
6959    // We also issue errors for missing barriers between event commands; if the user fixes those, that also fixes
6960    // any issues caused by the naive scope setting here.
6961
6962    // With two SetEvent calls, one cannot know which group of operations will be waited for.
6963 // Given:
6964 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
6965 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
6966
6967 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
6968 sync_event->unsynchronized_set = sync_event->last_command;
6969 sync_event->ResetFirstScope();
John Zulaufe0757ba2022-06-10 16:51:45 -06006970 } else if (!sync_event->first_scope) {
John Zulauf6ce24372021-01-30 05:56:25 -07006971 // We only set the scope if there isn't one
6972 sync_event->scope = src_exec_scope_;
6973
John Zulaufe0757ba2022-06-10 16:51:45 -06006974 // Save the shared_ptr to copy of the access_context present at set time (sent us by the caller)
6975 sync_event->first_scope = access_context;
John Zulauf6ce24372021-01-30 05:56:25 -07006976 sync_event->unsynchronized_set = CMD_NONE;
6977 sync_event->first_scope_tag = tag;
6978 }
John Zulauf4edde622021-02-15 08:54:50 -07006979 // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
sjfricke0bea06e2022-06-05 09:22:26 +09006980 sync_event->last_command = cmd_type_;
John Zulauf610e28c2021-08-03 17:46:23 -06006981 sync_event->last_command_tag = tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006982 sync_event->barriers = 0U;
6983}
John Zulauf64ffe552021-02-06 10:25:07 -07006984
sjfricke0bea06e2022-06-05 09:22:26 +09006985SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd_type, const SyncValidator &sync_state,
John Zulauf64ffe552021-02-06 10:25:07 -07006986 const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07006987 const VkSubpassBeginInfo *pSubpassBeginInfo)
sjfricke0bea06e2022-06-05 09:22:26 +09006988 : SyncOpBase(cmd_type) {
John Zulauf64ffe552021-02-06 10:25:07 -07006989 if (pRenderPassBegin) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006990 rp_state_ = sync_state.Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
John Zulauf64ffe552021-02-06 10:25:07 -07006991 renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006992 auto fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07006993 if (fb_state) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006994 shared_attachments_ = sync_state.GetAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
John Zulauf64ffe552021-02-06 10:25:07 -07006995 // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
6996            // Note that this is safe to persist as long as shared_attachments_ is not cleared
6997 attachments_.reserve(shared_attachments_.size());
sfricke-samsung01c9ae92021-02-09 22:30:52 -08006998 for (const auto &attachment : shared_attachments_) {
John Zulauf64ffe552021-02-06 10:25:07 -07006999 attachments_.emplace_back(attachment.get());
7000 }
7001 }
7002 if (pSubpassBeginInfo) {
7003 subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
7004 }
7005 }
7006}
7007
7008bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
7009    // Check if any of the layout transitions are hazardous... but we don't have the renderpass context to work with, so we build a temporary one below
7010 bool skip = false;
7011
7012 assert(rp_state_.get());
7013 if (nullptr == rp_state_.get()) return skip;
7014 auto &rp_state = *rp_state_.get();
7015
7016 const uint32_t subpass = 0;
7017
7018 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
7019 // hasn't happened yet)
7020 const std::vector<AccessContext> empty_context_vector;
7021 AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
7022 cb_context.GetCurrentAccessContext());
7023
7024 // Validate attachment operations
7025 if (attachments_.size() == 0) return skip;
7026 const auto &render_area = renderpass_begin_info_.renderArea;
John Zulaufd0ec59f2021-03-13 14:25:08 -07007027
7028    // Since there isn't a valid RenderPassAccessContext until Record, we need to create the view/generator list... we could limit this
7029 // by predicating on whether subpass 0 uses the attachment if it is too expensive to create the full list redundantly here.
7030 // More broadly we could look at thread specific state shared between Validate and Record as is done for other heavyweight
7031 // operations (though it's currently a messy approach)
7032 AttachmentViewGenVector view_gens = RenderPassAccessContext::CreateAttachmentViewGen(render_area, attachments_);
sjfricke0bea06e2022-06-05 09:22:26 +09007033 skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, view_gens, cmd_type_);
John Zulauf64ffe552021-02-06 10:25:07 -07007034
7035 // Validate load operations if there were no layout transition hazards
7036 if (!skip) {
John Zulaufee984022022-04-13 16:39:50 -06007037 temp_context.RecordLayoutTransitions(rp_state, subpass, view_gens, kInvalidTag);
sjfricke0bea06e2022-06-05 09:22:26 +09007038 skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, view_gens, cmd_type_);
John Zulauf64ffe552021-02-06 10:25:07 -07007039 }
7040
7041 return skip;
7042}
7043
John Zulauf8eda1562021-04-13 17:06:41 -06007044ResourceUsageTag SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf64ffe552021-02-06 10:25:07 -07007045 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
7046 assert(rp_state_.get());
sjfricke0bea06e2022-06-05 09:22:26 +09007047 if (nullptr == rp_state_.get()) return cb_context->NextCommandTag(cmd_type_);
7048 return cb_context->RecordBeginRenderPass(cmd_type_, *rp_state_.get(), renderpass_begin_info_.renderArea, attachments_);
John Zulauf64ffe552021-02-06 10:25:07 -07007049}
7050
John Zulauf8eda1562021-04-13 17:06:41 -06007051bool SyncOpBeginRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07007052 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06007053 return false;
7054}
7055
John Zulauf00119522022-05-23 19:07:42 -06007056void SyncOpBeginRenderPass::ReplayRecord(QueueId queue_id, ResourceUsageTag tag, AccessContext *access_context,
John Zulaufbb890452021-12-14 11:30:18 -07007057 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06007058
sjfricke0bea06e2022-06-05 09:22:26 +09007059SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd_type, const SyncValidator &sync_state,
7060 const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo)
7061 : SyncOpBase(cmd_type) {
John Zulauf64ffe552021-02-06 10:25:07 -07007062 if (pSubpassBeginInfo) {
7063 subpass_begin_info_.initialize(pSubpassBeginInfo);
7064 }
7065 if (pSubpassEndInfo) {
7066 subpass_end_info_.initialize(pSubpassEndInfo);
7067 }
7068}
7069
7070bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
7071 bool skip = false;
7072 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
7073 if (!renderpass_context) return skip;
7074
sjfricke0bea06e2022-06-05 09:22:26 +09007075 skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), cmd_type_);
John Zulauf64ffe552021-02-06 10:25:07 -07007076 return skip;
7077}
7078
John Zulauf8eda1562021-04-13 17:06:41 -06007079ResourceUsageTag SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
sjfricke0bea06e2022-06-05 09:22:26 +09007080 return cb_context->RecordNextSubpass(cmd_type_);
John Zulauf8eda1562021-04-13 17:06:41 -06007081}
7082
7083bool SyncOpNextSubpass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07007084 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06007085 return false;
John Zulauf64ffe552021-02-06 10:25:07 -07007086}
7087
sjfricke0bea06e2022-06-05 09:22:26 +09007088SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd_type, const SyncValidator &sync_state,
7089 const VkSubpassEndInfo *pSubpassEndInfo)
7090 : SyncOpBase(cmd_type) {
John Zulauf64ffe552021-02-06 10:25:07 -07007091 if (pSubpassEndInfo) {
7092 subpass_end_info_.initialize(pSubpassEndInfo);
7093 }
7094}
7095
John Zulauf00119522022-05-23 19:07:42 -06007096void SyncOpNextSubpass::ReplayRecord(QueueId queue_id, ResourceUsageTag tag, AccessContext *access_context,
7097 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06007098
John Zulauf64ffe552021-02-06 10:25:07 -07007099bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
7100 bool skip = false;
7101 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
7102
7103 if (!renderpass_context) return skip;
sjfricke0bea06e2022-06-05 09:22:26 +09007104 skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), cmd_type_);
John Zulauf64ffe552021-02-06 10:25:07 -07007105 return skip;
7106}
7107
John Zulauf8eda1562021-04-13 17:06:41 -06007108ResourceUsageTag SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
sjfricke0bea06e2022-06-05 09:22:26 +09007109 return cb_context->RecordEndRenderPass(cmd_type_);
John Zulauf64ffe552021-02-06 10:25:07 -07007110}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07007111
John Zulauf8eda1562021-04-13 17:06:41 -06007112bool SyncOpEndRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07007113 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06007114 return false;
7115}
7116
John Zulauf00119522022-05-23 19:07:42 -06007117void SyncOpEndRenderPass::ReplayRecord(QueueId queue_id, ResourceUsageTag tag, AccessContext *access_context,
John Zulaufbb890452021-12-14 11:30:18 -07007118 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06007119
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07007120void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
7121 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
7122 StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
7123 auto *cb_access_context = GetAccessContext(commandBuffer);
7124 assert(cb_access_context);
7125 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
7126 auto *context = cb_access_context->GetCurrentAccessContext();
7127 assert(context);
7128
Jeremy Gebbenf4449392022-01-28 10:09:10 -07007129 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07007130
7131 if (dst_buffer) {
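        // The marker written by vkCmdWriteBufferMarker2AMD is a single uint32_t, hence the fixed 4-byte range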
7132 const ResourceAccessRange range = MakeRange(dstOffset, 4);
7133 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
7134 }
7135}
John Zulaufd05c5842021-03-26 11:32:16 -06007136
John Zulaufae842002021-04-15 18:20:55 -06007137bool SyncValidator::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
7138 const VkCommandBuffer *pCommandBuffers) const {
7139 bool skip = StateTracker::PreCallValidateCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
John Zulaufae842002021-04-15 18:20:55 -06007140 const auto *cb_context = GetAccessContext(commandBuffer);
7141 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06007142
7143 // Heavyweight, but we need a proxy copy of the active command buffer access context
7144 CommandBufferAccessContext proxy_cb_context(*cb_context, CommandBufferAccessContext::AsProxyContext());
John Zulaufae842002021-04-15 18:20:55 -06007145
7146 // Make working copies of the access and events contexts
John Zulaufae842002021-04-15 18:20:55 -06007147 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
John Zulauf41a9c7c2021-12-07 15:59:53 -07007148 proxy_cb_context.NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
7149
John Zulaufae842002021-04-15 18:20:55 -06007150 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
7151 if (!recorded_cb_context) continue;
John Zulauf4fa68462021-04-26 21:04:22 -06007152
7153 const auto *recorded_context = recorded_cb_context->GetCurrentAccessContext();
7154 assert(recorded_context);
sjfricke0bea06e2022-06-05 09:22:26 +09007155 skip |= recorded_cb_context->ValidateFirstUse(&proxy_cb_context, "vkCmdExecuteCommands", cb_index);
John Zulauf4fa68462021-04-26 21:04:22 -06007156
7157        // The barriers have already been applied in ValidateFirstUse
7158 ResourceUsageRange tag_range = proxy_cb_context.ImportRecordedAccessLog(*recorded_cb_context);
John Zulauf1d5f9c12022-05-13 14:51:08 -06007159 proxy_cb_context.ResolveExecutedCommandBuffer(*recorded_context, tag_range.begin);
John Zulaufae842002021-04-15 18:20:55 -06007160 }
7161
John Zulaufae842002021-04-15 18:20:55 -06007162 return skip;
7163}
7164
7165void SyncValidator::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
7166 const VkCommandBuffer *pCommandBuffers) {
7167 StateTracker::PreCallRecordCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
John Zulauf4fa68462021-04-26 21:04:22 -06007168 auto *cb_context = GetAccessContext(commandBuffer);
7169 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06007170 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
John Zulauf41a9c7c2021-12-07 15:59:53 -07007171 cb_context->NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
John Zulauf4fa68462021-04-26 21:04:22 -06007172 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
7173 if (!recorded_cb_context) continue;
sjfricke0bea06e2022-06-05 09:22:26 +09007174 cb_context->RecordExecutedCommandBuffer(*recorded_cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06007175 }
John Zulaufae842002021-04-15 18:20:55 -06007176}
7177
John Zulauf1d5f9c12022-05-13 14:51:08 -06007178void SyncValidator::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
7179 StateTracker::PostCallRecordQueueWaitIdle(queue, result);
7180 if ((result != VK_SUCCESS) || (!enabled[sync_validation_queue_submit]) || (queue == VK_NULL_HANDLE)) return;
7181
7182 const auto queue_state = GetQueueSyncStateShared(queue);
7183 if (!queue_state) return; // Invalid queue
7184 QueueId waited_queue = queue_state->GetQueueId();
7185
7186 // We need to go through every queue batch context and clear all accesses this wait synchronizes
7187 // As usual -- two groups, the "last batch" and the signaled semaphores
7188 // NOTE: Since ApplyTaggedWait crawls through every usage in every ResourceAccessState in the AccessContext of *every*
7189 // QueueBatchContext, track which we've done to avoid duplicate traversals
John Zulaufe0757ba2022-06-10 16:51:45 -06007190 QueueBatchContext::BatchSet queue_batch_contexts = GetQueueBatchSnapshot();
7191 for (auto &batch : queue_batch_contexts) {
7192 batch->ApplyTaggedWait(waited_queue, ResourceUsageRecord::kMaxIndex);
John Zulauf1d5f9c12022-05-13 14:51:08 -06007193 }
7194
John Zulaufe0757ba2022-06-10 16:51:45 -06007195 // TODO: Fences affected by Wait
John Zulauf1d5f9c12022-05-13 14:51:08 -06007196}
7197
7198void SyncValidator::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
7199 StateTracker::PostCallRecordDeviceWaitIdle(device, result);
John Zulaufe0757ba2022-06-10 16:51:45 -06007200
7201 QueueBatchContext::BatchSet queue_batch_contexts = GetQueueBatchSnapshot();
7202 for (auto &batch : queue_batch_contexts) {
7203 batch->ApplyDeviceWait();
John Zulauf1d5f9c12022-05-13 14:51:08 -06007204 }
7205
John Zulaufe0757ba2022-06-10 16:51:45 -06007206 // TODO: Update Fences affected by Wait
John Zulauf1d5f9c12022-05-13 14:51:08 -06007207}
7208
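// Carries queue submit state from the Validate phase to the Record phase through thread local storage
// (see the TlsGuard specialization below); the logger and signaled members chain back to the parent
// global state they are constructed from.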
John Zulauf697c0e12022-04-19 16:31:12 -06007209struct QueueSubmitCmdState {
7210 std::shared_ptr<const QueueSyncState> queue;
7211 std::shared_ptr<QueueBatchContext> last_batch;
John Zulauf697c0e12022-04-19 16:31:12 -06007212 AccessLogger logger;
John Zulaufcb7e1672022-05-04 13:46:08 -06007213 SignaledSemaphores signaled;
John Zulauf00119522022-05-23 19:07:42 -06007214 QueueSubmitCmdState(const AccessLogger &parent_log, const SignaledSemaphores &parent_semaphores)
7215 : logger(&parent_log), signaled(parent_semaphores) {}
John Zulauf697c0e12022-04-19 16:31:12 -06007216};
7217
7218template <>
7219thread_local layer_data::optional<QueueSubmitCmdState> layer_data::TlsGuard<QueueSubmitCmdState>::payload_{};
7220
John Zulaufbbda4572022-04-19 16:20:45 -06007221bool SyncValidator::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
7222 VkFence fence) const {
7223 bool skip = false;
John Zulaufcb7e1672022-05-04 13:46:08 -06007224
7225 // Since this early return is above the TlsGuard, the Record phase must also be.
John Zulauf78cb2082022-04-20 16:37:48 -06007226 if (!enabled[sync_validation_queue_submit]) return skip;
7227
John Zulauf00119522022-05-23 19:07:42 -06007228 layer_data::TlsGuard<QueueSubmitCmdState> cmd_state(&skip, global_access_log_, signaled_semaphores_);
John Zulauf697c0e12022-04-19 16:31:12 -06007229 const auto fence_state = Get<FENCE_STATE>(fence);
7230 cmd_state->queue = GetQueueSyncStateShared(queue);
7231 if (!cmd_state->queue) return skip; // Invalid Queue
John Zulaufbbda4572022-04-19 16:20:45 -06007232
John Zulauf697c0e12022-04-19 16:31:12 -06007233    // The submit id is a mutable atomic which is not recoverable on a skip == true condition
7234 uint64_t submit_id = cmd_state->queue->ReserveSubmitId();
7235
7236 // verify each submit batch
7237 // Since the last batch from the queue state is const, we need to track the last_batch separately from the
7238 // most recently created batch
7239 std::shared_ptr<const QueueBatchContext> last_batch = cmd_state->queue->LastBatch();
7240 std::shared_ptr<QueueBatchContext> batch;
7241 for (uint32_t batch_idx = 0; batch_idx < submitCount; batch_idx++) {
7242 const VkSubmitInfo &submit = pSubmits[batch_idx];
John Zulaufcb7e1672022-05-04 13:46:08 -06007243 batch = std::make_shared<QueueBatchContext>(*this, *cmd_state->queue);
7244 batch->Setup(last_batch, submit, cmd_state->signaled);
John Zulauf697c0e12022-04-19 16:31:12 -06007245
John Zulaufe0757ba2022-06-10 16:51:45 -06007246 // Skip import and validation of empty batches
7247 if (batch->GetTagRange().size()) {
7248 batch->SetBatchLog(cmd_state->logger, submit_id, batch_idx);
John Zulauf697c0e12022-04-19 16:31:12 -06007249
John Zulaufe0757ba2022-06-10 16:51:45 -06007250 // For each submit in the batch...
7251 for (const auto &cb : *batch) {
7252 if (cb.cb->GetTagLimit() == 0) continue; // Skip empty CB's
7253 skip |= cb.cb->ValidateFirstUse(batch.get(), "vkQueueSubmit", cb.index);
7254
7255                // The barriers have already been applied in ValidateFirstUse
7256 ResourceUsageRange tag_range = batch->ImportRecordedAccessLog(*cb.cb);
7257 batch->ResolveSubmittedCommandBuffer(*cb.cb->GetCurrentAccessContext(), tag_range.begin);
7258 }
John Zulauf697c0e12022-04-19 16:31:12 -06007259 }
7260
John Zulaufe0757ba2022-06-10 16:51:45 -06007261 // Empty batches could have semaphores, though.
John Zulauf697c0e12022-04-19 16:31:12 -06007262 for (auto &sem : layer_data::make_span(submit.pSignalSemaphores, submit.signalSemaphoreCount)) {
7263 // Make a copy of the state, signal the copy and pend it...
John Zulaufcb7e1672022-05-04 13:46:08 -06007264 auto sem_state = Get<SEMAPHORE_STATE>(sem);
John Zulauf697c0e12022-04-19 16:31:12 -06007265 if (!sem_state) continue;
John Zulaufcb7e1672022-05-04 13:46:08 -06007266 auto semaphore_info = lvl_init_struct<VkSemaphoreSubmitInfo>();
7267 semaphore_info.stageMask = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
7268 cmd_state->signaled.SignalSemaphore(sem_state, batch, semaphore_info);
John Zulauf697c0e12022-04-19 16:31:12 -06007269 }
7270        // Unless the previous batch was referenced by a signal, the QueueBatchContext will self destruct; as we've
7271        // resolved the previous batch into this one, we can let any contexts we've fully referenced go.
7272 last_batch = batch;
7273 }
7274 // The most recently created batch will become the queue's "last batch" in the record phase
7275 if (batch) {
7276 cmd_state->last_batch = std::move(batch);
7277 }
7278
7279 // Note that if we skip, guard cleans up for us, but cannot release the reserved tag range
John Zulaufbbda4572022-04-19 16:20:45 -06007280 return skip;
7281}
7282
7283void SyncValidator::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
7284 VkResult result) {
7285 StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
John Zulauf78cb2082022-04-20 16:37:48 -06007286
John Zulaufcb7e1672022-05-04 13:46:08 -06007287 // If this return is above the TlsGuard, then the Validate phase return must also be.
John Zulauf78cb2082022-04-20 16:37:48 -06007288 if (!enabled[sync_validation_queue_submit]) return; // Queue submit validation must be affirmatively enabled
7289
John Zulaufcb7e1672022-05-04 13:46:08 -06007290    // The earliest return (when enabled) must be *after* the TlsGuard, as it is the TlsGuard that cleans up the cmd_state
7291 // static payload
John Zulauf697c0e12022-04-19 16:31:12 -06007292 layer_data::TlsGuard<QueueSubmitCmdState> cmd_state;
John Zulaufcb7e1672022-05-04 13:46:08 -06007293
7294 if (VK_SUCCESS != result) return; // dispatched QueueSubmit failed
John Zulauf78cb2082022-04-20 16:37:48 -06007295 if (!cmd_state->queue) return; // Validation couldn't find a valid queue object
7296
John Zulauf697c0e12022-04-19 16:31:12 -06007297 // Don't need to look up the queue state again, but we need a non-const version
7298 std::shared_ptr<QueueSyncState> queue_state = std::const_pointer_cast<QueueSyncState>(std::move(cmd_state->queue));
John Zulauf697c0e12022-04-19 16:31:12 -06007299
John Zulaufcb7e1672022-05-04 13:46:08 -06007300    // Import into the global state the semaphore signals we applied to the cmd_state QueueBatchContexts
7301    // NOTE: All conserved QueueBatchContexts need to have their access logs reset to use the global logger, and the only
7302    // conserved QBC's are those referenced by unwaited signals and the last batch.
7303 for (auto &sig_sem : cmd_state->signaled) {
7304 if (sig_sem.second && sig_sem.second->batch) {
John Zulaufe0757ba2022-06-10 16:51:45 -06007305 auto &sig_batch = sig_sem.second->batch;
7306 sig_batch->ResetAccessLog();
7307            // Batches retained for signalled semaphores don't need to retain event data, unless it's the last batch in the submit
7308 if (sig_batch != cmd_state->last_batch) {
7309 sig_batch->ResetEventsContext();
7310 }
John Zulaufcb7e1672022-05-04 13:46:08 -06007311 }
7312 signaled_semaphores_.Import(sig_sem.first, std::move(sig_sem.second));
John Zulauf697c0e12022-04-19 16:31:12 -06007313 }
John Zulaufcb7e1672022-05-04 13:46:08 -06007314 cmd_state->signaled.Reset();
John Zulauf697c0e12022-04-19 16:31:12 -06007315
John Zulaufcb7e1672022-05-04 13:46:08 -06007316 // Update the queue to point to the last batch from the submit
7317 if (cmd_state->last_batch) {
7318 cmd_state->last_batch->ResetAccessLog();
John Zulaufe0757ba2022-06-10 16:51:45 -06007319
7320 // Clean up the events data in the previous last batch on queue, as only the subsequent batches have valid use for them
7321        // and the QueueBatchContext::Setup calls have been copying them along from batch to batch during submit.
7322 auto last_batch = queue_state->LastBatch();
7323 if (last_batch) {
7324 last_batch->ResetEventsContext();
7325 }
John Zulaufcb7e1672022-05-04 13:46:08 -06007326 queue_state->SetLastBatch(std::move(cmd_state->last_batch));
John Zulauf697c0e12022-04-19 16:31:12 -06007327 }
7328
7329 // Update the global access log from the one built during validation
7330 global_access_log_.MergeMove(std::move(cmd_state->logger));
7331
John Zulauf697c0e12022-04-19 16:31:12 -06007332
7333 // WIP: record information about fences
John Zulaufbbda4572022-04-19 16:20:45 -06007334}
7335
7336bool SyncValidator::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
7337 VkFence fence) const {
John Zulauf78cb2082022-04-20 16:37:48 -06007338 bool skip = false;
7339 if (!enabled[sync_validation_queue_submit]) return skip;
7340
John Zulauf697c0e12022-04-19 16:31:12 -06007341 // WIP: Add Submit2 support
John Zulauf78cb2082022-04-20 16:37:48 -06007342 return skip;
John Zulaufbbda4572022-04-19 16:20:45 -06007343}
7344
7345void SyncValidator::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
7346 VkFence fence, VkResult result) {
7347 StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result);
John Zulauf78cb2082022-04-20 16:37:48 -06007348 if (VK_SUCCESS != result) return; // dispatched QueueSubmit2 failed
7349
7350 if (!enabled[sync_validation_queue_submit]) return;
7351
John Zulauf697c0e12022-04-19 16:31:12 -06007352 // WIP: Add Submit2 support
John Zulaufbbda4572022-04-19 16:20:45 -06007353}
7354
John Zulaufd0ec59f2021-03-13 14:25:08 -07007355AttachmentViewGen::AttachmentViewGen(const IMAGE_VIEW_STATE *view, const VkOffset3D &offset, const VkExtent3D &extent)
7356 : view_(view), view_mask_(), gen_store_() {
7357 if (!view_ || !view_->image_state || !SimpleBinding(*view_->image_state)) return;
7358 const IMAGE_STATE &image_state = *view_->image_state.get();
7359 const auto base_address = ResourceBaseAddress(image_state);
7360 const auto *encoder = image_state.fragment_encoder.get();
7361 if (!encoder) return;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06007362 // Get offset and extent for the view, accounting for possible depth slicing
7363 const VkOffset3D zero_offset = view->GetOffset();
7364 const VkExtent3D &image_extent = view->GetExtent();
John Zulaufd0ec59f2021-03-13 14:25:08 -07007365 // Intentional copy
7366 VkImageSubresourceRange subres_range = view_->normalized_subresource_range;
7367 view_mask_ = subres_range.aspectMask;
Aitor Camachoe67f2c72022-06-08 14:41:58 +02007368 gen_store_[Gen::kViewSubresource].emplace(*encoder, subres_range, zero_offset, image_extent, base_address,
7369 view->IsDepthSliced());
7370 gen_store_[Gen::kRenderArea].emplace(*encoder, subres_range, offset, extent, base_address, view->IsDepthSliced());
John Zulaufd0ec59f2021-03-13 14:25:08 -07007371
7372 const auto depth = view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT;
7373 if (depth && (depth != view_mask_)) {
7374 subres_range.aspectMask = depth;
Aitor Camachoe67f2c72022-06-08 14:41:58 +02007375 gen_store_[Gen::kDepthOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address, view->IsDepthSliced());
John Zulaufd0ec59f2021-03-13 14:25:08 -07007376 }
7377 const auto stencil = view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT;
7378 if (stencil && (stencil != view_mask_)) {
7379 subres_range.aspectMask = stencil;
Aitor Camachoe67f2c72022-06-08 14:41:58 +02007380 gen_store_[Gen::kStencilOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address,
7381 view->IsDepthSliced());
John Zulaufd0ec59f2021-03-13 14:25:08 -07007382 }
7383}
7384
7385const ImageRangeGen *AttachmentViewGen::GetRangeGen(AttachmentViewGen::Gen gen_type) const {
7386 const ImageRangeGen *got = nullptr;
7387 switch (gen_type) {
7388 case kViewSubresource:
7389 got = &gen_store_[kViewSubresource];
7390 break;
7391 case kRenderArea:
7392 got = &gen_store_[kRenderArea];
7393 break;
7394 case kDepthOnlyRenderArea:
7395 got =
7396 (view_mask_ == VK_IMAGE_ASPECT_DEPTH_BIT) ? &gen_store_[Gen::kRenderArea] : &gen_store_[Gen::kDepthOnlyRenderArea];
7397 break;
7398 case kStencilOnlyRenderArea:
7399 got = (view_mask_ == VK_IMAGE_ASPECT_STENCIL_BIT) ? &gen_store_[Gen::kRenderArea]
7400 : &gen_store_[Gen::kStencilOnlyRenderArea];
7401 break;
7402 default:
7403 assert(got);
7404 }
7405 return got;
7406}
7407
7408AttachmentViewGen::Gen AttachmentViewGen::GetDepthStencilRenderAreaGenType(bool depth_op, bool stencil_op) const {
7409 assert(IsValid());
7410 assert(view_mask_ & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
7411 if (depth_op) {
7412 assert(view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT);
7413 if (stencil_op) {
7414 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
7415 return kRenderArea;
7416 }
7417 return kDepthOnlyRenderArea;
7418 }
7419 if (stencil_op) {
7420 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
7421 return kStencilOnlyRenderArea;
7422 }
7423
7424 assert(depth_op || stencil_op);
7425 return kRenderArea;
7426}
7427
7428AccessAddressType AttachmentViewGen::GetAddressType() const { return AccessContext::ImageAddressType(*view_->image_state); }
John Zulauf8eda1562021-04-13 17:06:41 -06007429
John Zulaufe0757ba2022-06-10 16:51:45 -06007430void SyncEventsContext::ApplyBarrier(const SyncExecScope &src, const SyncExecScope &dst, ResourceUsageTag tag) {
John Zulauf8eda1562021-04-13 17:06:41 -06007431 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
7432 for (auto &event_pair : map_) {
7433 assert(event_pair.second); // Shouldn't be storing empty
7434 auto &sync_event = *event_pair.second;
7435 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
John Zulaufe0757ba2022-06-10 16:51:45 -06007436        // But only if occurring before the tag
7437 if (((sync_event.barriers & src.exec_scope) || all_commands_bit) && (sync_event.last_command_tag <= tag)) {
John Zulauf8eda1562021-04-13 17:06:41 -06007438 sync_event.barriers |= dst.exec_scope;
7439 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
7440 }
7441 }
7442}
John Zulaufbb890452021-12-14 11:30:18 -07007443
John Zulaufe0757ba2022-06-10 16:51:45 -06007444void SyncEventsContext::ApplyTaggedWait(VkQueueFlags queue_flags, ResourceUsageTag tag) {
7445 const SyncExecScope src_scope =
7446 SyncExecScope::MakeSrc(queue_flags, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_2_HOST_BIT);
7447 const SyncExecScope dst_scope = SyncExecScope::MakeDst(queue_flags, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT);
7448 ApplyBarrier(src_scope, dst_scope, tag);
7449}
7450
7451SyncEventsContext &SyncEventsContext::DeepCopy(const SyncEventsContext &from) {
7452 // We need a deep copy of the const context to update during validation phase
7453 for (const auto &event : from.map_) {
7454 map_.emplace(event.first, std::make_shared<SyncEventState>(*event.second));
7455 }
7456 return *this;
7457}
7458
John Zulaufbb890452021-12-14 11:30:18 -07007459ReplayTrackbackBarriersAction::ReplayTrackbackBarriersAction(VkQueueFlags queue_flags,
7460 const SubpassDependencyGraphNode &subpass_dep,
7461 const std::vector<ReplayTrackbackBarriersAction> &replay_contexts) {
7462 bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
7463 trackback_barriers.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
7464 for (const auto &prev_dep : subpass_dep.prev) {
7465 const auto prev_pass = prev_dep.first->pass;
7466 const auto &prev_barriers = prev_dep.second;
7467 trackback_barriers.emplace_back(&replay_contexts[prev_pass], queue_flags, prev_barriers);
7468 }
7469 if (has_barrier_from_external) {
7470        // Store the barrier from external with the rest, but save the pointer for "by subpass" lookups.
7471 trackback_barriers.emplace_back(nullptr, queue_flags, subpass_dep.barrier_from_external);
7472 }
7473}
7474
7475void ReplayTrackbackBarriersAction::operator()(ResourceAccessState *access) const {
7476 if (trackback_barriers.size() == 1) {
7477 trackback_barriers[0](access);
7478 } else {
7479 ResourceAccessState resolved;
7480 for (const auto &trackback : trackback_barriers) {
7481 ResourceAccessState access_copy = *access;
7482 trackback(&access_copy);
7483 resolved.Resolve(access_copy);
7484 }
7485 *access = resolved;
7486 }
7487}
7488
7489ReplayTrackbackBarriersAction::TrackbackBarriers::TrackbackBarriers(
7490 const ReplayTrackbackBarriersAction *source_subpass_, VkQueueFlags queue_flags_,
7491 const std::vector<const VkSubpassDependency2 *> &subpass_dependencies_)
7492 : Base(source_subpass_, queue_flags_, subpass_dependencies_) {}
7493
7494void ReplayTrackbackBarriersAction::TrackbackBarriers::operator()(ResourceAccessState *access) const {
7495 if (source_subpass) {
7496 (*source_subpass)(access);
7497 }
7498 access->ApplyBarriersImmediate(barriers);
7499}
John Zulauf697c0e12022-04-19 16:31:12 -06007500
John Zulaufcb7e1672022-05-04 13:46:08 -06007501QueueBatchContext::QueueBatchContext(const SyncValidator &sync_state, const QueueSyncState &queue_state)
7502 : CommandExecutionContext(&sync_state), queue_state_(&queue_state), tag_range_(0, 0), batch_log_(nullptr) {}
7503
John Zulauf697c0e12022-04-19 16:31:12 -06007504template <typename BatchInfo>
John Zulaufcb7e1672022-05-04 13:46:08 -06007505void QueueBatchContext::Setup(const std::shared_ptr<const QueueBatchContext> &prev_batch, const BatchInfo &batch_info,
7506 SignaledSemaphores &signaled) {
John Zulauf697c0e12022-04-19 16:31:12 -06007507 SetupCommandBufferInfo(batch_info);
John Zulaufcb7e1672022-05-04 13:46:08 -06007508 SetupAccessContext(prev_batch, batch_info, signaled);
John Zulauf697c0e12022-04-19 16:31:12 -06007509}
John Zulauf1d5f9c12022-05-13 14:51:08 -06007510void QueueBatchContext::ResolveSubmittedCommandBuffer(const AccessContext &recorded_context, ResourceUsageTag offset) {
John Zulaufe0757ba2022-06-10 16:51:45 -06007511 GetCurrentAccessContext()->ResolveFromContext(QueueTagOffsetBarrierAction(GetQueueId(), offset), recorded_context);
John Zulauf1d5f9c12022-05-13 14:51:08 -06007512}
John Zulauf697c0e12022-04-19 16:31:12 -06007513
7514VulkanTypedHandle QueueBatchContext::Handle() const { return queue_state_->Handle(); }
7515
John Zulauf1d5f9c12022-05-13 14:51:08 -06007516void QueueBatchContext::ApplyTaggedWait(QueueId queue_id, ResourceUsageTag tag) {
7517 QueueWaitWorm wait_worm(queue_id);
7518 access_context_.ForAll(wait_worm);
7519 if (wait_worm.erase_all) {
7520 access_context_.Reset();
7521 } else {
7522 // TODO: Profiling will tell us if we need a more efficient clean up.
7523 for (const auto &address : wait_worm.erase_list) {
7524 access_context_.DeleteAccess(address);
7525 }
7526 }
John Zulaufe0757ba2022-06-10 16:51:45 -06007527
7528 if (queue_id == GetQueueId()) {
7529 events_context_.ApplyTaggedWait(GetQueueFlags(), tag);
7530 }
John Zulauf1d5f9c12022-05-13 14:51:08 -06007531}
7532
7533// Clear all accesses
John Zulaufe0757ba2022-06-10 16:51:45 -06007534void QueueBatchContext::ApplyDeviceWait() {
7535 access_context_.Reset();
7536 events_context_.ApplyTaggedWait(GetQueueFlags(), ResourceUsageRecord::kMaxIndex);
7537}
John Zulauf1d5f9c12022-05-13 14:51:08 -06007538
John Zulaufecf4ac52022-06-06 10:08:42 -06007539class ApplySemaphoreBarrierAction {
7540 public:
7541 ApplySemaphoreBarrierAction(const SemaphoreScope &signal, const SemaphoreScope &wait) : signal_(signal), wait_(wait) {}
7542 void operator()(ResourceAccessState *access) const { access->ApplySemaphore(signal_, wait_); }
7543
7544 private:
7545 const SemaphoreScope &signal_;
7546 const SemaphoreScope wait_;
7547};
7548
7549std::shared_ptr<QueueBatchContext> QueueBatchContext::ResolveOneWaitSemaphore(VkSemaphore sem, VkPipelineStageFlags2 wait_mask,
7550 SignaledSemaphores &signaled) {
John Zulaufcb7e1672022-05-04 13:46:08 -06007551 auto sem_state = sync_state_->Get<SEMAPHORE_STATE>(sem);
John Zulaufecf4ac52022-06-06 10:08:42 -06007552 if (!sem_state) return nullptr; // Semaphore validity is handled by CoreChecks
John Zulauf697c0e12022-04-19 16:31:12 -06007553
John Zulaufcb7e1672022-05-04 13:46:08 -06007554 // When signal state goes out of scope, the signal information will be dropped, as Unsignal has released ownership.
7555 auto signal_state = signaled.Unsignal(sem);
John Zulaufecf4ac52022-06-06 10:08:42 -06007556 if (!signal_state) return nullptr; // Invalid signal, skip it.
John Zulaufcb7e1672022-05-04 13:46:08 -06007557
John Zulaufecf4ac52022-06-06 10:08:42 -06007558 assert(signal_state->batch);
John Zulauf697c0e12022-04-19 16:31:12 -06007559
John Zulaufecf4ac52022-06-06 10:08:42 -06007560 const SemaphoreScope &signal_scope = signal_state->first_scope;
John Zulauf697c0e12022-04-19 16:31:12 -06007561 const auto queue_flags = queue_state_->GetQueueFlags();
John Zulaufecf4ac52022-06-06 10:08:42 -06007562 SemaphoreScope wait_scope{GetQueueId(), SyncExecScope::MakeDst(queue_flags, wait_mask)};
7563 if (signal_scope.queue == wait_scope.queue) {
7564 // If signal queue == wait queue, signal is treated as a memory barrier with an access scope equal to the
7565 // valid accesses for the sync scope.
7566 SyncBarrier sem_barrier(signal_scope, wait_scope, SyncBarrier::AllAccess());
7567 const BatchBarrierOp sem_barrier_op(wait_scope.queue, sem_barrier);
7568 access_context_.ResolveFromContext(sem_barrier_op, signal_state->batch->access_context_);
John Zulaufe0757ba2022-06-10 16:51:45 -06007569 events_context_.ApplyBarrier(sem_barrier.src_exec_scope, sem_barrier.dst_exec_scope, ResourceUsageRecord::kMaxIndex);
John Zulaufecf4ac52022-06-06 10:08:42 -06007570 } else {
7571 ApplySemaphoreBarrierAction sem_op(signal_scope, wait_scope);
7572 access_context_.ResolveFromContext(sem_op, signal_state->batch->access_context_);
John Zulauf697c0e12022-04-19 16:31:12 -06007573 }
John Zulaufecf4ac52022-06-06 10:08:42 -06007574 // Cannot move from the signal state because it could be from the const global state, and C++ doesn't
7575 // enforce deep constness.
7576 return signal_state->batch;
John Zulauf697c0e12022-04-19 16:31:12 -06007577}
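
// Illustrative sketch (hypothetical handles): for
//
//   vkQueueSubmit(q0, ...signal sem...);   // batch A
//   vkQueueSubmit(q1, ...wait on sem...);  // batch B
//
// batch B resolves batch A's accesses via ApplySemaphoreBarrierAction (cross-queue case above), while a
// same-queue wait is treated as an all-access memory barrier restricted to the semaphore's exec scopes.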
7578
7579// Accessor Traits to allow Submit and Submit2 constructors to call the same utilities
7580template <>
7581class QueueBatchContext::SubmitInfoAccessor<VkSubmitInfo> {
7582 public:
7583 SubmitInfoAccessor(const VkSubmitInfo &info) : info_(info) {}
7584 inline uint32_t WaitSemaphoreCount() const { return info_.waitSemaphoreCount; }
7585 inline VkSemaphore WaitSemaphore(uint32_t index) { return info_.pWaitSemaphores[index]; }
7586 inline VkPipelineStageFlags2 WaitDstMask(uint32_t index) { return info_.pWaitDstStageMask[index]; }
7587 inline uint32_t CommandBufferCount() const { return info_.commandBufferCount; }
7588 inline VkCommandBuffer CommandBuffer(uint32_t index) { return info_.pCommandBuffers[index]; }
7589
7590 private:
7591 const VkSubmitInfo &info_;
7592};
7593template <typename BatchInfo, typename Fn>
7594void QueueBatchContext::ForEachWaitSemaphore(const BatchInfo &batch_info, Fn &&func) {
7595 using Accessor = QueueBatchContext::SubmitInfoAccessor<BatchInfo>;
7596 Accessor batch(batch_info);
7597 const uint32_t wait_count = batch.WaitSemaphoreCount();
7598 for (uint32_t i = 0; i < wait_count; ++i) {
7599 func(batch.WaitSemaphore(i), batch.WaitDstMask(i));
7600 }
7601}
7602
7603template <typename BatchInfo>
John Zulaufcb7e1672022-05-04 13:46:08 -06007604void QueueBatchContext::SetupAccessContext(const std::shared_ptr<const QueueBatchContext> &prev, const BatchInfo &batch_info,
7605 SignaledSemaphores &signaled) {
John Zulaufe0757ba2022-06-10 16:51:45 -06007606 // Copy in the event state from the previous batch (on this queue)
7607 if (prev) {
7608 events_context_.DeepCopy(prev->events_context_);
7609 }
7610
John Zulaufecf4ac52022-06-06 10:08:42 -06007611 // Import (resolve) the batches that are waited on, with the semaphore's effective barriers applied
7612 layer_data::unordered_set<std::shared_ptr<const QueueBatchContext>> batches_resolved;
7613 ForEachWaitSemaphore(batch_info, [this, &signaled, &batches_resolved](VkSemaphore sem, VkPipelineStageFlags2 wait_mask) {
7614 std::shared_ptr<QueueBatchContext> resolved = ResolveOneWaitSemaphore(sem, wait_mask, signaled);
7615 if (resolved) {
7616 batches_resolved.emplace(std::move(resolved));
7617 }
John Zulauf697c0e12022-04-19 16:31:12 -06007618 });
7619
John Zulaufecf4ac52022-06-06 10:08:42 -06007620    // If the previous batch wasn't imported via a semaphore wait, make sure a "submit order" non-barriered import is done
7621 if (prev && !layer_data::Contains(batches_resolved, prev)) {
7622 access_context_.ResolveFromContext(NoopBarrierAction(), prev->access_context_);
John Zulauf78cb2082022-04-20 16:37:48 -06007623 }
7624
John Zulauf697c0e12022-04-19 16:31:12 -06007625 // Gather async context information for hazard checks and conserve the QBC's for the async batches
John Zulaufecf4ac52022-06-06 10:08:42 -06007626 async_batches_ =
7627 sync_state_->GetQueueLastBatchSnapshot([&batches_resolved, &prev](const std::shared_ptr<const QueueBatchContext> &batch) {
7628 return (batch != prev) && !layer_data::Contains(batches_resolved, batch);
John Zulauf697c0e12022-04-19 16:31:12 -06007629 });
7630 for (const auto &async_batch : async_batches_) {
7631 access_context_.AddAsyncContext(async_batch->GetCurrentAccessContext());
7632 }
7633}
7634
7635template <typename BatchInfo>
7636void QueueBatchContext::SetupCommandBufferInfo(const BatchInfo &batch_info) {
7637 using Accessor = QueueBatchContext::SubmitInfoAccessor<BatchInfo>;
7638 Accessor batch(batch_info);
7639
7640 // Create the list of command buffers to submit
7641 const uint32_t cb_count = batch.CommandBufferCount();
7642 command_buffers_.reserve(cb_count);
7643 for (uint32_t index = 0; index < cb_count; ++index) {
7644 auto cb_context = sync_state_->GetAccessContextShared(batch.CommandBuffer(index));
7645 if (cb_context) {
7646 tag_range_.end += cb_context->GetTagLimit();
7647 command_buffers_.emplace_back(index, std::move(cb_context));
7648 }
7649 }
7650}
7651
7652// Look up the usage information from the local or global logger
7653std::string QueueBatchContext::FormatUsage(ResourceUsageTag tag) const {
7654 const AccessLogger &use_logger = (logger_) ? *logger_ : sync_state_->global_access_log_;
7655 std::stringstream out;
7656 AccessLogger::AccessRecord access = use_logger[tag];
7657 if (access.IsValid()) {
7658 const AccessLogger::BatchRecord &batch = *access.batch;
7659 const ResourceUsageRecord &record = *access.record;
7660 // Queue and Batch information
7661 out << SyncNodeFormatter(*sync_state_, batch.queue->GetQueueState());
7662 out << ", submit: " << batch.submit_index << ", batch: " << batch.batch_index;
7663
7664        // Command buffer usage information
7665 out << record;
7666 out << SyncNodeFormatter(*sync_state_, record.cb_state);
7667 out << ", reset_no: " << std::to_string(record.reset_count);
7668 }
7669 return out.str();
7670}
7671
7672VkQueueFlags QueueBatchContext::GetQueueFlags() const { return queue_state_->GetQueueFlags(); }
7673
John Zulauf00119522022-05-23 19:07:42 -06007674QueueId QueueBatchContext::GetQueueId() const {
7675 QueueId id = queue_state_ ? queue_state_->GetQueueId() : QueueSyncState::kQueueIdInvalid;
7676 return id;
7677}
7678
John Zulauf697c0e12022-04-19 16:31:12 -06007679void QueueBatchContext::SetBatchLog(AccessLogger &logger, uint64_t submit_id, uint32_t batch_id) {
7680 // Need new global tags for all accesses... the Reserve updates a mutable atomic
7681 ResourceUsageRange global_tags = sync_state_->ReserveGlobalTagRange(GetTagRange().size());
7682 SetTagBias(global_tags.begin);
7683    // Add an access log for the batch's range and point the batch at it.
7684 logger_ = &logger;
7685 batch_log_ = logger.AddBatch(queue_state_, submit_id, batch_id, global_tags);
7686}
7687
7688void QueueBatchContext::InsertRecordedAccessLogEntries(const CommandBufferAccessContext &submitted_cb) {
7689 assert(batch_log_); // Don't import command buffer contexts until you've set up the log for the batch context
7690 batch_log_->Append(submitted_cb.GetAccessLog());
7691}
7692
7693void QueueBatchContext::SetTagBias(ResourceUsageTag bias) {
7694 const auto size = tag_range_.size();
7695 tag_range_.begin = bias;
7696 tag_range_.end = bias + size;
7697 access_context_.SetStartTag(bias);
7698}
7699
John Zulauf1d5f9c12022-05-13 14:51:08 -06007700QueueBatchContext::QueueWaitWorm::QueueWaitWorm(QueueId queue, ResourceUsageTag tag) : predicate(queue) {}
7701
7702void QueueBatchContext::QueueWaitWorm::operator()(AccessAddressType address_type, ResourceAccessRangeMap::value_type &access) {
7703 bool erased = access.second.ApplyQueueTagWait(predicate);
7704 if (erased) {
7705 erase_list.emplace_back(address_type, access.first);
7706 } else {
7707 erase_all = false;
7708 }
7709}
7710
John Zulauf697c0e12022-04-19 16:31:12 -06007711AccessLogger::BatchLog *AccessLogger::AddBatch(const QueueSyncState *queue_state, uint64_t submit_id, uint32_t batch_id,
7712 const ResourceUsageRange &range) {
7713 const auto inserted = access_log_map_.insert(std::make_pair(range, BatchLog(BatchRecord(queue_state, submit_id, batch_id))));
7714 assert(inserted.second);
7715 return &inserted.first->second;
7716}
7717
7718void AccessLogger::MergeMove(AccessLogger &&child) {
7719 for (auto &range : child.access_log_map_) {
7720 BatchLog &child_batch = range.second;
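        // Insert an empty BatchLog for the child's range, then move the child's log into it to avoid a copy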
7721 auto insert_pair = access_log_map_.insert(std::make_pair(range.first, BatchLog()));
7722 insert_pair.first->second = std::move(child_batch);
7723 assert(insert_pair.second);
7724 }
7725 child.Reset();
7726}
7727
7728void AccessLogger::Reset() {
7729 prev_ = nullptr;
7730 access_log_map_.clear();
7731}
7732
7733// Since we're updating the QueueSync state, this is Record phase and the access log needs to point to the global one
7734// Batch Contexts saved during signalling have their AccessLog reset when the pending signals are signalled.
7735// NOTE: By design, QueueBatchContexts that are neither last, nor referenced by a signal are abandoned as unowned, since
7736// the contexts Resolve all history from all previous contexts when created
7737void QueueSyncState::SetLastBatch(std::shared_ptr<QueueBatchContext> &&last) {
7738 last_batch_ = std::move(last);
7739 last_batch_->ResetAccessLog();
7740}
7741
7742// Note that the function is const, but updates the mutable submit_index to allow Validate to create correct tagging for command invocation
7743// scope state.
7744// Given that queue submits are supposed to be externally synchronized for the same queue, this should be safe without being
7745// atomic... but as the ops are per submit, the performance cost is negligible for the peace of mind.
7746uint64_t QueueSyncState::ReserveSubmitId() const { return submit_index_.fetch_add(1); }
7747
7748void AccessLogger::BatchLog::Append(const CommandExecutionContext::AccessLog &other) {
7749 log_.insert(log_.end(), other.cbegin(), other.cend());
7750 for (const auto &record : other) {
7751 assert(record.cb_state);
7752 cbs_referenced_.insert(record.cb_state->shared_from_this());
7753 }
7754}
7755
7756AccessLogger::AccessRecord AccessLogger::BatchLog::operator[](size_t index) const {
7757 assert(index < log_.size());
7758 return AccessRecord{&batch_, &log_[index]};
7759}
7760
7761AccessLogger::AccessRecord AccessLogger::operator[](ResourceUsageTag tag) const {
7762 AccessRecord access_record = {nullptr, nullptr};
7763
7764 auto found_range = access_log_map_.find(tag);
7765 if (found_range != access_log_map_.cend()) {
7766 const ResourceUsageTag bias = found_range->first.begin;
7767 assert(tag >= bias);
7768 access_record = found_range->second[tag - bias];
7769 } else if (prev_) {
7770 access_record = (*prev_)[tag];
7771 }
7772
7773 return access_record;
7774}
John Zulaufcb7e1672022-05-04 13:46:08 -06007775
John Zulaufecf4ac52022-06-06 10:08:42 -06007776// This is a const method, force the returned value to be const
7777std::shared_ptr<const SignaledSemaphores::Signal> SignaledSemaphores::GetPrev(VkSemaphore sem) const {
John Zulaufcb7e1672022-05-04 13:46:08 -06007778 std::shared_ptr<Signal> prev_state;
7779 if (prev_) {
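        // GetMapped's default factory returns the still-empty prev_state, so a missing key yields a null shared_ptr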
7780 prev_state = GetMapped(prev_->signaled_, sem, [&prev_state]() { return prev_state; });
7781 }
7782 return prev_state;
7783}
John Zulaufecf4ac52022-06-06 10:08:42 -06007784
7785SignaledSemaphores::Signal::Signal(const std::shared_ptr<const SEMAPHORE_STATE> &sem_state_,
7786 const std::shared_ptr<QueueBatchContext> &batch_, const SyncExecScope &exec_scope_)
7787 : sem_state(sem_state_), batch(batch_), first_scope({batch->GetQueueId(), exec_scope_}) {
7788 // Illegal to create a signal from no batch or an invalid semaphore... caller must assure validity
7789 assert(batch);
7790 assert(sem_state);
7791}