/* Copyright (c) 2019-2022 The Khronos Group Inc.
 * Copyright (c) 2019-2022 Valve Corporation
 * Copyright (c) 2019-2022 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"

// Utilities to DRY up Get... calls
template <typename Map, typename Key = typename Map::key_type, typename RetVal = layer_data::optional<typename Map::mapped_type>>
RetVal GetMappedOptional(const Map &map, const Key &key) {
    RetVal ret_val;
    auto it = map.find(key);
    if (it != map.cend()) {
        ret_val.emplace(it->second);
    }
    return ret_val;
}
template <typename Map, typename Fn>
typename Map::mapped_type GetMapped(const Map &map, const typename Map::key_type &key, Fn &&default_factory) {
    auto value = GetMappedOptional(map, key);
    return (value) ? *value : default_factory();
}

template <typename Map, typename Fn>
typename Map::mapped_type GetMappedInsert(Map &map, const typename Map::key_type &key, Fn &&emplace_factory) {
    auto value = GetMappedOptional(map, key);
    if (value) {
        return *value;
    }
    auto insert_it = map.emplace(std::make_pair(key, emplace_factory()));
    assert(insert_it.second);

    return insert_it.first->second;
}

template <typename Map, typename Key = typename Map::key_type, typename Mapped = typename Map::mapped_type,
          typename Value = typename Mapped::element_type>
Value *GetMappedPlainFromShared(const Map &map, const Key &key) {
    auto value = GetMappedOptional<Map, Key>(map, key);
    if (value) return value->get();
    return nullptr;
}
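
// Illustrative usage sketch (not part of the validator; the map and values here are
// hypothetical). GetMapped avoids a second lookup by falling back to a factory on a miss,
// while GetMappedInsert also stores the factory result in the map:
//
//     std::map<uint32_t, std::string> labels;
//     const std::string label = GetMapped(labels, 7, []() { return std::string("unset"); });
//     const std::string stored = GetMappedInsert(labels, 7, []() { return std::string("unset"); });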

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.Binding(); }

static bool SimpleBinding(const IMAGE_STATE &image_state) {
    bool simple =
        SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.IsSwapchainImage() || image_state.bind_swapchain;

    // If it's not simple we must have an encoder.
    assert(!simple || image_state.fragment_encoder.get());
    return simple;
}

static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
static const std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; }
static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
    return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
}

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ-AFTER-WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE-AFTER-READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE-AFTER-WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

113
John Zulauf59e25072020-07-17 10:55:21 -0600114static bool IsHazardVsRead(SyncHazard hazard) {
115 switch (hazard) {
116 case SyncHazard::NONE:
117 return false;
118 break;
119 case SyncHazard::READ_AFTER_WRITE:
120 return false;
121 break;
122 case SyncHazard::WRITE_AFTER_READ:
123 return true;
124 break;
125 case SyncHazard::WRITE_AFTER_WRITE:
126 return false;
127 break;
128 case SyncHazard::READ_RACING_WRITE:
129 return false;
130 break;
131 case SyncHazard::WRITE_RACING_WRITE:
132 return false;
133 break;
134 case SyncHazard::WRITE_RACING_READ:
135 return true;
136 break;
137 default:
138 assert(0);
139 }
140 return false;
141}
142
static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}

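// Illustrative output sketch (hypothetical flag values): a mask with two stage/access bits
// set would format as, e.g., "SYNC_VERTEX_SHADER_SHADER_READ|SYNC_FRAGMENT_SHADER_SHADER_READ",
// while an empty mask formats as "0".
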
std::ostream &operator<<(std::ostream &out, const ResourceUsageRecord &record) {
    out << "command: " << CommandTypeString(record.command);
    out << ", seq_no: " << record.seq_num;
    if (record.sub_command != 0) {
        out << ", subcmd: " << record.sub_command;
    }
    return out;
}

static std::string string_UsageIndex(SyncStageAccessIndex usage_index) {
    const char *stage_access_name = "INVALID_STAGE_ACCESS";
    if (usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size())) {
        stage_access_name = syncStageAccessInfoByStageAccessIndex[usage_index].name;
    }
    return std::string(stage_access_name);
}

struct SyncNodeFormatter {
    const debug_report_data *report_data;
    const BASE_NODE *node;
    const char *label;

    SyncNodeFormatter(const SyncValidator &sync_state, const CMD_BUFFER_STATE *cb_state)
        : report_data(sync_state.report_data), node(cb_state), label("command_buffer") {}
    SyncNodeFormatter(const SyncValidator &sync_state, const QUEUE_STATE *q_state)
        : report_data(sync_state.report_data), node(q_state), label("queue") {}
};

std::ostream &operator<<(std::ostream &out, const SyncNodeFormatter &formatter) {
    if (formatter.node) {
        out << formatter.label << ": " << formatter.report_data->FormatHandle(formatter.node->Handle()).c_str();
        if (formatter.node->Destroyed()) {
            out << " (destroyed)";
        }
    } else {
        out << formatter.label << ": null handle";
    }
    return out;
}

std::ostream &operator<<(std::ostream &out, const HazardResult &hazard) {
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(";
    if (!hazard.recorded_access.get()) {
        // If we have a recorded usage, the usage is reported from the recorded context's point of view, so skip it here
        out << "usage: " << usage_info.name << ", ";
    }
    out << "prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }
    return out;
}

struct NoopBarrierAction {
    explicit NoopBarrierAction() {}
    void operator()(ResourceAccessState *access) const {}
    const bool layout_transition = false;
};

static void InitSubpassContexts(VkQueueFlags queue_flags, const RENDER_PASS_STATE &rp_state, const AccessContext *external_context,
                                std::vector<AccessContext> &subpass_contexts) {
    const auto &create_info = rp_state.createInfo;
    // Add this for all subpasses here so that they exist during next subpass validation
    subpass_contexts.clear();
    subpass_contexts.reserve(create_info.subpassCount);
    for (uint32_t pass = 0; pass < create_info.subpassCount; pass++) {
        subpass_contexts.emplace_back(pass, queue_flags, rp_state.subpass_dependencies, subpass_contexts, external_context);
    }
}

// NOTE: Make sure the proxy doesn't outlive from, as the proxy is pointing directly to access contexts owned by from.
CommandBufferAccessContext::CommandBufferAccessContext(const CommandBufferAccessContext &from, AsProxyContext dummy)
    : CommandBufferAccessContext(from.sync_state_) {
    // Copy only the needed fields out of from for a temporary, proxy command buffer context
    cb_state_ = from.cb_state_;
    queue_flags_ = from.queue_flags_;
    destroyed_ = from.destroyed_;
    access_log_ = std::make_shared<AccessLog>(*from.access_log_);  // potentially large, but no choice given tagging lookup.
    command_number_ = from.command_number_;
    subcommand_number_ = from.subcommand_number_;
    reset_count_ = from.reset_count_;

    const auto *from_context = from.GetCurrentAccessContext();
    assert(from_context);

    // Construct a fully resolved single access context out of from
    const NoopBarrierAction noop_barrier;
    for (AccessAddressType address_type : kAddressTypes) {
        from_context->ResolveAccessRange(address_type, kFullRange, noop_barrier,
                                         &cb_access_context_.GetAccessStateMap(address_type), nullptr);
    }
    // The proxy has flattened the current render pass context (if any), but the async contexts are needed for hazard detection
    cb_access_context_.ImportAsyncContexts(*from_context);

    events_context_ = from.events_context_;

    // We don't want to copy the full render_pass_context_ history just for the proxy.
}

std::string CommandBufferAccessContext::FormatUsage(const ResourceUsageTag tag) const {
    if (tag >= access_log_->size()) return std::string();

    std::stringstream out;
    assert(tag < access_log_->size());
    const auto &record = (*access_log_)[tag];
    out << record;
    if (cb_state_.get() != record.cb_state) {
        out << ", " << SyncNodeFormatter(*sync_state_, record.cb_state);
    }
    out << ", reset_no: " << std::to_string(record.reset_count);
    return out.str();
}

std::string CommandBufferAccessContext::FormatUsage(const ResourceFirstAccess &access) const {
    std::stringstream out;
    out << "(recorded_usage: " << string_UsageIndex(access.usage_index);
    out << ", " << FormatUsage(access.tag) << ")";
    return out.str();
}

std::string CommandExecutionContext::FormatHazard(const HazardResult &hazard) const {
    std::stringstream out;
    out << hazard;
    out << ", " << FormatUsage(hazard.tag) << ")";
    return out.str();
}

bool CommandExecutionContext::ValidForSyncOps() const {
    bool valid = GetCurrentEventsContext() && GetCurrentAccessContext();
    assert(valid);
    return valid;
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};

// Sometimes we have an internal access conflict; we use kInvalidTag to set and detect it in temporary/proxy contexts
static const ResourceUsageTag kInvalidTag(ResourceUsageRecord::kMaxIndex);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) { return bindable.GetFakeBaseAddress(); }

VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

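// Illustrative sketch (hypothetical values): for a 1024-byte buffer,
// GetRealWholeSize(256, VK_WHOLE_SIZE, 1024) yields 768, while an explicit size is returned
// unchanged, e.g. GetRealWholeSize(256, 128, 1024) yields 128.
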
static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

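// Illustrative sketch (hypothetical values): ranges are half-open [begin, end), so
// MakeRange(256, 128) covers addresses 256..383 and abuts, but does not overlap,
// MakeRange(384, 64).
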
// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
//  Constructor() -- initializes the generator to point to the begin of the space declared.
//  *  -- the current range of the generator; empty signifies end
//  ++ -- advance to the next non-empty range (or end)
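//
// Illustrative iteration idiom (a sketch; Generator stands in for any of the generator types
// below, constructed with arguments appropriate to that type):
//
//     Generator gen(/* ... */);
//     for (; gen->non_empty(); ++gen) {
//         const auto &range = *gen;
//         // ... process range ...
//     }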

// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
  public:
    SingleRangeGenerator(const KeyType &range) : current_(range) {}
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    SingleRangeGenerator &operator++() {
        current_ = KeyType();  // just one real range
        return *this;
    }

    bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }

  private:
    SingleRangeGenerator() = default;
    const KeyType range_;
    KeyType current_;
};

// Generate the ranges that are the intersection of range and the entries in the RangeMap
template <typename RangeMap, typename KeyType = typename RangeMap::key_type>
class MapRangesRangeGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    MapRangesRangeGenerator() : range_(), map_(nullptr), map_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    MapRangesRangeGenerator(const RangeMap &filter, const KeyType &range) : range_(range), map_(&filter), map_pos_(), current_() {
        SeekBegin();
    }
    MapRangesRangeGenerator(const MapRangesRangeGenerator &from) = default;

    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    MapRangesRangeGenerator &operator++() {
        ++map_pos_;
        UpdateCurrent();
        return *this;
    }

    bool operator==(const MapRangesRangeGenerator &other) const { return current_ == other.current_; }

  protected:
    void UpdateCurrent() {
        if (map_pos_ != map_->cend()) {
            current_ = range_ & map_pos_->first;
        } else {
            current_ = KeyType();
        }
    }
    void SeekBegin() {
        map_pos_ = map_->lower_bound(range_);
        UpdateCurrent();
    }

    // Adding this functionality here, to avoid gratuitous Base:: qualifiers in the derived class
    // Note: Not exposed in this class's public interface to encourage using a consistent ++/empty generator semantic
    template <typename Pred>
    MapRangesRangeGenerator &PredicatedIncrement(Pred &pred) {
        do {
            ++map_pos_;
        } while (map_pos_ != map_->cend() && map_pos_->first.intersects(range_) && !pred(map_pos_));
        UpdateCurrent();
        return *this;
    }

    const KeyType range_;
    const RangeMap *map_;
    typename RangeMap::const_iterator map_pos_;
    KeyType current_;
};
using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
using EventSimpleRangeGenerator = MapRangesRangeGenerator<SyncEventState::ScopeMap>;

// Generate the ranges for entries meeting the predicate that are the intersection of range and the entries in the RangeMap
template <typename RangeMap, typename Predicate, typename KeyType = typename RangeMap::key_type>
class PredicatedMapRangesRangeGenerator : public MapRangesRangeGenerator<RangeMap, KeyType> {
  public:
    using Base = MapRangesRangeGenerator<RangeMap, KeyType>;
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    PredicatedMapRangesRangeGenerator() : Base(), pred_() {}
    PredicatedMapRangesRangeGenerator(const RangeMap &filter, const KeyType &range, Predicate pred)
        : Base(filter, range), pred_(pred) {}
    PredicatedMapRangesRangeGenerator(const PredicatedMapRangesRangeGenerator &from) = default;

    PredicatedMapRangesRangeGenerator &operator++() {
        Base::PredicatedIncrement(pred_);
        return *this;
    }

  protected:
    Predicate pred_;
};

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
// Templated to allow for different Range generators or map sources...
template <typename RangeMap, typename RangeGen, typename KeyType = typename RangeMap::key_type>
class FilteredGeneratorGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    FilteredGeneratorGenerator(const RangeMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++gen_;
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *gen_; }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++gen_;
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    const RangeMap *filter_;
    RangeGen gen_;
    typename RangeMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;

ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}

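// Illustrative sketch (hypothetical values): for a binding at offset 64 with a 16-byte stride,
// GetBufferRange(64, 1024, 2, 3, 16) starts at 64 + 2 * 16 = 96 and spans 3 * 16 = 48 bytes;
// passing count == UINT32_MAX instead extends the range to the end of the 1024-byte buffer.
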
SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ; SHADER_WRITE is enough.
    // If a write hazard happens, a read hazard may or may not happen, but if no write hazard happens,
    // a read hazard is impossible.
    if (descriptor_data.is_writable) {
        return stage_access->second.storage_write;
    }
    // TODO: sampled_read
    return stage_access->second.storage_read;
}

bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL);
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL);
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address, false);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}

// Traverse the attachment resolves for a specific subpass, and apply action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
                      uint32_t subpass) {
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an inuse color attachment and a matching inuse resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ,
                       SyncOrdering::kColorAttachment);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
                       SyncOrdering::kColorAttachment);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        AttachmentViewGen::Gen gen_type = AttachmentViewGen::Gen::kRenderArea;
        if (resolve_depth && resolve_stencil) {
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            gen_type = AttachmentViewGen::Gen::kDepthOnlyRenderArea;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate all stencil only
            gen_type = AttachmentViewGen::Gen::kStencilOnlyRenderArea;
            aspect_string = "stencil";
        }

        if (aspect_string) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at], gen_type,
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at], gen_type,
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandExecutionContext &exec_context, CMD_TYPE cmd_type)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          exec_context_(exec_context),
          cmd_type_(cmd_type),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage,
                    SyncOrdering ordering_rule) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view_gen, gen_type, current_usage, ordering_rule);
        if (hazard.hazard) {
            skip_ |= exec_context_.GetSyncState().LogError(
                render_pass_, string_SyncHazardVUID(hazard.hazard),
                "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32 " to resolve attachment %" PRIu32
                ". Access info %s.",
                CommandTypeString(cmd_type_), string_SyncHazard(hazard.hazard), subpass_, aspect_name, attachment_name, src_at,
                dst_at, exec_context_.FormatHazard(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandExecutionContext &exec_context_;
    CMD_TYPE cmd_type_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, ResourceUsageTag tag) : context_(context), tag_(tag) {}
    void operator()(const char *, const char *, uint32_t, uint32_t, const AttachmentViewGen &view_gen,
                    AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view_gen, gen_type, current_usage, ordering_rule, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag tag_) {
    access_state = layer_data::make_unique<const ResourceAccessState>(*access_state_);
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

void HazardResult::AddRecordedAccess(const ResourceFirstAccess &first_access) {
    recorded_access = layer_data::make_unique<const ResourceFirstAccess>(first_access);
}

void AccessContext::DeleteAccess(const AddressRange &address) { GetAccessStateMap(address.type).erase_range(address.range); }

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
    prev_.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (has_barrier_from_external) {
        // Store the barrier from external with the rest, but save the pointer for "by subpass" lookups.
        prev_.emplace_back(external_context, queue_flags, subpass_dep.barrier_from_external);
        src_external_ = &prev_.back();
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}


void AccessContext::Trim() {
    auto normalize = [](AccessAddressType address_type, ResourceAccessRangeMap::value_type &access) { access.second.Normalize(); };
    ForAll(normalize);

    // Consolidate map after normalization, combines directly adjacent ranges with common values.
    for (auto &map : access_state_maps_) {
        sparse_container::consolidate(map);
    }
}

void AccessContext::AddReferencedTags(ResourceUsageTagSet &used) const {
    auto gather = [&used](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
        access.second.GatherReferencedTags(used);
    };
    ConstForAll(gather);
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (auto &access : accesses) {
            action(address_type, access);
        }
    }
}

template <typename Action>
void AccessContext::ConstForAll(Action &&action) const {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (auto &access : accesses) {
            action(address_type, access);
        }
    }
}

template <typename Predicate>
void AccessContext::EraseIf(Predicate &&pred) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        // Note: Don't forward, we don't want r-values moved, since we're going to make multiple calls.
        layer_data::EraseIf(accesses, pred);
    }
}

// A recursive range walker for hazard detection, first for the current context and then (via DetectHazardRecur) to walk
// the DAG of the contexts (for example subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto the_end = accesses.cend();  // End is not invalidated
    auto pos = accesses.lower_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    while (pos != the_end && pos->first.begin < range.end) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
        ++pos;
    }

    if (detect_prev) {
        // Detect in the trailing empty as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    auto pos = accesses.lower_bound(range);
    const auto the_end = accesses.end();

    HazardResult hazard;
    while (pos != the_end && pos->first.begin < range.end) {
        hazard = detector.DetectAsync(pos, start_tag_);
        if (hazard.hazard) break;
        ++pos;
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct QueueTagOffsetBarrierAction {
    QueueTagOffsetBarrierAction(QueueId qid, ResourceUsageTag offset) : queue_id(qid), tag_offset(offset) {}
    void operator()(ResourceAccessState *access) const {
        access->OffsetTag(tag_offset);
        access->SetQueueId(queue_id);
    };
    QueueId queue_id;
    ResourceUsageTag tag_offset;
};

struct ApplyTrackbackStackAction {
    explicit ApplyTrackbackStackAction(const std::vector<SyncBarrier> &barriers_,
                                       const ResourceAccessStateFunction *previous_barrier_ = nullptr)
        : barriers(barriers_), previous_barrier(previous_barrier_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        // NOTE: We can use an invalid tag, as these barriers do not include layout transitions (would assert in SetWrite)
        access->ApplyPendingBarriers(kInvalidTag);
        if (previous_barrier) {
            assert(bool(*previous_barrier));
            (*previous_barrier)(access);
        }
    }
    const std::vector<SyncBarrier> &barriers;
    const ResourceAccessStateFunction *previous_barrier;
};

// Splits a single map entry into pieces matching the entries in [first, last). The total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing to dest; first and last must be iterators pointing to a
// *different* map from dest.
// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
// range [first, last)
template <typename BarrierAction>
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
                              ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
                              BarrierAction &barrier_action) {
    auto at = entry;
    for (auto pos = first; pos != last; ++pos) {
        // Every member of the input iterator range must fit within the remaining portion of entry
        assert(at->first.includes(pos->first));
        assert(at != dest->end());
        // Trim up at to the same size as the entry to resolve
        at = sparse_container::split(at, *dest, pos->first);
        auto access = pos->second;  // intentional copy
        barrier_action(&access);
        at->second.Resolve(access);
        ++at;  // Go to the remaining unused section of entry
    }
}

static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
    SyncBarrier merged = {};
    for (const auto &barrier : barriers) {
        merged.Merge(barrier);
    }
    return merged;
}
John Zulaufb02c1eb2020-10-06 16:33:36 -06001039template <typename BarrierAction>
John Zulauf43cc7462020-12-03 12:33:12 -07001040void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
John Zulauf355e49b2020-04-24 15:11:15 -06001041 ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
1042 bool recur_to_infill) const {
John Zulauf3bcab5e2020-06-19 14:42:32 -06001043 if (!range.non_empty()) return;
1044
John Zulauf355e49b2020-04-24 15:11:15 -06001045 ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
1046 while (current->range.non_empty() && range.includes(current->range.begin)) {
John Zulauf3bcab5e2020-06-19 14:42:32 -06001047 const auto current_range = current->range & range;
John Zulauf16adfc92020-04-08 10:28:33 -06001048 if (current->pos_B->valid) {
1049 const auto &src_pos = current->pos_B->lower_bound;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001050 auto access = src_pos->second; // intentional copy
1051 barrier_action(&access);
John Zulauf16adfc92020-04-08 10:28:33 -06001052 if (current->pos_A->valid) {
John Zulauf3bcab5e2020-06-19 14:42:32 -06001053 const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
1054 trimmed->second.Resolve(access);
1055 current.invalidate_A(trimmed);
John Zulauf5f13a792020-03-10 07:31:21 -06001056 } else {
John Zulauf3bcab5e2020-06-19 14:42:32 -06001057 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
John Zulauf355e49b2020-04-24 15:11:15 -06001058 current.invalidate_A(inserted); // Update the parallel iterator to point at the insert segment
John Zulauf5f13a792020-03-10 07:31:21 -06001059 }
John Zulauf16adfc92020-04-08 10:28:33 -06001060 } else {
1061 // we have to descend to fill this gap
1062 if (recur_to_infill) {
John Zulauf22aefed2021-03-11 18:14:35 -07001063 ResourceAccessRange recurrence_range = current_range;
1064 // The current context is empty for the current range, so recur to fill the gap.
1065 // Since we will be recurring back up the DAG, expand the gap descent to cover the full range for which B
1066 // is not valid, to minimize that recurrence
1067 if (current->pos_B.at_end()) {
1068 // Do the remainder here....
1069 recurrence_range.end = range.end;
John Zulauf355e49b2020-04-24 15:11:15 -06001070 } else {
John Zulauf22aefed2021-03-11 18:14:35 -07001071 // Recur only over the range until B becomes valid (within the limits of range).
1072 recurrence_range.end = std::min(range.end, current->pos_B->lower_bound->first.begin);
John Zulauf355e49b2020-04-24 15:11:15 -06001073 }
John Zulauf22aefed2021-03-11 18:14:35 -07001074 ResolvePreviousAccessStack(type, recurrence_range, resolve_map, infill_state, barrier_action);
1075
John Zulauf355e49b2020-04-24 15:11:15 -06001076                // Given that there could be gaps, we need to seek carefully to avoid repeatedly searching the same
1077                // gaps in the next iteration of the outer while loop.
1078
1079                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
1080                // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
John Zulauf22aefed2021-03-11 18:14:35 -07001081                // that we stepped on the dest map.
John Zulauf22aefed2021-03-11 18:14:35 -07001082 const auto seek_to = recurrence_range.end - 1; // The subtraction is safe as range can't be empty (loop condition)
locke-lunarg88dbb542020-06-23 22:05:42 -06001083 current.invalidate_A(); // Changes current->range
John Zulauf355e49b2020-04-24 15:11:15 -06001084 current.seek(seek_to);
1085 } else if (!current->pos_A->valid && infill_state) {
1086            // If we didn't find anything in the current range, and we aren't recurring... we infill if required
1087 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
1088 current.invalidate_A(inserted); // Update the parallel iterator to point at the correct segment after insert
John Zulauf16adfc92020-04-08 10:28:33 -06001089 }
John Zulauf5f13a792020-03-10 07:31:21 -06001090 }
ziga-lunargf0e27ad2022-03-28 00:44:12 +02001091 if (current->range.non_empty()) {
1092 ++current;
1093 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001094 }
John Zulauf1a224292020-06-30 14:52:13 -06001095
1096    // Infill if the range goes past both the current and resolve map's prior contents
1097 if (recur_to_infill && (current->range.end < range.end)) {
1098 ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
John Zulauf22aefed2021-03-11 18:14:35 -07001099 ResolvePreviousAccessStack<BarrierAction>(type, trailing_fill_range, resolve_map, infill_state, barrier_action);
John Zulauf1a224292020-06-30 14:52:13 -06001100 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001101}
1102
John Zulauf22aefed2021-03-11 18:14:35 -07001103template <typename BarrierAction>
1104void AccessContext::ResolvePreviousAccessStack(AccessAddressType type, const ResourceAccessRange &range,
1105 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1106 const BarrierAction &previous_barrier) const {
1107 ResourceAccessStateFunction stacked_barrier(std::ref(previous_barrier));
1108 ResolvePreviousAccess(type, range, descent_map, infill_state, &stacked_barrier);
1109}
1110
John Zulauf43cc7462020-12-03 12:33:12 -07001111void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
John Zulauf22aefed2021-03-11 18:14:35 -07001112 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1113 const ResourceAccessStateFunction *previous_barrier) const {
1114 if (prev_.size() == 0) {
John Zulauf5f13a792020-03-10 07:31:21 -06001115 if (range.non_empty() && infill_state) {
John Zulauf22aefed2021-03-11 18:14:35 -07001116            // Fill the empty portions of descent_map with the default state, with the barrier function applied (iff present)
1117 ResourceAccessState state_copy;
1118 if (previous_barrier) {
1119 assert(bool(*previous_barrier));
1120 state_copy = *infill_state;
1121 (*previous_barrier)(&state_copy);
1122 infill_state = &state_copy;
1123 }
1124 sparse_container::update_range_value(*descent_map, range, *infill_state,
1125 sparse_container::value_precedence::prefer_dest);
John Zulauf5f13a792020-03-10 07:31:21 -06001126 }
1127 } else {
1128 // Look for something to fill the gap further along.
1129 for (const auto &prev_dep : prev_) {
John Zulauf22aefed2021-03-11 18:14:35 -07001130 const ApplyTrackbackStackAction barrier_action(prev_dep.barriers, previous_barrier);
John Zulaufbb890452021-12-14 11:30:18 -07001131 prev_dep.source_subpass->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001132 }
John Zulauf5f13a792020-03-10 07:31:21 -06001133 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001134}
1135
John Zulauf4a6105a2020-11-17 15:11:05 -07001136// Non-lazy import of all accesses, WaitEvents needs this.
1137void AccessContext::ResolvePreviousAccesses() {
1138 ResourceAccessState default_state;
John Zulauf22aefed2021-03-11 18:14:35 -07001139 if (!prev_.size()) return; // If no previous contexts, nothing to do
1140
John Zulauf4a6105a2020-11-17 15:11:05 -07001141 for (const auto address_type : kAddressTypes) {
1142 ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
1143 }
1144}
1145
John Zulauf43cc7462020-12-03 12:33:12 -07001146AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
1147 return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
John Zulauf16adfc92020-04-08 10:28:33 -06001148}
1149
John Zulauf1507ee42020-05-18 11:33:09 -06001150static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001151 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1152 ? SYNC_ACCESS_INDEX_NONE
1153 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
1154 : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001155 return stage_access;
1156}
1157static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001158 const auto stage_access =
1159 (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1160 ? SYNC_ACCESS_INDEX_NONE
1161 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
1162 : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001163 return stage_access;
1164}
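
// A minimal self-check sketch of the two mappings above (would belong in a unit test; shown here for
// documentation): LOAD maps to a read, NONE_EXT to no access, and everything else (CLEAR, DONT_CARE)
// maps to a write.
[[maybe_unused]] static void LoadUsageMappingExample() {
    assert(ColorLoadUsage(VK_ATTACHMENT_LOAD_OP_LOAD) == SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ);
    assert(ColorLoadUsage(VK_ATTACHMENT_LOAD_OP_DONT_CARE) == SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
    assert(ColorLoadUsage(VK_ATTACHMENT_LOAD_OP_NONE_EXT) == SYNC_ACCESS_INDEX_NONE);
    assert(DepthStencilLoadUsage(VK_ATTACHMENT_LOAD_OP_LOAD) == SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ);
    assert(DepthStencilLoadUsage(VK_ATTACHMENT_LOAD_OP_CLEAR) == SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE);
}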
1165
John Zulauf7635de32020-05-29 17:14:15 -06001166// Caller must manage the returned pointer
1167static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001168 uint32_t subpass, const AttachmentViewGenVector &attachment_views) {
John Zulauf7635de32020-05-29 17:14:15 -06001169 auto *proxy = new AccessContext(context);
John Zulaufee984022022-04-13 16:39:50 -06001170 proxy->UpdateAttachmentResolveAccess(rp_state, attachment_views, subpass, kInvalidTag);
1171 proxy->UpdateAttachmentStoreAccess(rp_state, attachment_views, subpass, kInvalidTag);
John Zulauf7635de32020-05-29 17:14:15 -06001172 return proxy;
1173}
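
// Usage sketch: callers own the returned proxy, typically via a smart pointer, e.g.
//   std::unique_ptr<AccessContext> proxy(CreateStoreResolveProxyContext(context, rp_state, subpass, views));
// ValidateLayoutTransitions below does exactly this (via unique_ptr::reset).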
1174
John Zulaufb02c1eb2020-10-06 16:33:36 -06001175template <typename BarrierAction>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001176void AccessContext::ResolveAccessRange(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1177 BarrierAction &barrier_action, ResourceAccessRangeMap *descent_map,
1178 const ResourceAccessState *infill_state) const {
1179 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1180 if (!attachment_gen) return;
1181
1182 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1183 const AccessAddressType address_type = view_gen.GetAddressType();
1184 for (; range_gen->non_empty(); ++range_gen) {
1185 ResolveAccessRange(address_type, *range_gen, barrier_action, descent_map, infill_state);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001186 }
John Zulauf62f10592020-04-03 12:20:02 -06001187}
1188
John Zulauf1d5f9c12022-05-13 14:51:08 -06001189template <typename ResolveOp>
1190void AccessContext::ResolveFromContext(ResolveOp &&resolve_op, const AccessContext &from_context,
1191 const ResourceAccessState *infill_state, bool recur_to_infill) {
1192 for (auto address_type : kAddressTypes) {
1193 from_context.ResolveAccessRange(address_type, kFullRange, resolve_op, &GetAccessStateMap(address_type), infill_state,
1194 recur_to_infill);
1195 }
1196}
1197
John Zulauf7635de32020-05-29 17:14:15 -06001198// Layout transitions are handled as if they were occurring at the beginning of the next subpass
John Zulaufbb890452021-12-14 11:30:18 -07001199bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001200 const VkRect2D &render_area, uint32_t subpass,
sjfricke0bea06e2022-06-05 09:22:26 +09001201 const AttachmentViewGenVector &attachment_views, CMD_TYPE cmd_type) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001202 bool skip = false;
John Zulauf7635de32020-05-29 17:14:15 -06001203    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
1204    // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
1205    // those effects have not been recorded yet.
1206 //
1207 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
1208 // to apply and only copy then, if this proves a hot spot.
1209 std::unique_ptr<AccessContext> proxy_for_prev;
1210 TrackBack proxy_track_back;
1211
John Zulauf355e49b2020-04-24 15:11:15 -06001212 const auto &transitions = rp_state.subpass_transitions[subpass];
1213 for (const auto &transition : transitions) {
John Zulauf7635de32020-05-29 17:14:15 -06001214 const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
1215
1216 const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
John Zulauf22aefed2021-03-11 18:14:35 -07001217 assert(track_back);
John Zulauf7635de32020-05-29 17:14:15 -06001218 if (prev_needs_proxy) {
1219 if (!proxy_for_prev) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001220 proxy_for_prev.reset(
John Zulaufbb890452021-12-14 11:30:18 -07001221 CreateStoreResolveProxyContext(*track_back->source_subpass, rp_state, transition.prev_pass, attachment_views));
John Zulauf7635de32020-05-29 17:14:15 -06001222 proxy_track_back = *track_back;
John Zulaufbb890452021-12-14 11:30:18 -07001223 proxy_track_back.source_subpass = proxy_for_prev.get();
John Zulauf7635de32020-05-29 17:14:15 -06001224 }
1225 track_back = &proxy_track_back;
1226 }
1227 auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
John Zulauf355e49b2020-04-24 15:11:15 -06001228 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09001229 const char *func_name = CommandTypeString(cmd_type);
John Zulaufee984022022-04-13 16:39:50 -06001230 if (hazard.tag == kInvalidTag) {
John Zulaufbb890452021-12-14 11:30:18 -07001231 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001232 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1233 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1234 " image layout transition (old_layout: %s, new_layout: %s) after store/resolve operation in subpass %" PRIu32,
1235 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1236 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout), transition.prev_pass);
1237 } else {
John Zulaufbb890452021-12-14 11:30:18 -07001238 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001239 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1240 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1241 " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
1242 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1243 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf397e68b2022-04-19 11:44:07 -06001244 exec_context.FormatHazard(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06001245 }
John Zulauf355e49b2020-04-24 15:11:15 -06001246 }
1247 }
1248 return skip;
1249}
1250
John Zulaufbb890452021-12-14 11:30:18 -07001251bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001252 const VkRect2D &render_area, uint32_t subpass,
sjfricke0bea06e2022-06-05 09:22:26 +09001253 const AttachmentViewGenVector &attachment_views, CMD_TYPE cmd_type) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001254 bool skip = false;
1255 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufa0a98292020-09-18 09:30:10 -06001256
John Zulauf1507ee42020-05-18 11:33:09 -06001257 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1258 if (subpass == rp_state.attachment_first_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001259 const auto &view_gen = attachment_views[i];
1260 if (!view_gen.IsValid()) continue;
John Zulauf1507ee42020-05-18 11:33:09 -06001261 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001262
1263            // Need to check in the following way:
1264            // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1265 // vs. transition
1266 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1267 // for each aspect loaded.
1268
1269 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001270 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001271 const bool is_color = !(has_depth || has_stencil);
1272
1273 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001274 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001275
John Zulaufaff20662020-06-01 14:07:58 -06001276 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001277 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001278
John Zulaufb02c1eb2020-10-06 16:33:36 -06001279 bool checked_stencil = false;
John Zulauf57261402021-08-13 11:32:06 -06001280 if (is_color && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001281 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea, load_index, SyncOrdering::kColorAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001282 aspect = "color";
1283 } else {
John Zulauf57261402021-08-13 11:32:06 -06001284 if (has_depth && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001285 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_index,
1286 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001287 aspect = "depth";
1288 }
John Zulauf57261402021-08-13 11:32:06 -06001289 if (!hazard.hazard && has_stencil && (stencil_load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001290 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, stencil_load_index,
1291 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001292 aspect = "stencil";
1293 checked_stencil = true;
1294 }
1295 }
1296
1297 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09001298 const char *func_name = CommandTypeString(cmd_type);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001299 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulaufbb890452021-12-14 11:30:18 -07001300 const auto &sync_state = exec_context.GetSyncState();
John Zulaufee984022022-04-13 16:39:50 -06001301 if (hazard.tag == kInvalidTag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001302 // Hazard vs. ILT
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001303 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulaufb02c1eb2020-10-06 16:33:36 -06001304 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1305 " aspect %s during load with loadOp %s.",
1306 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1307 } else {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001308 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauf1507ee42020-05-18 11:33:09 -06001309 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001310 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001311 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauf397e68b2022-04-19 11:44:07 -06001312 exec_context.FormatHazard(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001313 }
1314 }
1315 }
1316 }
1317 return skip;
1318}
1319
John Zulaufaff20662020-06-01 14:07:58 -06001320// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1321// because of the ordering guarantees w.r.t. sample access and that the resolve validation hasn't altered the state, because
1322// store is part of the same Next/End operation.
1323// The latter is handled in layout transition validation directly.
John Zulaufbb890452021-12-14 11:30:18 -07001324bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001325 const VkRect2D &render_area, uint32_t subpass,
sjfricke0bea06e2022-06-05 09:22:26 +09001326 const AttachmentViewGenVector &attachment_views, CMD_TYPE cmd_type) const {
John Zulaufaff20662020-06-01 14:07:58 -06001327 bool skip = false;
1328 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001329
1330 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1331 if (subpass == rp_state.attachment_last_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001332 const AttachmentViewGen &view_gen = attachment_views[i];
1333 if (!view_gen.IsValid()) continue;
John Zulaufaff20662020-06-01 14:07:58 -06001334 const auto &ci = attachment_ci[i];
1335
1336 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1337 // so we assume that an implementation is *free* to write in that case, meaning that for correctness
1338 // sake, we treat DONT_CARE as writing.
1339 const bool has_depth = FormatHasDepth(ci.format);
1340 const bool has_stencil = FormatHasStencil(ci.format);
1341 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001342 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001343 if (!has_stencil && !store_op_stores) continue;
1344
1345 HazardResult hazard;
1346 const char *aspect = nullptr;
1347 bool checked_stencil = false;
1348 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001349 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
1350 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001351 aspect = "color";
1352 } else {
John Zulauf57261402021-08-13 11:32:06 -06001353 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001354 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001355 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1356 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001357 aspect = "depth";
1358 }
1359 if (!hazard.hazard && has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001360 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1361 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001362 aspect = "stencil";
1363 checked_stencil = true;
1364 }
1365 }
1366
1367 if (hazard.hazard) {
1368 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1369 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulauf397e68b2022-04-19 11:44:07 -06001370 skip |= exec_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1371 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1372 " %s aspect during store with %s %s. Access info %s",
sjfricke0bea06e2022-06-05 09:22:26 +09001373 CommandTypeString(cmd_type), string_SyncHazard(hazard.hazard), subpass,
1374 i, aspect, op_type_string, store_op_string,
John Zulauf397e68b2022-04-19 11:44:07 -06001375 exec_context.FormatHazard(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001376 }
1377 }
1378 }
1379 return skip;
1380}
1381
John Zulaufbb890452021-12-14 11:30:18 -07001382bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001383 const VkRect2D &render_area, const AttachmentViewGenVector &attachment_views,
sjfricke0bea06e2022-06-05 09:22:26 +09001384 CMD_TYPE cmd_type, uint32_t subpass) const {
1385 ValidateResolveAction validate_action(rp_state.renderPass(), subpass, *this, exec_context, cmd_type);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001386 ResolveOperation(validate_action, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001387 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001388}
1389
John Zulauf06f6f1e2022-04-19 15:28:11 -06001390void AccessContext::AddAsyncContext(const AccessContext *context) { async_.emplace_back(context); }
1391
John Zulauf3d84f1b2020-03-09 13:33:25 -06001392class HazardDetector {
1393 SyncStageAccessIndex usage_index_;
1394
1395 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001396 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf14940722021-04-12 15:19:02 -06001397 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001398 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001399 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001400 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001401};
1402
John Zulauf69133422020-05-20 14:55:53 -06001403class HazardDetectorWithOrdering {
1404 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001405 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001406
1407 public:
1408 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulaufec943ec2022-06-29 07:52:56 -06001409 return pos->second.DetectHazard(usage_index_, ordering_rule_, QueueSyncState::kQueueIdInvalid);
John Zulauf69133422020-05-20 14:55:53 -06001410 }
John Zulauf14940722021-04-12 15:19:02 -06001411 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001412 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001413 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001414 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001415};
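
// The DetectHazard templates below accept any "detector" with the shape of the two classes above. A hedged
// sketch of that implicit concept (NoopDetector is hypothetical, reports no hazards, and is not used by the
// validator):
namespace detector_concept_example {
struct NoopDetector {
    // Same-context (and recursive "previous") detection, called per overlapped map entry
    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &) const { return HazardResult(); }
    // Cross-context detection against asynchronous (unordered) contexts
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &, ResourceUsageTag) const {
        return HazardResult();
    }
};
}  // namespace detector_concept_example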
1416
John Zulauf16adfc92020-04-08 10:28:33 -06001417HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001418 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001419 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001420 const auto base_address = ResourceBaseAddress(buffer);
1421 HazardDetector detector(usage_index);
1422 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001423}
1424
John Zulauf69133422020-05-20 14:55:53 -06001425template <typename Detector>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001426HazardResult AccessContext::DetectHazard(Detector &detector, const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1427 DetectOptions options) const {
1428 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1429 if (!attachment_gen) return HazardResult();
1430
1431 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1432 const auto address_type = view_gen.GetAddressType();
1433 for (; range_gen->non_empty(); ++range_gen) {
1434 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1435 if (hazard.hazard) return hazard;
1436 }
1437
1438 return HazardResult();
1439}
1440
1441template <typename Detector>
John Zulauf69133422020-05-20 14:55:53 -06001442HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1443 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001444 const VkExtent3D &extent, bool is_depth_sliced, DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001445 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001446 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001447 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001448 base_address, is_depth_sliced);
John Zulauf150e5332020-12-03 08:52:52 -07001449 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001450 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001451 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001452 if (hazard.hazard) return hazard;
1453 }
1454 return HazardResult();
1455}
John Zulauf110413c2021-03-20 05:38:38 -06001456template <typename Detector>
1457HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001458 const VkImageSubresourceRange &subresource_range, bool is_depth_sliced,
1459 DetectOptions options) const {
John Zulauf110413c2021-03-20 05:38:38 -06001460 if (!SimpleBinding(image)) return HazardResult();
1461 const auto base_address = ResourceBaseAddress(image);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001462 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address,
1463 is_depth_sliced);
John Zulauf110413c2021-03-20 05:38:38 -06001464 const auto address_type = ImageAddressType(image);
1465 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf110413c2021-03-20 05:38:38 -06001466 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1467 if (hazard.hazard) return hazard;
1468 }
1469 return HazardResult();
1470}
John Zulauf69133422020-05-20 14:55:53 -06001471
John Zulauf540266b2020-04-06 18:54:53 -06001472HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1473 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001474 const VkExtent3D &extent, bool is_depth_sliced) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001475 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1476 subresource.layerCount};
John Zulauf110413c2021-03-20 05:38:38 -06001477 HazardDetector detector(current_usage);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001478 return DetectHazard(detector, image, subresource_range, offset, extent, is_depth_sliced, DetectOptions::kDetectAll);
John Zulauf1507ee42020-05-18 11:33:09 -06001479}
1480
1481HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001482 const VkImageSubresourceRange &subresource_range, bool is_depth_sliced) const {
John Zulauf69133422020-05-20 14:55:53 -06001483 HazardDetector detector(current_usage);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001484 return DetectHazard(detector, image, subresource_range, is_depth_sliced, DetectOptions::kDetectAll);
John Zulauf69133422020-05-20 14:55:53 -06001485}
1486
John Zulaufd0ec59f2021-03-13 14:25:08 -07001487HazardResult AccessContext::DetectHazard(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1488 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) const {
1489 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
1490 return DetectHazard(detector, view_gen, gen_type, DetectOptions::kDetectAll);
1491}
1492
John Zulauf69133422020-05-20 14:55:53 -06001493HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001494 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001495 const VkOffset3D &offset, const VkExtent3D &extent, bool is_depth_sliced) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001496 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001497 return DetectHazard(detector, image, subresource_range, offset, extent, is_depth_sliced, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001498}
1499
John Zulauf3d84f1b2020-03-09 13:33:25 -06001500class BarrierHazardDetector {
1501 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001502 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001503 SyncStageAccessFlags src_access_scope)
1504 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1505
John Zulauf5f13a792020-03-10 07:31:21 -06001506 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulaufec943ec2022-06-29 07:52:56 -06001507 return pos->second.DetectBarrierHazard(usage_index_, QueueSyncState::kQueueIdInvalid, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001508 }
John Zulauf14940722021-04-12 15:19:02 -06001509 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001510 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001511 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001512 }
1513
1514 private:
1515 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001516 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001517 SyncStageAccessFlags src_access_scope_;
1518};
1519
John Zulauf4a6105a2020-11-17 15:11:05 -07001520class EventBarrierHazardDetector {
1521 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001522 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulaufe0757ba2022-06-10 16:51:45 -06001523 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope, QueueId queue_id,
John Zulauf14940722021-04-12 15:19:02 -06001524 ResourceUsageTag scope_tag)
John Zulauf4a6105a2020-11-17 15:11:05 -07001525 : usage_index_(usage_index),
1526 src_exec_scope_(src_exec_scope),
1527 src_access_scope_(src_access_scope),
1528 event_scope_(event_scope),
John Zulaufe0757ba2022-06-10 16:51:45 -06001529 scope_queue_id_(queue_id),
1530 scope_tag_(scope_tag),
John Zulauf4a6105a2020-11-17 15:11:05 -07001531 scope_pos_(event_scope.cbegin()),
John Zulaufe0757ba2022-06-10 16:51:45 -06001532 scope_end_(event_scope.cend()) {}
John Zulauf4a6105a2020-11-17 15:11:05 -07001533
John Zulaufe0757ba2022-06-10 16:51:45 -06001534 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) {
1535 // Need to piece together coverage of pos->first range:
1536 // Copy the range as we'll be chopping it up as needed
1537 ResourceAccessRange range = pos->first;
1538 const ResourceAccessState &access = pos->second;
1539 HazardResult hazard;
1540
1541 bool in_scope = AdvanceScope(range);
1542 bool unscoped_tested = false;
1543 while (in_scope && !hazard.IsHazard()) {
1544 if (range.begin < ScopeBegin()) {
1545 if (!unscoped_tested) {
1546 unscoped_tested = true;
1547 hazard = access.DetectHazard(usage_index_);
1548 }
1549 // Note: don't need to check for in_scope as AdvanceScope true means range and ScopeRange intersect.
1550 // Thus a [ ScopeBegin, range.end ) will be non-empty.
1551 range.begin = ScopeBegin();
1552 } else { // in_scope implied that ScopeRange and range intersect
1553 hazard = access.DetectBarrierHazard(usage_index_, ScopeState(), src_exec_scope_, src_access_scope_, scope_queue_id_,
1554 scope_tag_);
1555 if (!hazard.IsHazard()) {
1556 range.begin = ScopeEnd();
1557 in_scope = AdvanceScope(range); // contains a non_empty check
1558 }
1559 }
John Zulauf4a6105a2020-11-17 15:11:05 -07001560 }
John Zulaufe0757ba2022-06-10 16:51:45 -06001561 if (range.non_empty() && !hazard.IsHazard() && !unscoped_tested) {
1562 hazard = access.DetectHazard(usage_index_);
1563 }
1564 return hazard;
John Zulauf4a6105a2020-11-17 15:11:05 -07001565 }
John Zulaufe0757ba2022-06-10 16:51:45 -06001566
John Zulauf14940722021-04-12 15:19:02 -06001567 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001568 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
1569 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1570 }
1571
1572 private:
John Zulaufe0757ba2022-06-10 16:51:45 -06001573 bool ScopeInvalid() const { return scope_pos_ == scope_end_; }
1574 bool ScopeValid() const { return !ScopeInvalid(); }
1575 void ScopeSeek(const ResourceAccessRange &range) { scope_pos_ = event_scope_.lower_bound(range); }
1576
1577 // Hiding away the std::pair grunge...
1578 ResourceAddress ScopeBegin() const { return scope_pos_->first.begin; }
1579 ResourceAddress ScopeEnd() const { return scope_pos_->first.end; }
1580 const ResourceAccessRange &ScopeRange() const { return scope_pos_->first; }
1581 const ResourceAccessState &ScopeState() const { return scope_pos_->second; }
1582
1583 bool AdvanceScope(const ResourceAccessRange &range) {
1584 // Note: non_empty is (valid && !empty), so don't change !non_empty to empty...
1585 if (!range.non_empty()) return false;
1586 if (ScopeInvalid()) return false;
1587
1588 if (ScopeRange().strictly_less(range)) {
1589 ScopeSeek(range);
1590 }
1591
1592 return ScopeValid() && ScopeRange().intersects(range);
1593 }
1594
John Zulauf4a6105a2020-11-17 15:11:05 -07001595 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001596 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001597 SyncStageAccessFlags src_access_scope_;
1598 const SyncEventState::ScopeMap &event_scope_;
John Zulaufe0757ba2022-06-10 16:51:45 -06001599 QueueId scope_queue_id_;
1600 const ResourceUsageTag scope_tag_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001601 SyncEventState::ScopeMap::const_iterator scope_pos_;
1602 SyncEventState::ScopeMap::const_iterator scope_end_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001603};
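
// To make the scope walk in Detect() above concrete: a standalone sketch (toy integer ranges rather than
// validator types; assumes <vector> and <algorithm>) of how one access range is partitioned into unscoped
// and scoped pieces against a sorted, non-overlapping scope list like the SyncEventState::ScopeMap keys.
namespace event_scope_walk_example {
struct Piece {
    size_t begin, end;
    bool in_scope;  // scoped pieces get the barrier check; unscoped pieces get the plain hazard check
};
inline std::vector<Piece> Partition(size_t begin, size_t end, const std::vector<std::pair<size_t, size_t>> &scope) {
    std::vector<Piece> pieces;
    for (const auto &s : scope) {
        if (s.second <= begin) continue;  // scope range entirely before the access range
        if (s.first >= end) break;        // scope range entirely after; later ranges can't intersect either
        if (begin < s.first) pieces.push_back({begin, s.first, false});  // leading unscoped piece
        const size_t overlap_end = std::min(end, s.second);
        pieces.push_back({std::max(begin, s.first), overlap_end, true});  // scoped piece
        begin = overlap_end;
        if (begin >= end) break;
    }
    if (begin < end) pieces.push_back({begin, end, false});  // trailing unscoped piece
    return pieces;
}
}  // namespace event_scope_walk_example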
1604
John Zulaufe0757ba2022-06-10 16:51:45 -06001605HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
1606 VkPipelineStageFlags2KHR src_exec_scope,
1607 const SyncStageAccessFlags &src_access_scope, QueueId queue_id,
1608 const SyncEventState &sync_event, AccessContext::DetectOptions options) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001609 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1610 // first access scope map to use, and there's no easy way to plumb it in below.
1611 const auto address_type = ImageAddressType(image);
1612 const auto &event_scope = sync_event.FirstScope(address_type);
1613
1614 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
John Zulaufe0757ba2022-06-10 16:51:45 -06001615 event_scope, queue_id, sync_event.first_scope_tag);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001616 return DetectHazard(detector, image, subresource_range, false, options);
John Zulauf4a6105a2020-11-17 15:11:05 -07001617}
1618
John Zulaufd0ec59f2021-03-13 14:25:08 -07001619HazardResult AccessContext::DetectImageBarrierHazard(const AttachmentViewGen &view_gen, const SyncBarrier &barrier,
1620 DetectOptions options) const {
1621 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, barrier.src_exec_scope.exec_scope,
1622 barrier.src_access_scope);
1623 return DetectHazard(detector, view_gen, AttachmentViewGen::Gen::kViewSubresource, options);
1624}
1625
Jeremy Gebben40a22942020-12-22 14:22:06 -07001626HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001627 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001628 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001629 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001630 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
Aitor Camachoe67f2c72022-06-08 14:41:58 +02001631 return DetectHazard(detector, image, subresource_range, false, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001632}
1633
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001634HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001635 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulauf110413c2021-03-20 05:38:38 -06001636 image_barrier.barrier.src_access_scope, image_barrier.range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001637}
John Zulauf355e49b2020-04-24 15:11:15 -06001638
John Zulauf9cb530d2019-09-30 14:14:10 -06001639template <typename Flags, typename Map>
1640SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1641 SyncStageAccessFlags scope = 0;
1642 for (const auto &bit_scope : map) {
1643 if (flag_mask < bit_scope.first) break;
1644
1645 if (flag_mask & bit_scope.first) {
1646 scope |= bit_scope.second;
1647 }
1648 }
1649 return scope;
1650}
1651
Jeremy Gebben40a22942020-12-22 14:22:06 -07001652SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001653 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1654}
1655
Jeremy Gebben40a22942020-12-22 14:22:06 -07001656SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1657 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001658}
1659
Jeremy Gebben40a22942020-12-22 14:22:06 -07001660// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1661SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001662    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1663    // accesses (after factoring out common terms, the union of the per-stage/per-access intersections is the intersection
1664    // of the union of all stage/access types for all the stages with the same union for the access mask)...
John Zulauf9cb530d2019-09-30 14:14:10 -06001665 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1666}
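
// A worked example of the factoring above (a sketch that restates the definition as a self-check; it assumes
// the SyncStageAccess helpers are statically callable, as the qualified definitions above suggest):
[[maybe_unused]] static void AccessScopeExample() {
    const VkPipelineStageFlags2KHR stages = VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR;
    const VkAccessFlags2KHR accesses = VK_ACCESS_2_SHADER_READ_BIT_KHR;
    // The scope is the intersection of "every access the stage mask could make" with "every stage that
    // could make the access mask" -- e.g. color attachment writes can never appear in this scope.
    const SyncStageAccessFlags scope = SyncStageAccess::AccessScope(stages, accesses);
    assert(scope == (SyncStageAccess::AccessScopeByStage(stages) & SyncStageAccess::AccessScopeByAccess(accesses)));
}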
1667
1668template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001669void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001670 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
1671    // that do incremental updates).
John Zulauf4a6105a2020-11-17 15:11:05 -07001672 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001673 auto pos = accesses->lower_bound(range);
1674 if (pos == accesses->end() || !pos->first.intersects(range)) {
1675        // No entries overlap the range; infill the whole range with a default value.
1676 pos = action.Infill(accesses, pos, range);
1677 } else if (range.begin < pos->first.begin) {
1678 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001679 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001680 } else if (pos->first.begin < range.begin) {
1681 // Trim the beginning if needed
1682 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1683 ++pos;
1684 }
1685
1686 const auto the_end = accesses->end();
1687 while ((pos != the_end) && pos->first.intersects(range)) {
1688 if (pos->first.end > range.end) {
1689 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1690 }
1691
1692 pos = action(accesses, pos);
1693 if (pos == the_end) break;
1694
1695 auto next = pos;
1696 ++next;
1697 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1698 // Need to infill if next is disjoint
1699 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001700 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001701 next = action.Infill(accesses, next, new_range);
1702 }
1703 pos = next;
1704 }
1705}
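
// A hedged sketch of the Action interface the walk above expects: Infill() handles gaps (and may decline by
// returning pos unchanged, as ApplyBarrierOpsFunctor below does when no layout transition is pending), while
// operator() updates each overlapped, already-trimmed entry. CountingAction is illustrative only.
namespace update_walk_example {
struct CountingAction {
    using Iterator = ResourceAccessRangeMap::iterator;
    Iterator Infill(ResourceAccessRangeMap *accesses, const Iterator &pos, const ResourceAccessRange &range) const {
        // Gaps get a default-constructed state, mirroring UpdateMemoryAccessStateFunctor below
        return accesses->insert(pos, std::make_pair(range, ResourceAccessState()));
    }
    Iterator operator()(ResourceAccessRangeMap *, const Iterator &pos) const {
        ++touched;  // each overlapped entry is visited exactly once, after any needed splits
        return pos;
    }
    mutable size_t touched = 0;
};
}  // namespace update_walk_example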
John Zulaufd5115702021-01-18 12:34:33 -07001706
1707// Give a comparable interface for range generators and ranges
1708template <typename Action>
John Zulaufcb7e1672022-05-04 13:46:08 -06001709void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
John Zulaufd5115702021-01-18 12:34:33 -07001710 assert(range);
1711 UpdateMemoryAccessState(accesses, *range, action);
1712}
1713
John Zulauf4a6105a2020-11-17 15:11:05 -07001714template <typename Action, typename RangeGen>
1715void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1716 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001717    RangeGen &range_gen = *range_gen_arg;  // Style requires non-const parameters to be pointers, but deref-ing a pointer-to-iterator is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001718 for (; range_gen->non_empty(); ++range_gen) {
1719 UpdateMemoryAccessState(accesses, *range_gen, action);
1720 }
1721}
John Zulauf9cb530d2019-09-30 14:14:10 -06001722
John Zulaufd0ec59f2021-03-13 14:25:08 -07001723template <typename Action, typename RangeGen>
1724void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, const RangeGen &range_gen_prebuilt) {
1725    RangeGen range_gen(range_gen_prebuilt);  // RangeGenerators can be expensive to create from scratch... initialize from the prebuilt one
1726 for (; range_gen->non_empty(); ++range_gen) {
1727 UpdateMemoryAccessState(accesses, *range_gen, action);
1728 }
1729}
John Zulauf9cb530d2019-09-30 14:14:10 -06001730struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001731 using Iterator = ResourceAccessRangeMap::iterator;
1732 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001733 // this is only called on gaps, and never returns a gap.
1734 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001735 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001736 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001737 }
John Zulauf5f13a792020-03-10 07:31:21 -06001738
John Zulauf5c5e88d2019-12-26 11:22:02 -07001739 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001740 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001741 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001742 return pos;
1743 }
1744
John Zulauf43cc7462020-12-03 12:33:12 -07001745 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf14940722021-04-12 15:19:02 -06001746 SyncOrdering ordering_rule_, ResourceUsageTag tag_)
John Zulauf8e3c3e92021-01-06 11:19:36 -07001747 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001748 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001749 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001750 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001751 const SyncOrdering ordering_rule;
John Zulauf14940722021-04-12 15:19:02 -06001752 const ResourceUsageTag tag;
John Zulauf9cb530d2019-09-30 14:14:10 -06001753};
1754
John Zulauf4a6105a2020-11-17 15:11:05 -07001755// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001756struct PipelineBarrierOp {
1757 SyncBarrier barrier;
1758 bool layout_transition;
John Zulauf00119522022-05-23 19:07:42 -06001759 ResourceAccessState::QueueScopeOps scope;
1760 PipelineBarrierOp(QueueId queue_id, const SyncBarrier &barrier_, bool layout_transition_)
John Zulauff26fca92022-08-15 11:53:34 -06001761 : barrier(barrier_), layout_transition(layout_transition_), scope(queue_id) {
1762 if (queue_id != QueueSyncState::kQueueIdInvalid) {
1763            // This is a submit time application... suppress layout transitions so as not to taint the QueueBatchContext write state
1764 layout_transition = false;
1765 }
1766 }
John Zulaufd5115702021-01-18 12:34:33 -07001767 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf00119522022-05-23 19:07:42 -06001768 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope, barrier, layout_transition); }
John Zulauf1e331ec2020-12-04 18:29:38 -07001769};
John Zulauf00119522022-05-23 19:07:42 -06001770
John Zulaufecf4ac52022-06-06 10:08:42 -06001771// Batch barrier ops don't modify in place, and thus don't need to hold pending state, and also are *never* layout transitions.
1772struct BatchBarrierOp : public PipelineBarrierOp {
1773 void operator()(ResourceAccessState *access_state) const {
1774 access_state->ApplyBarrier(scope, barrier, layout_transition);
1775 access_state->ApplyPendingBarriers(kInvalidTag); // There can't be any need for this tag
1776 }
1777 BatchBarrierOp(QueueId queue_id, const SyncBarrier &barrier_) : PipelineBarrierOp(queue_id, barrier_, false) {}
1778};
1779
John Zulauf4a6105a2020-11-17 15:11:05 -07001780// The barrier operation for wait events
1781struct WaitEventBarrierOp {
John Zulaufb7578302022-05-19 13:50:18 -06001782 ResourceAccessState::EventScopeOps scope_ops;
John Zulauf4a6105a2020-11-17 15:11:05 -07001783 SyncBarrier barrier;
1784 bool layout_transition;
John Zulaufe0757ba2022-06-10 16:51:45 -06001785
1786 WaitEventBarrierOp(const QueueId scope_queue_, const ResourceUsageTag scope_tag_, const SyncBarrier &barrier_,
John Zulauf00119522022-05-23 19:07:42 -06001787 bool layout_transition_)
John Zulauff26fca92022-08-15 11:53:34 -06001788 : scope_ops(scope_queue_, scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {
1789 if (scope_queue_ != QueueSyncState::kQueueIdInvalid) {
1790            // This is a submit time application... suppress layout transitions so as not to taint the QueueBatchContext write state
1791 layout_transition = false;
1792 }
1793 }
John Zulaufb7578302022-05-19 13:50:18 -06001794 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope_ops, barrier, layout_transition); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001795};
John Zulauf1e331ec2020-12-04 18:29:38 -07001796
John Zulauf4a6105a2020-11-17 15:11:05 -07001797// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1798// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1799// of a collection is known/present.
John Zulauf5c628d02021-05-04 15:46:36 -06001800template <typename BarrierOp, typename OpVector = std::vector<BarrierOp>>
John Zulauf89311b42020-09-29 16:28:47 -06001801class ApplyBarrierOpsFunctor {
1802 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001803 using Iterator = ResourceAccessRangeMap::iterator;
John Zulauf5c628d02021-05-04 15:46:36 -06001804 // Only called with a gap, and pos at the lower_bound(range)
1805 inline Iterator Infill(ResourceAccessRangeMap *accesses, const Iterator &pos, const ResourceAccessRange &range) const {
1806 if (!infill_default_) {
1807 return pos;
1808 }
1809 ResourceAccessState default_state;
1810 auto inserted = accesses->insert(pos, std::make_pair(range, default_state));
1811 return inserted;
1812 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001813
John Zulauf5c628d02021-05-04 15:46:36 -06001814 Iterator operator()(ResourceAccessRangeMap *accesses, const Iterator &pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001815 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001816 for (const auto &op : barrier_ops_) {
1817 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001818 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001819
John Zulauf89311b42020-09-29 16:28:47 -06001820 if (resolve_) {
1821 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1822 // another walk
1823 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001824 }
1825 return pos;
1826 }
1827
John Zulauf89311b42020-09-29 16:28:47 -06001828 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf5c628d02021-05-04 15:46:36 -06001829 ApplyBarrierOpsFunctor(bool resolve, typename OpVector::size_type size_hint, ResourceUsageTag tag)
1830 : resolve_(resolve), infill_default_(false), barrier_ops_(), tag_(tag) {
John Zulaufd5115702021-01-18 12:34:33 -07001831 barrier_ops_.reserve(size_hint);
1832 }
John Zulauf5c628d02021-05-04 15:46:36 -06001833 void EmplaceBack(const BarrierOp &op) {
1834 barrier_ops_.emplace_back(op);
1835 infill_default_ |= op.layout_transition;
1836 }
John Zulauf89311b42020-09-29 16:28:47 -06001837
1838 private:
1839 bool resolve_;
John Zulauf5c628d02021-05-04 15:46:36 -06001840 bool infill_default_;
1841 OpVector barrier_ops_;
John Zulauf14940722021-04-12 15:19:02 -06001842 const ResourceUsageTag tag_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001843};
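
// A hedged usage sketch of the functor above: gather per-barrier ops, then apply and resolve them in a single
// walk over the affected range. All parameters stand for caller state; this helper is illustrative and not
// called by the validator.
[[maybe_unused]] static void ApplyBarrierOpsExample(ResourceAccessRangeMap &map, const ResourceAccessRange &range,
                                                    const SyncBarrier &barrier, ResourceUsageTag tag) {
    ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_ops(true /* resolve */, 1 /* size hint */, tag);
    barrier_ops.EmplaceBack(PipelineBarrierOp(QueueSyncState::kQueueIdInvalid, barrier, false /* no layout transition */));
    UpdateMemoryAccessState(&map, range, barrier_ops);
}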
1844
John Zulauf4a6105a2020-11-17 15:11:05 -07001845// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1846// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1847template <typename BarrierOp>
John Zulauf5c628d02021-05-04 15:46:36 -06001848class ApplyBarrierFunctor : public ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>> {
1849 using Base = ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>>;
1850
John Zulauf4a6105a2020-11-17 15:11:05 -07001851 public:
John Zulaufee984022022-04-13 16:39:50 -06001852 ApplyBarrierFunctor(const BarrierOp &barrier_op) : Base(false, 1, kInvalidTag) { Base::EmplaceBack(barrier_op); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001853};
1854
John Zulauf1e331ec2020-12-04 18:29:38 -07001855// This functor resolves the pending state.
John Zulauf5c628d02021-05-04 15:46:36 -06001856class ResolvePendingBarrierFunctor : public ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>> {
1857 using Base = ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>>;
1858
John Zulauf1e331ec2020-12-04 18:29:38 -07001859 public:
John Zulauf5c628d02021-05-04 15:46:36 -06001860 ResolvePendingBarrierFunctor(ResourceUsageTag tag) : Base(true, 0, tag) {}
John Zulauf9cb530d2019-09-30 14:14:10 -06001861};
1862
John Zulauf8e3c3e92021-01-06 11:19:36 -07001863void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001864 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001865 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001866 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001867}
1868
void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const ResourceAccessRange &range, const ResourceUsageTag tag) {
    if (!SimpleBinding(buffer)) return;
    const auto base_address = ResourceBaseAddress(buffer);
    UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
}

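// A minimal worked example of the address arithmetic above (assumed values, for illustration only): a buffer
// bound at device-memory offset 0x1000 with an access at buffer-relative [0, 256) is tracked in the linear map as
// the memory-relative range [0x1000, 0x1100), so accesses through other buffers bound into the same
// VkDeviceMemory can alias and be hazard-checked against each other.
//
//     ResourceAccessRange range = MakeRange(0, 256);   // buffer-relative
//     // range + base_address == [0x1000, 0x1100)      // memory-relative, as stored in the access map
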
void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const VkImageSubresourceRange &subresource_range, const ResourceUsageTag &tag) {
    if (!SimpleBinding(image)) return;
    const auto base_address = ResourceBaseAddress(image);
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address, false);
    const auto address_type = ImageAddressType(image);
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
    UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag tag) {
    if (!SimpleBinding(image)) return;
    const auto base_address = ResourceBaseAddress(image);
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
                                                       base_address, false);
    const auto address_type = ImageAddressType(image);
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
    UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
}

void AccessContext::UpdateAccessState(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
                                      SyncStageAccessIndex current_usage, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
    const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
    if (!gen) return;
    subresource_adapter::ImageRangeGenerator range_gen(*gen);
    const auto address_type = view_gen.GetAddressType();
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
    ApplyUpdateAction(address_type, action, &range_gen);
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                                      const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag tag) {
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
}

template <typename Action, typename RangeGen>
void AccessContext::ApplyUpdateAction(AccessAddressType address_type, const Action &action, RangeGen *range_gen_arg) {
    assert(range_gen_arg);  // The old Google C++ style guide required non-const objects to be passed by * not &, but this isn't an optional arg.
    UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, range_gen_arg);
}

template <typename Action>
void AccessContext::ApplyUpdateAction(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, const Action &action) {
    const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
    if (!gen) return;
    UpdateMemoryAccessState(&GetAccessStateMap(view_gen.GetAddressType()), action, *gen);
}

void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state,
                                                  const AttachmentViewGenVector &attachment_views, uint32_t subpass,
                                                  const ResourceUsageTag tag) {
    UpdateStateResolveAction update(*this, tag);
    ResolveOperation(update, rp_state, attachment_views, subpass);
}

void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
                                                uint32_t subpass, const ResourceUsageTag tag) {
    const auto *attachment_ci = rp_state.createInfo.pAttachments;

    for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
        if (rp_state.attachment_last_subpass[i] == subpass) {
            const auto &view_gen = attachment_views[i];
            if (!view_gen.IsValid()) continue;  // UNUSED

            const auto &ci = attachment_ci[i];
            const bool has_depth = FormatHasDepth(ci.format);
            const bool has_stencil = FormatHasStencil(ci.format);
            const bool is_color = !(has_depth || has_stencil);
            const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;

            if (is_color && store_op_stores) {
                UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
                                  SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
            } else {
                if (has_depth && store_op_stores) {
                    UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
                                      SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
                }
                const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
                if (has_stencil && stencil_op_stores) {
                    UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
                                      SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
                }
            }
        }
    }
}

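// For reference (illustrative sketch): store operations are modeled as attachment writes at the last subpass that
// uses the attachment, so a VK_ATTACHMENT_STORE_OP_STORE color attachment behaves as if the implementation issued
// something like the following at subpass end:
//
//     UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
//                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
//
// while VK_ATTACHMENT_STORE_OP_NONE_EXT generates no access at all, which is exactly the store_op_stores test above.
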
template <typename Action>
void AccessContext::ApplyToContext(const Action &barrier_action) {
    // Note: Barriers do *not* cross context boundaries, applying to accesses within.... (at least for renderpass subpasses)
    for (const auto address_type : kAddressTypes) {
        UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
    }
}

void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
    for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
        auto &context = contexts[subpass_index];
        ApplyTrackbackStackAction barrier_action(context.GetDstExternalTrackBack().barriers);
        for (const auto address_type : kAddressTypes) {
            context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
        }
    }
}

// Caller must ensure that the lifespan of this is less than the lifespan of from
void AccessContext::ImportAsyncContexts(const AccessContext &from) { async_ = from.async_; }

// Suitable only for *subpass* access contexts
HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const AttachmentViewGen &attach_view) const {
    if (!attach_view.IsValid()) return HazardResult();

    // We should never ask for a transition from a context we don't have
    assert(track_back.source_subpass);

    // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
    // Hazard detection for the transition can be done against the merge of the barriers (it only uses src_...)
    const auto merged_barrier = MergeBarriers(track_back.barriers);
    HazardResult hazard = track_back.source_subpass->DetectImageBarrierHazard(attach_view, merged_barrier, kDetectPrevious);
    if (!hazard.hazard) {
        // The Async hazard check is against the current context's async set.
        hazard = DetectImageBarrierHazard(attach_view, merged_barrier, kDetectAsync);
    }

    return hazard;
}

void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
                                            const AttachmentViewGenVector &attachment_views, const ResourceUsageTag tag) {
    const auto &transitions = rp_state.subpass_transitions[subpass];
    const ResourceAccessState empty_infill;
    for (const auto &transition : transitions) {
        const auto prev_pass = transition.prev_pass;
        const auto &view_gen = attachment_views[transition.attachment];
        if (!view_gen.IsValid()) continue;

        const auto *trackback = GetTrackBackFromSubpass(prev_pass);
        assert(trackback);

        // Import the attachments into the current context
        const auto *prev_context = trackback->source_subpass;
        assert(prev_context);
        const auto address_type = view_gen.GetAddressType();
        auto &target_map = GetAccessStateMap(address_type);
        ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
        prev_context->ResolveAccessRange(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action, &target_map,
                                         &empty_infill);
    }

    // If there were no transitions, skip this global map walk
    if (transitions.size()) {
        ResolvePendingBarrierFunctor apply_pending_action(tag);
        ApplyToContext(apply_pending_action);
    }
}

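// Worked example (illustrative): for a render pass where attachment 0 is written as a color attachment in
// subpass 0 and read as an input attachment in subpass 1, rp_state.subpass_transitions[1] contains a transition
// with prev_pass == 0 for attachment 0. The loop above pulls subpass 0's accesses through the subpass-dependency
// barriers into subpass 1's map, and the final ResolvePendingBarrierFunctor pass stamps the layout transition as
// a write at `tag`, so subsequent reads in subpass 1 are ordered against it.
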
bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint, CMD_TYPE cmd_type) const {
    bool skip = false;
    const PIPELINE_STATE *pipe = nullptr;
    const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
    cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
    if (!pipe || !per_sets) {
        return skip;
    }
    const char *caller_name = CommandTypeString(cmd_type);

    using DescriptorClass = cvdescriptorset::DescriptorClass;
    using BufferDescriptor = cvdescriptorset::BufferDescriptor;
    using ImageDescriptor = cvdescriptorset::ImageDescriptor;
    using TexelDescriptor = cvdescriptorset::TexelDescriptor;

    for (const auto &stage_state : pipe->stage_state) {
        const auto raster_state = pipe->RasterizationState();
        if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
            continue;
        }
        for (const auto &set_binding : stage_state.descriptor_uses) {
            const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
            auto binding = descriptor_set->GetBinding(set_binding.first.binding);
            const auto descriptor_type = binding->type;
            SyncStageAccessIndex sync_index =
                GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);

            for (uint32_t index = 0; index < binding->count; index++) {
                const auto *descriptor = binding->GetDescriptor(index);
                switch (descriptor->GetClass()) {
                    case DescriptorClass::ImageSampler:
                    case DescriptorClass::Image: {
                        if (descriptor->Invalid()) {
                            continue;
                        }

                        // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
                        const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
                        const auto *img_view_state = image_descriptor->GetImageViewState();
                        VkImageLayout image_layout = image_descriptor->GetImageLayout();

                        HazardResult hazard;
                        // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
                        // Descriptors, so we do not have to worry about depth slicing here.
                        // See: VUID 00343
                        assert(!img_view_state->IsDepthSliced());
                        const IMAGE_STATE *img_state = img_view_state->image_state.get();
                        const auto &subresource_range = img_view_state->normalized_subresource_range;

                        if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
                            const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
                            const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
                            // Input attachments are subject to raster ordering rules
                            hazard =
                                current_context_->DetectHazard(*img_state, sync_index, subresource_range, SyncOrdering::kRaster,
                                                               offset, extent, img_view_state->IsDepthSliced());
                        } else {
                            hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
                                                                    img_view_state->IsDepthSliced());
                        }

                        if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
                            skip |= sync_state_->LogError(
                                img_view_state->image_view(), string_SyncHazardVUID(hazard.hazard),
                                "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
                                ", index %" PRIu32 ". Access info %s.",
                                caller_name, string_SyncHazard(hazard.hazard),
                                sync_state_->report_data->FormatHandle(img_view_state->image_view()).c_str(),
                                sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
                                sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
                                sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
                                string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
                                set_binding.first.binding, index, FormatHazard(hazard).c_str());
                        }
                        break;
                    }
                    case DescriptorClass::TexelBuffer: {
                        const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
                        if (texel_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_view_state = texel_descriptor->GetBufferViewState();
                        const auto *buf_state = buf_view_state->buffer_state.get();
                        const ResourceAccessRange range = MakeRange(*buf_view_state);
                        auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
                        if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
                            skip |= sync_state_->LogError(
                                buf_view_state->buffer_view(), string_SyncHazardVUID(hazard.hazard),
                                "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
                                caller_name, string_SyncHazard(hazard.hazard),
                                sync_state_->report_data->FormatHandle(buf_view_state->buffer_view()).c_str(),
                                sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
                                sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
                                sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
                                string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
                                FormatHazard(hazard).c_str());
                        }
                        break;
                    }
                    case DescriptorClass::GeneralBuffer: {
                        const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
                        if (buffer_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_state = buffer_descriptor->GetBufferState();
                        const ResourceAccessRange range =
                            MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
                        auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
                        if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
                            skip |= sync_state_->LogError(
                                buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                                "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
                                caller_name, string_SyncHazard(hazard.hazard),
                                sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
                                sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
                                sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
                                sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
                                string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
                                FormatHazard(hazard).c_str());
                        }
                        break;
                    }
                    // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
                    default:
                        break;
                }
            }
        }
    }
    return skip;
}

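// Example of the descriptor-to-access mapping used above (names assumed for illustration): a
// VK_DESCRIPTOR_TYPE_STORAGE_BUFFER read from a fragment shader maps, via GetSyncStageAccessIndexsByDescriptorSet,
// to a stage/access index such as SYNC_FRAGMENT_SHADER_SHADER_STORAGE_READ, and the hazard check covers only the
// bytes the descriptor actually binds:
//
//     const ResourceAccessRange range = MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
//     auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
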
void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
                                                                 const ResourceUsageTag tag) {
    const PIPELINE_STATE *pipe = nullptr;
    const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
    cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
    if (!pipe || !per_sets) {
        return;
    }

    using DescriptorClass = cvdescriptorset::DescriptorClass;
    using BufferDescriptor = cvdescriptorset::BufferDescriptor;
    using ImageDescriptor = cvdescriptorset::ImageDescriptor;
    using TexelDescriptor = cvdescriptorset::TexelDescriptor;

    for (const auto &stage_state : pipe->stage_state) {
        const auto raster_state = pipe->RasterizationState();
        if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
            continue;
        }
        for (const auto &set_binding : stage_state.descriptor_uses) {
            const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
            auto binding = descriptor_set->GetBinding(set_binding.first.binding);
            const auto descriptor_type = binding->type;
            SyncStageAccessIndex sync_index =
                GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);

            for (uint32_t i = 0; i < binding->count; i++) {
                const auto *descriptor = binding->GetDescriptor(i);
                switch (descriptor->GetClass()) {
                    case DescriptorClass::ImageSampler:
                    case DescriptorClass::Image: {
                        // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
                        const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
                        if (image_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *img_view_state = image_descriptor->GetImageViewState();
                        // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
                        // Descriptors, so we do not have to worry about depth slicing here.
                        // See: VUID 00343
                        assert(!img_view_state->IsDepthSliced());
                        const IMAGE_STATE *img_state = img_view_state->image_state.get();
                        if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
                            const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
                            const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
                            current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kRaster,
                                                                img_view_state->normalized_subresource_range, offset, extent, tag);
                        } else {
                            current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kNonAttachment,
                                                                img_view_state->normalized_subresource_range, tag);
                        }
                        break;
                    }
                    case DescriptorClass::TexelBuffer: {
                        const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
                        if (texel_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_view_state = texel_descriptor->GetBufferViewState();
                        const auto *buf_state = buf_view_state->buffer_state.get();
                        const ResourceAccessRange range = MakeRange(*buf_view_state);
                        current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
                        break;
                    }
                    case DescriptorClass::GeneralBuffer: {
                        const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
                        if (buffer_descriptor->Invalid()) {
                            continue;
                        }
                        const auto *buf_state = buffer_descriptor->GetBufferState();
                        const ResourceAccessRange range =
                            MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
                        current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
                        break;
                    }
                    // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
                    default:
                        break;
                }
            }
        }
    }
}

bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return skip;
    }

    const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
    const auto &binding_buffers_size = binding_buffers.size();
    const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();

    for (size_t i = 0; i < binding_descriptions_size; ++i) {
        const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
        if (binding_description.binding < binding_buffers_size) {
            const auto &binding_buffer = binding_buffers[binding_description.binding];
            if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;

            auto *buf_state = binding_buffer.buffer_state.get();
            const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
                                                             vertexCount, binding_description.stride);
            auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
            if (hazard.hazard) {
                skip |= sync_state_->LogError(
                    buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
                    CommandTypeString(cmd_type), string_SyncHazard(hazard.hazard),
                    sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
                    sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatHazard(hazard).c_str());
            }
        }
    }
    return skip;
}

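// Worked example for the range computation above (assumed values, illustration only): with a binding offset of 64,
// stride 16, firstVertex 2, and vertexCount 3, the accessed bytes are
//
//     [64 + 2 * 16, 64 + (2 + 3) * 16) == [96, 144)
//
// clamped to the buffer's createInfo.size; that clamped range is what DetectHazard checks against the access map.
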
void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag tag) {
    const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return;
    }
    const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
    const auto &binding_buffers_size = binding_buffers.size();
    const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();

    for (size_t i = 0; i < binding_descriptions_size; ++i) {
        const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
        if (binding_description.binding < binding_buffers_size) {
            const auto &binding_buffer = binding_buffers[binding_description.binding];
            if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;

            auto *buf_state = binding_buffer.buffer_state.get();
            const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
                                                             vertexCount, binding_description.stride);
            current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
                                                SyncOrdering::kNonAttachment, range, tag);
        }
    }
}

bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, CMD_TYPE cmd_type) const {
    bool skip = false;
    if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) {
        return skip;
    }

    auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
    const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
    const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
                                                     firstIndex, indexCount, index_size);
    auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
    if (hazard.hazard) {
        skip |= sync_state_->LogError(
            index_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
            CommandTypeString(cmd_type), string_SyncHazard(hazard.hazard),
            sync_state_->report_data->FormatHandle(index_buf_state->buffer()).c_str(),
            sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatHazard(hazard).c_str());
    }

    // TODO: For now, we validate the whole vertex buffer, since the index buffer contents can change any time up
    // to queue submission. A more accurate range will be detected in the future.
    skip |= ValidateDrawVertex(UINT32_MAX, 0, cmd_type);
    return skip;
}

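// For reference (illustrative): GetIndexAlignment maps the bound index type to its byte size, e.g.
// VK_INDEX_TYPE_UINT16 -> 2 and VK_INDEX_TYPE_UINT32 -> 4. So an indexed draw with firstIndex 4 and indexCount 8
// of UINT16 indices at binding offset 0 reads bytes [4 * 2, (4 + 8) * 2) == [8, 24) of the index buffer.
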
void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag tag) {
    if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) return;

    auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
    const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
    const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
                                                     firstIndex, indexCount, index_size);
    current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);

    // TODO: For now, we record the whole vertex buffer, since the index buffer contents can change any time up
    // to queue submission. A more accurate range will be detected in the future.
    RecordDrawVertex(UINT32_MAX, 0, tag);
}

bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(CMD_TYPE cmd_type) const {
    bool skip = false;
    if (!current_renderpass_context_) return skip;
    skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), cmd_type);
    return skip;
}

void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag tag) {
    if (current_renderpass_context_) {
        current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
    }
}

QueueId CommandBufferAccessContext::GetQueueId() const { return QueueSyncState::kQueueIdInvalid; }

ResourceUsageTag CommandBufferAccessContext::RecordBeginRenderPass(CMD_TYPE cmd_type, const RENDER_PASS_STATE &rp_state,
                                                                   const VkRect2D &render_area,
                                                                   const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
    // Create an access context for the current renderpass.
    const auto barrier_tag = NextCommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kSubpassTransition);
    const auto load_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kLoadOp);
    render_pass_contexts_.emplace_back(layer_data::make_unique<RenderPassAccessContext>(rp_state, render_area, GetQueueFlags(),
                                                                                        attachment_views, &cb_access_context_));
    current_renderpass_context_ = render_pass_contexts_.back().get();
    current_renderpass_context_->RecordBeginRenderPass(barrier_tag, load_tag);
    current_context_ = &current_renderpass_context_->CurrentContext();
    return barrier_tag;
}

ResourceUsageTag CommandBufferAccessContext::RecordNextSubpass(const CMD_TYPE cmd_type) {
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return NextCommandTag(cmd_type);

    auto store_tag = NextCommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kStoreOp);
    auto barrier_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kSubpassTransition);
    auto load_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kLoadOp);

    current_renderpass_context_->RecordNextSubpass(store_tag, barrier_tag, load_tag);
    current_context_ = &current_renderpass_context_->CurrentContext();
    return barrier_tag;
}

ResourceUsageTag CommandBufferAccessContext::RecordEndRenderPass(const CMD_TYPE cmd_type) {
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return NextCommandTag(cmd_type);

    auto store_tag = NextCommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kStoreOp);
    auto barrier_tag = NextSubcommandTag(cmd_type, ResourceUsageRecord::SubcommandType::kSubpassTransition);

    current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, store_tag, barrier_tag);
    current_context_ = &cb_access_context_;
    current_renderpass_context_ = nullptr;
    return barrier_tag;
}

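// Tag-sequence sketch for a render pass (illustrative): each boundary consumes one command tag plus subcommand
// tags, so the per-command log for begin/next/end looks roughly like
//
//     vkCmdBeginRenderPass : [barrier_tag][load_tag]
//     vkCmdNextSubpass     : [store_tag][barrier_tag][load_tag]
//     vkCmdEndRenderPass   : [store_tag][barrier_tag]
//
// which keeps load ops ordered after the subpass transition that precedes them, and store ops before the next one.
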
void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
    // Erase is okay even if the key isn't present
    auto event_state = sync_state_->Get<EVENT_STATE>(event);
    if (event_state) {
        GetCurrentEventsContext()->Destroy(event_state.get());
    }
}

// This is called on the *recorded* cb context
bool CommandBufferAccessContext::ValidateFirstUse(CommandExecutionContext &exec_context, const char *func_name,
                                                  uint32_t index) const {
    if (!exec_context.ValidForSyncOps()) return false;

    const QueueId queue_id = exec_context.GetQueueId();
    const ResourceUsageTag base_tag = exec_context.GetTagLimit();
    bool skip = false;
    ResourceUsageRange tag_range = {0, 0};
    const AccessContext *recorded_context = GetCurrentAccessContext();
    assert(recorded_context);
    HazardResult hazard;
    ReplayGuard replay_guard(exec_context, *this);

    auto log_msg = [this](const HazardResult &hazard, const CommandExecutionContext &exec_context, const char *func_name,
                          uint32_t index) {
        const auto handle = exec_context.Handle();
        const auto recorded_handle = cb_state_->commandBuffer();
        const auto *report_data = sync_state_->report_data;
        return sync_state_->LogError(handle, string_SyncHazardVUID(hazard.hazard),
                                     "%s: Hazard %s for entry %" PRIu32 ", %s, Recorded access info %s. Access info %s.", func_name,
                                     string_SyncHazard(hazard.hazard), index, report_data->FormatHandle(recorded_handle).c_str(),
                                     FormatUsage(*hazard.recorded_access).c_str(), exec_context.FormatHazard(hazard).c_str());
    };
    for (const auto &sync_op : sync_ops_) {
        // We update the range to include any layout transition first use writes,
        // as they are stored along with the source scope (as effective barrier) when recorded
        tag_range.end = sync_op.tag + 1;
        skip |= sync_op.sync_op->ReplayValidate(sync_op.tag, *this, base_tag, exec_context);

        // We're allowing for the ReplayRecord to modify the exec_context (e.g. for Renderpass operations), so
        // we need to fetch the current access context each time
        hazard = exec_context.DetectFirstUseHazard(tag_range);
        if (hazard.hazard) {
            skip |= log_msg(hazard, exec_context, func_name, index);
        }
        // NOTE: Add call to replay validate here when we add support for syncop with non-trivial replay
        // Record the barrier into the proxy context.
        sync_op.sync_op->ReplayRecord(exec_context, base_tag + sync_op.tag);
        tag_range.begin = tag_range.end;
    }

    // and anything after the last syncop
    tag_range.end = ResourceUsageRecord::kMaxIndex;
    hazard = recorded_context->DetectFirstUseHazard(queue_id, tag_range, *exec_context.GetCurrentAccessContext());
    if (hazard.hazard) {
        skip |= log_msg(hazard, exec_context, func_name, index);
    }

    return skip;
}

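// Replay sketch (illustrative): for vkCmdExecuteCommands, a recorded secondary command buffer is validated against
// the primary's current state by alternating "check the first-use accesses between sync ops" and "replay the sync
// op into the proxy state". For a secondary containing [copy][barrier][copy], this conceptually runs:
//
//     tag_range = [0, barrier.tag + 1)        -> DetectFirstUseHazard  // first copy vs. primary state
//     barrier.ReplayRecord(exec_context, ...)                          // apply the barrier to the proxy state
//     tag_range = [barrier.tag + 1, kMaxIndex) -> DetectFirstUseHazard // second copy, now protected by the barrier
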
void CommandBufferAccessContext::RecordExecutedCommandBuffer(const CommandBufferAccessContext &recorded_cb_context) {
    const AccessContext *recorded_context = recorded_cb_context.GetCurrentAccessContext();
    assert(recorded_context);

    // Just run through the barriers ignoring the usage from the recorded context, as Resolve will overwrite outdated state
    const ResourceUsageTag base_tag = GetTagLimit();
    for (const auto &sync_op : recorded_cb_context.GetSyncOps()) {
        // We update the range to include any layout transition first use writes,
        // as they are stored along with the source scope (as effective barrier) when recorded
        sync_op.sync_op->ReplayRecord(*this, base_tag + sync_op.tag);
    }

    ResourceUsageRange tag_range = ImportRecordedAccessLog(recorded_cb_context);
    assert(base_tag == tag_range.begin);  // to ensure the tag offset calculation agrees
    ResolveExecutedCommandBuffer(*recorded_context, tag_range.begin);
}

void CommandBufferAccessContext::ResolveExecutedCommandBuffer(const AccessContext &recorded_context, ResourceUsageTag offset) {
    auto tag_offset = [offset](ResourceAccessState *access) { access->OffsetTag(offset); };
    GetCurrentAccessContext()->ResolveFromContext(tag_offset, recorded_context);
}

HazardResult CommandBufferAccessContext::DetectFirstUseHazard(const ResourceUsageRange &tag_range) {
    return current_replay_->GetCurrentAccessContext()->DetectFirstUseHazard(GetQueueId(), tag_range, *GetCurrentAccessContext());
}

ResourceUsageRange CommandExecutionContext::ImportRecordedAccessLog(const CommandBufferAccessContext &recorded_context) {
    // The execution references ensure lifespan for the referenced child CB's...
    ResourceUsageRange tag_range(GetTagLimit(), 0);
    InsertRecordedAccessLogEntries(recorded_context);
    tag_range.end = GetTagLimit();
    return tag_range;
}

void CommandBufferAccessContext::InsertRecordedAccessLogEntries(const CommandBufferAccessContext &recorded_context) {
    cbs_referenced_->emplace(recorded_context.GetCBStateShared());
    access_log_->insert(access_log_->end(), recorded_context.access_log_->cbegin(), recorded_context.access_log_->cend());
}

ResourceUsageTag CommandBufferAccessContext::NextSubcommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
    ResourceUsageTag next = access_log_->size();
    access_log_->emplace_back(command, command_number_, subcommand, ++subcommand_number_, cb_state_.get(), reset_count_);
    return next;
}

ResourceUsageTag CommandBufferAccessContext::NextCommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
    command_number_++;
    subcommand_number_ = 0;
    ResourceUsageTag next = access_log_->size();
    access_log_->emplace_back(command, command_number_, subcommand, subcommand_number_, cb_state_.get(), reset_count_);
    return next;
}

ResourceUsageTag CommandBufferAccessContext::NextIndexedCommandTag(CMD_TYPE command, uint32_t index) {
    if (index == 0) {
        return NextCommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
    }
    return NextSubcommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
}

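// Numbering example (derived from the functions above, illustrative values): tags are simply indices into
// access_log_, so a command that emits two subcommands produces entries like
//
//     NextCommandTag(CMD_X)         -> tag N,   command_number_ == C, subcommand_number_ == 0
//     NextSubcommandTag(CMD_X, ...) -> tag N+1, command_number_ == C, subcommand_number_ == 1
//     NextSubcommandTag(CMD_X, ...) -> tag N+2, command_number_ == C, subcommand_number_ == 2
//
// and NextIndexedCommandTag(cmd, i) opens a new command for i == 0 and appends subcommands otherwise.
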
void CommandBufferAccessContext::RecordSyncOp(SyncOpPointer &&sync_op) {
    auto tag = sync_op->Record(this);
    // As renderpass operations can have side effects on the command buffer access context,
    // update the sync operation to record these if any.
    sync_ops_.emplace_back(tag, std::move(sync_op));
}

class HazardDetectFirstUse {
  public:
    HazardDetectFirstUse(const ResourceAccessState &recorded_use, QueueId queue_id, const ResourceUsageRange &tag_range)
        : recorded_use_(recorded_use), queue_id_(queue_id), tag_range_(tag_range) {}
    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectHazard(recorded_use_, queue_id_, tag_range_);
    }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
        return pos->second.DetectAsyncHazard(recorded_use_, tag_range_, start_tag);
    }

  private:
    const ResourceAccessState &recorded_use_;
    const QueueId queue_id_;
    const ResourceUsageRange &tag_range_;
};

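// Detector protocol sketch (illustrative): the DetectHazard-style map walks are parameterized on small detector
// objects like the one above, which only need to provide Detect (synchronous, ordering rules apply) and
// DetectAsync (unordered, cross-context) over an access-map iterator. A minimal conforming detector shape:
//
//     struct MyDetector {
//         HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const;
//         HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const;
//     };
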
// This is called on the *recorded* command buffer's access context, with the *active* access context passed in,
// against which hazards will be detected
HazardResult AccessContext::DetectFirstUseHazard(QueueId queue_id, const ResourceUsageRange &tag_range,
                                                 const AccessContext &access_context) const {
    HazardResult hazard;
    for (const auto address_type : kAddressTypes) {
        const auto &recorded_access_map = GetAccessStateMap(address_type);
        for (const auto &recorded_access : recorded_access_map) {
            // Cull any entries not in the current tag range
            if (!recorded_access.second.FirstAccessInTagRange(tag_range)) continue;
            HazardDetectFirstUse detector(recorded_access.second, queue_id, tag_range);
            hazard = access_context.DetectHazard(address_type, detector, recorded_access.first, DetectOptions::kDetectAll);
            if (hazard.hazard) break;
        }
    }

    return hazard;
}

bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &exec_context,
                                                            const CMD_BUFFER_STATE &cmd_buffer, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto &sync_state = exec_context.GetSyncState();
    const auto *pipe = cmd_buffer.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return skip;
    }

    const auto raster_state = pipe->RasterizationState();
    if (raster_state && raster_state->rasterizerDiscardEnable) {
        return skip;
    }
    const char *caller_name = CommandTypeString(cmd_type);
    const auto &list = pipe->fragmentShader_writable_output_location_list;
    const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];

    const auto &current_context = CurrentContext();
    // The subpass's input attachments are already validated in ValidateDispatchDrawDescriptorSet
    if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
        for (const auto location : list) {
            if (location >= subpass.colorAttachmentCount ||
                subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
                continue;
            }
            const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
            if (!view_gen.IsValid()) continue;
            HazardResult hazard =
                current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
                                             SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment);
            if (hazard.hazard) {
                const VkImageView view_handle = view_gen.GetViewState()->image_view();
                skip |= sync_state.LogError(view_handle, string_SyncHazardVUID(hazard.hazard),
                                            "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
                                            caller_name, string_SyncHazard(hazard.hazard),
                                            sync_state.report_data->FormatHandle(view_handle).c_str(),
                                            sync_state.report_data->FormatHandle(cmd_buffer.commandBuffer()).c_str(),
                                            cmd_buffer.activeSubpass, location, exec_context.FormatHazard(hazard).c_str());
            }
        }
    }

    // PHASE1 TODO: Add layout based read/vs. write selection.
    // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
    const auto ds_state = pipe->DepthStencilState();
    const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);

    if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
        const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
        const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
        bool depth_write = false, stencil_write = false;

        // PHASE1 TODO: These validations should be in core_checks.
        if (!FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable && ds_state->depthWriteEnable &&
            IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
            depth_write = true;
        }
        // PHASE1 TODO: It needs to check if stencil is writable.
        // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
        // If the depth test is disabled, it's considered to pass, and then depthFailOp doesn't run.
        // PHASE1 TODO: These validations should be in core_checks.
        if (!FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
            IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
            stencil_write = true;
        }

        // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
        if (depth_write) {
            HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
                                                               SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
                                                               SyncOrdering::kDepthStencilAttachment);
            if (hazard.hazard) {
                skip |= sync_state.LogError(
                    view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
                    "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
                    caller_name, string_SyncHazard(hazard.hazard),
                    sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
                    sync_state.report_data->FormatHandle(cmd_buffer.commandBuffer()).c_str(), cmd_buffer.activeSubpass,
                    exec_context.FormatHazard(hazard).c_str());
            }
        }
        if (stencil_write) {
            HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
                                                               SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
                                                               SyncOrdering::kDepthStencilAttachment);
            if (hazard.hazard) {
                skip |= sync_state.LogError(
                    view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
                    "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
                    caller_name, string_SyncHazard(hazard.hazard),
                    sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
                    sync_state.report_data->FormatHandle(cmd_buffer.commandBuffer()).c_str(), cmd_buffer.activeSubpass,
                    exec_context.FormatHazard(hazard).c_str());
            }
locke-lunarg61870c22020-06-09 14:51:50 -06002661 }
2662 }
2663 return skip;
2664}
2665
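// Record-phase counterpart of the draw validation above: instead of checking for hazards, it writes the same color and
// depth/stencil attachment accesses into the current subpass context. A rough sketch of how a draw flows through both
// phases (illustrative only; the real calls are made from the PreCallValidate/PostCallRecord hooks):
//
//     bool skip = rp_context.ValidateDrawSubpassAttachment(exec_context, cmd_buffer, CMD_DRAW);
//     if (!skip) rp_context.RecordDrawSubpassAttachment(cmd_buffer, tag);  // tag allocated from the access log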
void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd_buffer, const ResourceUsageTag tag) {
    const auto *pipe = cmd_buffer.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (!pipe) {
        return;
    }

    const auto *raster_state = pipe->RasterizationState();
    if (raster_state && raster_state->rasterizerDiscardEnable) {
        return;
    }
    const auto &list = pipe->fragmentShader_writable_output_location_list;
    const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];

    auto &current_context = CurrentContext();
    // The subpass's input attachments have already been handled in RecordDispatchDrawDescriptorSet
    if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
        for (const auto location : list) {
            if (location >= subpass.colorAttachmentCount ||
                subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
                continue;
            }
            const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
            current_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
                                              SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment,
                                              tag);
        }
    }

    // PHASE1 TODO: Add layout-based read vs. write selection.
    // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
    const auto *ds_state = pipe->DepthStencilState();
    const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);
    if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
        const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
        const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
        bool depth_write = false, stencil_write = false;
        const bool has_depth = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT);
        const bool has_stencil = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);

        // PHASE1 TODO: These validations should be in core_checks.
        if (has_depth && !FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable &&
            ds_state->depthWriteEnable && IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
            depth_write = true;
        }
        // PHASE1 TODO: It needs to check if stencil is writable.
        //              If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
        //              If the depth test is disabled, it is treated as passing, and depthFailOp doesn't run.
        // PHASE1 TODO: These validations should be in core_checks.
        if (has_stencil && !FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
            IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
            stencil_write = true;
        }

        if (depth_write || stencil_write) {
            const auto ds_gentype = view_gen.GetDepthStencilRenderAreaGenType(depth_write, stencil_write);
            // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
            current_context.UpdateAccessState(view_gen, ds_gentype, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
                                              SyncOrdering::kDepthStencilAttachment, tag);
        }
    }
}

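// vkCmdNextSubpass validation covers the three operations the transition implies: the resolve and store operations that
// end the current subpass, and the layout transitions and load operations that begin the next one. The load-op check
// runs on a scratch copy of the (still empty) next context so that the not-yet-recorded layout transitions are visible
// to it.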
bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &exec_context, CMD_TYPE cmd_type) const {
    // PHASE1 TODO: Add Validate Preserve attachments
    bool skip = false;
    skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, cmd_type,
                                                       current_subpass_);
    skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
                                                    cmd_type);

    const auto next_subpass = current_subpass_ + 1;
    if (next_subpass >= subpass_contexts_.size()) {
        return skip;
    }
    const auto &next_context = subpass_contexts_[next_subpass];
    skip |=
        next_context.ValidateLayoutTransitions(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, cmd_type);
    if (!skip) {
        // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
        // on a copy of the (empty) next context.
        // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
        AccessContext temp_context(next_context);
        temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kInvalidTag);
        skip |=
            temp_context.ValidateLoadOperation(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, cmd_type);
    }
    return skip;
}
bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &exec_context, CMD_TYPE cmd_type) const {
    // PHASE1 TODO: Validate Preserve
    bool skip = false;
    skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, cmd_type,
                                                       current_subpass_);
    skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
                                                    cmd_type);
    skip |= ValidateFinalSubpassLayoutTransitions(exec_context, cmd_type);
    return skip;
}

AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
    return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, attachment_views_);
}

bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &exec_context,
                                                                    CMD_TYPE cmd_type) const {
    bool skip = false;

    // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
    // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
    // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
    // to apply and only copy then, if this proves a hot spot.
    std::unique_ptr<AccessContext> proxy_for_current;

    // Validate the "finalLayout" transitions to external
    // Get them from the extra entry where we're hiding them.
    const auto &final_transitions = rp_state_->subpass_transitions.back();
    for (const auto &transition : final_transitions) {
        const auto &view_gen = attachment_views_[transition.attachment];
        const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
        assert(trackback.source_subpass);  // Transitions are given implicit transitions if the StateTracker is working correctly
        auto *context = trackback.source_subpass;

        if (transition.prev_pass == current_subpass_) {
            if (!proxy_for_current) {
                // We haven't recorded resolve operations for the current_subpass, so we need to copy current and update it *as if*
                proxy_for_current.reset(CreateStoreResolveProxy());
            }
            context = proxy_for_current.get();
        }

        // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
        const auto merged_barrier = MergeBarriers(trackback.barriers);
        auto hazard = context->DetectImageBarrierHazard(view_gen, merged_barrier, AccessContext::DetectOptions::kDetectPrevious);
        if (hazard.hazard) {
            const char *func_name = CommandTypeString(cmd_type);
            if (hazard.tag == kInvalidTag) {
                // Hazard vs. ILT
                skip |= exec_context.GetSyncState().LogError(
                    rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
                    "%s: Hazard %s vs. store/resolve operations in subpass %" PRIu32 " for attachment %" PRIu32
                    " final image layout transition (old_layout: %s, new_layout: %s).",
                    func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
                    string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout));
            } else {
                skip |= exec_context.GetSyncState().LogError(
                    rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
                    "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
                    " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
                    func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
                    string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
                    exec_context.FormatHazard(hazard).c_str());
            }
        }
    }
    return skip;
}

void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag tag) {
    // Add layout transitions...
    subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
}

void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag tag) {
    const auto *attachment_ci = rp_state_->createInfo.pAttachments;
    auto &subpass_context = subpass_contexts_[current_subpass_];

    for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
        if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
            const AttachmentViewGen &view_gen = attachment_views_[i];
            if (!view_gen.IsValid()) continue;  // UNUSED

            const auto &ci = attachment_ci[i];
            const bool has_depth = FormatHasDepth(ci.format);
            const bool has_stencil = FormatHasStencil(ci.format);
            const bool is_color = !(has_depth || has_stencil);

            if (is_color) {
                const SyncStageAccessIndex load_op = ColorLoadUsage(ci.loadOp);
                if (load_op != SYNC_ACCESS_INDEX_NONE) {
                    subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea, load_op,
                                                      SyncOrdering::kColorAttachment, tag);
                }
            } else {
                if (has_depth) {
                    const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.loadOp);
                    if (load_op != SYNC_ACCESS_INDEX_NONE) {
                        subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_op,
                                                          SyncOrdering::kDepthStencilAttachment, tag);
                    }
                }
                if (has_stencil) {
                    const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.stencilLoadOp);
                    if (load_op != SYNC_ACCESS_INDEX_NONE) {
                        subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, load_op,
                                                          SyncOrdering::kDepthStencilAttachment, tag);
                    }
                }
            }
        }
    }
}
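
// For reference, the load-op to access-index mapping used above, as a sketch of the ColorLoadUsage/DepthStencilLoadUsage
// helpers (worth re-checking against those helpers if the attachment load ops change):
//     VK_ATTACHMENT_LOAD_OP_LOAD       -> *_ATTACHMENT_READ       (prior contents are consumed)
//     VK_ATTACHMENT_LOAD_OP_CLEAR      -> *_ATTACHMENT_WRITE      (the clear overwrites the attachment)
//     VK_ATTACHMENT_LOAD_OP_DONT_CARE  -> *_ATTACHMENT_WRITE      (contents become undefined, modeled as a write)
//     VK_ATTACHMENT_LOAD_OP_NONE_EXT   -> SYNC_ACCESS_INDEX_NONE  (no access recorded)
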
AttachmentViewGenVector RenderPassAccessContext::CreateAttachmentViewGen(
    const VkRect2D &render_area, const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
    AttachmentViewGenVector view_gens;
    VkExtent3D extent = CastTo3D(render_area.extent);
    VkOffset3D offset = CastTo3D(render_area.offset);
    view_gens.reserve(attachment_views.size());
    for (const auto *view : attachment_views) {
        view_gens.emplace_back(view, offset, extent);
    }
    return view_gens;
}
RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
                                                 VkQueueFlags queue_flags,
                                                 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
                                                 const AccessContext *external_context)
    : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_() {
    // Add this for all subpasses here so that they exist during next subpass validation
    InitSubpassContexts(queue_flags, rp_state, external_context, subpass_contexts_);
    attachment_views_ = CreateAttachmentViewGen(render_area, attachment_views);
}
void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag barrier_tag, const ResourceUsageTag load_tag) {
    assert(0 == current_subpass_);
    subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
    RecordLayoutTransitions(barrier_tag);
    RecordLoadOperations(load_tag);
}

void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag store_tag, const ResourceUsageTag barrier_tag,
                                                const ResourceUsageTag load_tag) {
    // Resolves are against *prior* subpass context and thus *before* the subpass increment
    CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
    CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);

    if (current_subpass_ + 1 >= subpass_contexts_.size()) {
        return;
    }
    // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
    // subpass, so their tag needs to be different from the layout and load operations below.
    current_subpass_++;
    subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
    RecordLayoutTransitions(barrier_tag);
    RecordLoadOperations(load_tag);
}

void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag store_tag,
                                                  const ResourceUsageTag barrier_tag) {
    // Add the resolve and store accesses
    CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
    CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);

    // Export the accesses from the renderpass...
    external_context->ResolveChildContexts(subpass_contexts_);

    // Add the "finalLayout" transitions to external
    // Get them from the extra entry where we're hiding them.
    // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
    // TODO Aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
    // that had multiple final layout transitions from multiple final subpasses.
    const auto &final_transitions = rp_state_->subpass_transitions.back();
    for (const auto &transition : final_transitions) {
        const AttachmentViewGen &view_gen = attachment_views_[transition.attachment];
        const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
        assert(&subpass_contexts_[transition.prev_pass] == last_trackback.source_subpass);
        ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), barrier_tag);
        for (const auto &barrier : last_trackback.barriers) {
            barrier_action.EmplaceBack(PipelineBarrierOp(QueueSyncState::kQueueIdInvalid, barrier, true));
        }
        external_context->ApplyUpdateAction(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action);
    }
}

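// SyncExecScope folds the spec's "logically earlier/later stages" rules into precomputed masks. A hedged example of what
// MakeSrc is expected to produce (stage list abbreviated; the exact expansion lives in sync_utils):
//
//     auto src = SyncExecScope::MakeSrc(VK_QUEUE_GRAPHICS_BIT, VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR);
//     // src.exec_scope now also contains the logically earlier graphics stages (vertex input, vertex shader,
//     // early/late fragment tests, ...) per the implicit execution-dependency rules, while MakeDst would instead
//     // pull in the logically *later* stages.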
SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param,
                                     const VkPipelineStageFlags2KHR disabled_feature_mask) {
    SyncExecScope result;
    result.mask_param = mask_param;
    result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags, disabled_feature_mask);
    result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
    result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.expanded_mask);
    return result;
}

SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
    SyncExecScope result;
    result.mask_param = mask_param;
    result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
    result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
    result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.expanded_mask);
    return result;
}

SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst)
    : src_exec_scope(src), src_access_scope(0), dst_exec_scope(dst), dst_access_scope(0) {}

SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst, const SyncBarrier::AllAccess &)
    : src_exec_scope(src), src_access_scope(src.valid_accesses), dst_exec_scope(dst), dst_access_scope(src.valid_accesses) {}

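// The constructors below all reduce a (stage mask, access mask) pair to the accesses actually valid for the expanded
// stages. For illustration (hypothetical masks): a barrier with srcStageMask = TRANSFER and srcAccessMask =
// TRANSFER_WRITE | SHADER_WRITE keeps only TRANSFER_WRITE in src_access_scope, since SHADER_WRITE is not a valid access
// for the transfer stage, mirroring the spec rule that access masks apply only to stages they are valid for.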
template <typename Barrier>
SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst)
    : src_exec_scope(src),
      src_access_scope(SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask)),
      dst_exec_scope(dst),
      dst_access_scope(SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask)) {}

SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
    const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
    if (barrier) {
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
        src_exec_scope = src;
        src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);

        auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
        dst_exec_scope = dst;
        dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);

    } else {
        auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
        src_exec_scope = src;
        src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);

        auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
        dst_exec_scope = dst;
        dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
    }
}

template <typename Barrier>
SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
    auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
    src_exec_scope = src.exec_scope;
    src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);

    auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
    dst_exec_scope = dst.exec_scope;
    dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
}

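// Barrier application is split into a "pending" accumulation step and a commit step (ApplyPendingBarriers) so that a
// single command with N barriers behaves atomically: every barrier is evaluated against the *pre-barrier* access state,
// and the accumulated effects (dependency chains, layout transitions) are resolved once at the end.
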
// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
    const UntaggedScopeOps scope;
    for (const auto &barrier : barriers) {
        ApplyBarrier(scope, barrier, layout_transition);
    }
}

// ApplyBarriersImmediate is designed for *fully* inclusive barrier lists without layout transitions. The intended use is
// inter-subpass barriers for lazy evaluation of parent context memory ranges. Subpass layout transitions are *not* done
// lazily, s.t. no previous access reports should need layout transitions.
void ResourceAccessState::ApplyBarriersImmediate(const std::vector<SyncBarrier> &barriers) {
    assert(!pending_layout_transition);  // This should never be called in the middle of another barrier application
    assert(pending_write_barriers.none());
    assert(!pending_write_dep_chain);
    const UntaggedScopeOps scope;
    for (const auto &barrier : barriers) {
        ApplyBarrier(scope, barrier, false);
    }
    ApplyPendingBarriers(kInvalidTag);  // There can't be any need for this tag
}
HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    const auto usage_stage = PipelineStageBit(usage_index);
    if (IsRead(usage)) {
        if (IsRAWHazard(usage_stage, usage)) {
            hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
        }
    } else {
        // Write operation:
        // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any*)
        // If reads exist -- test only against them because either:
        //  * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
        //  * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
        //    the current write happens after the reads, so just test the write against the reads
        // Otherwise test against last_write
        //
        // Look for a casus belli for WAR
        if (last_reads.size()) {
            for (const auto &read_access : last_reads) {
                if (IsReadHazard(usage_stage, read_access)) {
                    hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
                    break;
                }
            }
        } else if (last_write.any() && IsWriteHazard(usage)) {
            // Write-After-Write check -- if we have a previous write to test against
            hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
        }
    }
    return hazard;
}
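
// A hedged example of the rules above, with hypothetical tags and accesses: given last_write =
// SYNC_COMPUTE_SHADER_SHADER_STORAGE_WRITE at tag 10 and no barrier applied since, a shader-read usage reports
// READ_AFTER_WRITE; if instead an unbarriered read at tag 11 had been recorded via Update(), a new write usage would
// report WRITE_AFTER_READ against that read rather than WRITE_AFTER_WRITE against the tag 10 write.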

HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering ordering_rule,
                                               QueueId queue_id) const {
    const auto &ordering = GetOrderingRules(ordering_rule);
    return DetectHazard(usage_index, ordering, queue_id);
}

HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const OrderingBarrier &ordering,
                                               QueueId queue_id) const {
    // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
    HazardResult hazard;
    const auto usage_bit = FlagBit(usage_index);
    const auto usage_stage = PipelineStageBit(usage_index);
    const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
    const bool last_write_is_ordered = (last_write & ordering.access_scope).any() && (write_queue == queue_id);
    if (IsRead(usage_bit)) {
        // Exclude RAW if there is no write, or the write is not the most recent operation w.r.t. usage
        bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
        if (is_raw_hazard) {
            // NOTE: we know last_write is non-zero
            // See if the ordering rules save us from the simple RAW check above
            // First check to see if the current usage is covered by the ordering rules
            const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
            const bool usage_is_ordered =
                (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
            if (usage_is_ordered) {
                // Now see if the most recent write (or a subsequent read) is ordered
                const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(queue_id, ordering));
                is_raw_hazard = !most_recent_is_ordered;
            }
        }
        if (is_raw_hazard) {
            hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
        }
    } else if (usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
        // For image layout transitions, the barrier represents the first synchronization/access scope of the layout transition
        return DetectBarrierHazard(usage_index, queue_id, ordering.exec_scope, ordering.access_scope);
    } else {
        // Only check for WAW if there are no reads since last_write
        bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
        if (last_reads.size()) {
            // Look for any WAR hazards outside the ordered set of stages
            VkPipelineStageFlags2KHR ordered_stages = 0;
            if (usage_write_is_ordered) {
                // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
                ordered_stages = GetOrderedStages(queue_id, ordering);
            }
            // If we're tracking any reads that aren't ordered against the current write, got to check 'em all.
            if ((ordered_stages & last_read_stages) != last_read_stages) {
                for (const auto &read_access : last_reads) {
                    if (read_access.stage & ordered_stages) continue;  // but we can skip the ordered ones
                    if (IsReadHazard(usage_stage, read_access)) {
                        hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
                        break;
                    }
                }
            }
        } else if (last_write.any() && !(last_write_is_ordered && usage_write_is_ordered)) {
            bool ilt_ilt_hazard = false;
            if ((usage_index == SYNC_IMAGE_LAYOUT_TRANSITION) && (usage_bit == last_write)) {
                // ILT after ILT is a special case where we check the 2nd access scope of the first ILT against the first access
                // scope of the second ILT, which has been passed (smuggled?) in the ordering barrier
                ilt_ilt_hazard = !(write_barriers & ordering.access_scope).any();
            }
            if (ilt_ilt_hazard || IsWriteHazard(usage_bit)) {
                hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
            }
        }
    }
    return hazard;
}

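// Replay check used when a recorded context (e.g. an executed secondary command buffer) is applied against this access
// state: each "first access" the recording made within tag_range is re-run through the ordering-aware detection above,
// with the recording's read stages and first-write layout ordering folded into the ordering barrier so accesses already
// ordered inside the recording are not re-reported against the active context.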
HazardResult ResourceAccessState::DetectHazard(const ResourceAccessState &recorded_use, QueueId queue_id,
                                               const ResourceUsageRange &tag_range) const {
    HazardResult hazard;
    using Size = FirstAccesses::size_type;
    const auto &recorded_accesses = recorded_use.first_accesses_;
    Size count = recorded_accesses.size();
    if (count) {
        const auto &last_access = recorded_accesses.back();
        bool do_write_last = IsWrite(last_access.usage_index);
        if (do_write_last) --count;

        for (Size i = 0; i < count; ++i) {
            const auto &first = recorded_accesses[i];
            // Skip and quit logic
            if (first.tag < tag_range.begin) continue;
            if (first.tag >= tag_range.end) {
                do_write_last = false;  // ignore last since we know it can't be in tag_range
                break;
            }

            hazard = DetectHazard(first.usage_index, first.ordering_rule, queue_id);
            if (hazard.hazard) {
                hazard.AddRecordedAccess(first);
                break;
            }
        }

        if (do_write_last && tag_range.includes(last_access.tag)) {
            // Writes are a bit special... both for the "most recent" access logic, and layout transition specific logic
            OrderingBarrier barrier = GetOrderingRules(last_access.ordering_rule);
            if (last_access.usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
                // Or in the layout first access scope as a barrier... IFF the usage is an ILT
                // this was saved off in the "apply barriers" logic to simplify ILT access checks as they straddle
                // the barrier that applies them
                barrier |= recorded_use.first_write_layout_ordering_;
            }
            // Any read stages present in the recorded context (this) are most recent to the write, and thus mask those stages in
            // the active context
            if (recorded_use.first_read_stages_) {
                // we need to ignore the first use read stages in the active context (so we add them to the ordering rule),
                // reads in the active context are not "most recent" as all recorded context operations are *after* them
                // This suppresses only RAW checks for stages present in the recorded context, but not those only present in the
                // active context.
                barrier.exec_scope |= recorded_use.first_read_stages_;
                // if there are any first use reads, we suppress WAW by injecting the active context write in the ordering rule
                barrier.access_scope |= FlagBit(last_access.usage_index);
            }
            hazard = DetectHazard(last_access.usage_index, barrier, queue_id);
            if (hazard.hazard) {
                hazard.AddRecordedAccess(last_access);
            }
        }
    }
    return hazard;
}

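// Illustrative case (hypothetical render pass layout): two subpasses with no dependency path between them, one sampling
// an image the other renders to, race regardless of recording order and are reported below as *_RACING_* rather than
// *_AFTER_* hazards.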
// Asynchronous hazards occur between subpasses with no connection through the DAG
HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag start_tag) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
    // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
    // the raster ordering rules.
    if (IsRead(usage)) {
        if (last_write.any() && (write_tag >= start_tag)) {
            hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
        }
    } else {
        if (last_write.any() && (write_tag >= start_tag)) {
            hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
        } else if (last_reads.size() > 0) {
            // Any reads during the other subpass will conflict with this write, so we need to check them all.
            for (const auto &read_access : last_reads) {
                if (read_access.tag >= start_tag) {
                    hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
                    break;
                }
            }
        }
    }
    return hazard;
}

HazardResult ResourceAccessState::DetectAsyncHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
                                                    ResourceUsageTag start_tag) const {
    HazardResult hazard;
    for (const auto &first : recorded_use.first_accesses_) {
        // Skip and quit logic
        if (first.tag < tag_range.begin) continue;
        if (first.tag >= tag_range.end) break;

        hazard = DetectAsyncHazard(first.usage_index, start_tag);
        if (hazard.hazard) {
            hazard.AddRecordedAccess(first);
            break;
        }
    }
    return hazard;
}

HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, QueueId queue_id,
                                                      VkPipelineStageFlags2KHR src_exec_scope,
                                                      const SyncStageAccessFlags &src_access_scope) const {
    // Only supporting image layout transitions for now
    assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
    HazardResult hazard;
    // only test for WAW if there are no intervening read operations.
    // See DetectHazard(SyncStageAccessIndex) above for more details.
    if (last_reads.size()) {
        // Look at the reads if any
        for (const auto &read_access : last_reads) {
            if (read_access.IsReadBarrierHazard(queue_id, src_exec_scope)) {
                hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
                break;
            }
        }
    } else if (last_write.any() && IsWriteBarrierHazard(queue_id, src_exec_scope, src_access_scope)) {
        hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
    }

    return hazard;
}

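// Event variant (vkCmdSetEvent/vkCmdWaitEvents): unlike a pipeline barrier, an event's first scope is frozen when the
// event is set, so accesses are compared against scope_state, the state captured at event_tag. Anything this state saw
// *after* the event cannot be covered by the wait and is reported as a hazard outright.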
HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, const ResourceAccessState &scope_state,
                                                      VkPipelineStageFlags2KHR src_exec_scope,
                                                      const SyncStageAccessFlags &src_access_scope, QueueId event_queue,
                                                      ResourceUsageTag event_tag) const {
    // Only supporting image layout transitions for now
    assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
    HazardResult hazard;

    if ((write_tag >= event_tag) && last_write.any()) {
        // Any write after the event precludes the possibility of being in the first access scope for the layout transition
        hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
    } else {
        // only test for WAW if there are no intervening read operations.
        // See DetectHazard(SyncStageAccessIndex) above for more details.
        if (last_reads.size()) {
            // Look at the reads if any... if reads exist, they are either the reason the access is in the event
            // first scope, or they are a hazard.
            const ReadStates &scope_reads = scope_state.last_reads;
            const ReadStates::size_type scope_read_count = scope_reads.size();
            // Since there hasn't been a write:
            //  * The current read state is a superset of the scoped one
            //  * The stage order is the same.
            assert(last_reads.size() >= scope_read_count);
            for (ReadStates::size_type read_idx = 0; read_idx < scope_read_count; ++read_idx) {
                const ReadState &scope_read = scope_reads[read_idx];
                const ReadState &current_read = last_reads[read_idx];
                assert(scope_read.stage == current_read.stage);
                if (current_read.tag > event_tag) {
                    // The read is more recent than the set event scope, thus no barrier from the wait/ILT.
                    hazard.Set(this, usage_index, WRITE_AFTER_READ, current_read.access, current_read.tag);
                } else {
                    // The read is in the event's first synchronization scope, so we use a barrier hazard check
                    // If the read stage is not in the src sync scope
                    // *AND* not execution chained with an existing sync barrier (that's the "or")
                    // then the barrier access is unsafe (R/W after R)
                    if (scope_read.IsReadBarrierHazard(event_queue, src_exec_scope)) {
                        hazard.Set(this, usage_index, WRITE_AFTER_READ, scope_read.access, scope_read.tag);
                        break;
                    }
                }
            }
            if (!hazard.IsHazard() && (last_reads.size() > scope_read_count)) {
                const ReadState &current_read = last_reads[scope_read_count];
                hazard.Set(this, usage_index, WRITE_AFTER_READ, current_read.access, current_read.tag);
            }
        } else if (last_write.any()) {
            // if there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
            // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
            // So do a normal barrier hazard check
            if (scope_state.IsWriteBarrierHazard(event_queue, src_exec_scope, src_access_scope)) {
                hazard.Set(&scope_state, usage_index, WRITE_AFTER_WRITE, scope_state.last_write, scope_state.write_tag);
            }
        }
    }

    return hazard;
}

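// Resolve merges two access states covering the same address range, e.g. when subpass contexts are folded back into
// their parent context. The merge keeps the most recent write, unions barriers for tag-equal writes and reads, and then
// interleaves the two "first access" lists by tag so that first-access replay (see DetectHazard above) still sees a
// correctly ordered history.
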
// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
void ResourceAccessState::Resolve(const ResourceAccessState &other) {
    if (write_tag < other.write_tag) {
        // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
        // operation
        *this = other;
    } else if (other.write_tag == write_tag) {
        // In the *equals* case for write operations, we merge the write barriers and the read state (but without the
        // dependency chaining logic or any stage expansion)
        write_barriers |= other.write_barriers;
        pending_write_barriers |= other.pending_write_barriers;
        pending_layout_transition |= other.pending_layout_transition;
        pending_write_dep_chain |= other.pending_write_dep_chain;
        pending_layout_ordering_ |= other.pending_layout_ordering_;

        // Merge the read states
        const auto pre_merge_count = last_reads.size();
        const auto pre_merge_stages = last_read_stages;
        for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
            auto &other_read = other.last_reads[other_read_index];
            if (pre_merge_stages & other_read.stage) {
                // Merge in the barriers for read stages that exist in *both* this and other
                // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index,
                //       but we should wait on profiling data for that.
                for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
                    auto &my_read = last_reads[my_read_index];
                    if (other_read.stage == my_read.stage) {
                        if (my_read.tag < other_read.tag) {
                            // Other is more recent, copy in the state
                            my_read.access = other_read.access;
                            my_read.tag = other_read.tag;
                            my_read.queue = other_read.queue;
                            my_read.pending_dep_chain = other_read.pending_dep_chain;
                            // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
                            //       May require tracking more than one access per stage.
                            my_read.barriers = other_read.barriers;
                            my_read.sync_stages = other_read.sync_stages;
                            if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
                                // Since I'm overwriting the fragment stage read, also update the input attachment info
                                // as this is the only stage that affects it.
                                input_attachment_read = other.input_attachment_read;
                            }
                        } else if (other_read.tag == my_read.tag) {
                            // The read tags match so merge the barriers
                            my_read.barriers |= other_read.barriers;
                            my_read.sync_stages |= other_read.sync_stages;
                            my_read.pending_dep_chain |= other_read.pending_dep_chain;
                        }

                        break;
                    }
                }
            } else {
                // The other read stage doesn't exist in this, so add it.
                last_reads.emplace_back(other_read);
                last_read_stages |= other_read.stage;
                if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
                    input_attachment_read = other.input_attachment_read;
                }
            }
        }
        read_execution_barriers |= other.read_execution_barriers;
    }  // the else clause would be that the other write is before this write... in which case we supersede the other state and
       // ignore it.

    // Merge first access information by making a copy of this first_access and reconstructing with a shuffle
    // of the copy and other into this using the update first logic.
    // NOTE: All sorts of additional cleverness could be put into short circuits. (for example back is write and is before front
    //       of the other first_accesses... )
    if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
        FirstAccesses firsts(std::move(first_accesses_));
        first_accesses_.clear();
        first_read_stages_ = 0U;
        auto a = firsts.begin();
        auto a_end = firsts.end();
        for (auto &b : other.first_accesses_) {
            // TODO: Determine whether some tag offset will be needed for PHASE II
            while ((a != a_end) && (a->tag < b.tag)) {
                UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
                ++a;
            }
            UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
        }
        for (; a != a_end; ++a) {
            UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
        }
    }
}

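// Update records a new access after validation has run. A hedged walk-through with hypothetical tags: a VERTEX_SHADER
// read at tag 5 followed by a FRAGMENT_SHADER read at tag 6 produces two entries in last_reads (one per stage), while a
// subsequent write calls SetWrite, which clobbers both reads and becomes the sole "most recent" access.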
John Zulauf14940722021-04-12 15:19:02 -06003400void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003401 // Move this logic in the ResourceStateTracker as methods, thereof (or we'll repeat it for every flavor of resource...
3402 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003403 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003404 // Mulitple outstanding reads may be of interest and do dependency chains independently
3405 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3406 const auto usage_stage = PipelineStageBit(usage_index);
3407 if (usage_stage & last_read_stages) {
John Zulaufecf4ac52022-06-06 10:08:42 -06003408 const auto not_usage_stage = ~usage_stage;
John Zulaufab7756b2020-12-29 16:10:16 -07003409 for (auto &read_access : last_reads) {
3410 if (read_access.stage == usage_stage) {
3411 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf1d5f9c12022-05-13 14:51:08 -06003412 } else if (read_access.barriers & usage_stage) {
John Zulaufecf4ac52022-06-06 10:08:42 -06003413 // If the current access is barriered to this stage, mark it as "known to happen after"
John Zulauf1d5f9c12022-05-13 14:51:08 -06003414 read_access.sync_stages |= usage_stage;
John Zulaufecf4ac52022-06-06 10:08:42 -06003415 } else {
3416 // If the current access is *NOT* barriered to this stage it needs to be cleared.
3417 // Note: this is possible because semaphores can *clear* effective barriers, so the assumption
3418 // that sync_stages is a subset of barriers may not apply.
3419 read_access.sync_stages &= not_usage_stage;
John Zulauf9cb530d2019-09-30 14:14:10 -06003420 }
3421 }
3422 } else {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003423 for (auto &read_access : last_reads) {
3424 if (read_access.barriers & usage_stage) {
3425 read_access.sync_stages |= usage_stage;
3426 }
3427 }
John Zulaufab7756b2020-12-29 16:10:16 -07003428 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003429 last_read_stages |= usage_stage;
3430 }
John Zulauf4285ee92020-09-23 10:20:52 -06003431
3432         // Fragment shader reads come in two flavors, and we need to record whether the current one is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07003433 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003434 // TODO Revisit re: multiple reads for a given stage
3435 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003436 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003437 } else {
3438 // Assume write
3439 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003440 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003441 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003442 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003443}
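// Illustrative sketch (hypothetical stage masks, not validator state): the read bookkeeping above in
// terms of plain VkPipelineStageFlags2 values. Suppose an existing VERTEX_SHADER read is barriered to
// FRAGMENT_SHADER, and a new FRAGMENT_SHADER read arrives:
//
//   VkPipelineStageFlags2 vertex_barriers = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;  // vertex read's barriers
//   VkPipelineStageFlags2 vertex_sync_stages = VK_PIPELINE_STAGE_2_NONE;
//   VkPipelineStageFlags2 usage_stage = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;      // the new read
//   if (vertex_barriers & usage_stage) {
//       vertex_sync_stages |= usage_stage;  // the vertex read is known to happen-before the new read
//   }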
John Zulauf5f13a792020-03-10 07:31:21 -06003444
John Zulauf89311b42020-09-29 16:28:47 -06003445// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3446// if the last_reads/last_write were unsafe, we've reported them; in either case the prior access is irrelevant.
3447// We can overwrite them as *this* write is now after them.
3448//
3449// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
John Zulauf14940722021-04-12 15:19:02 -06003450void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag tag) {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003451 ClearRead();
3452 ClearWrite();
John Zulauf89311b42020-09-29 16:28:47 -06003453 write_tag = tag;
3454 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003455}
3456
John Zulauf1d5f9c12022-05-13 14:51:08 -06003457void ResourceAccessState::ClearWrite() {
3458 read_execution_barriers = VK_PIPELINE_STAGE_2_NONE;
3459 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
3460 write_barriers.reset();
3461 write_dependency_chain = VK_PIPELINE_STAGE_2_NONE;
3462 last_write.reset();
3463
3464 write_tag = 0;
3465 write_queue = QueueSyncState::kQueueIdInvalid;
3466}
3467
3468void ResourceAccessState::ClearRead() {
3469 last_reads.clear();
3470 last_read_stages = VK_PIPELINE_STAGE_2_NONE;
3471}
3472
John Zulauf8a7b03d2022-09-20 11:41:19 -06003473void ResourceAccessState::ClearPending() {
3474 pending_write_dep_chain = VK_PIPELINE_STAGE_2_NONE;
3475 pending_layout_transition = false;
3476 pending_write_barriers.reset();
3477 pending_layout_ordering_ = OrderingBarrier();
3478}
3479
3480void ResourceAccessState::ClearFirstUse() {
3481 first_accesses_.clear();
3482 first_read_stages_ = VK_PIPELINE_STAGE_2_NONE;
3483 first_write_layout_ordering_ = OrderingBarrier();
3484}
3485
John Zulauf89311b42020-09-29 16:28:47 -06003486// Apply the memory barrier without updating the existing barriers. The execution barrier
3487// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3488// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3489// replace the current write barriers or add to them, so accumulate to pending as well.
John Zulaufb7578302022-05-19 13:50:18 -06003490template <typename ScopeOps>
3491void ResourceAccessState::ApplyBarrier(ScopeOps &&scope, const SyncBarrier &barrier, bool layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06003492 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3493 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003494 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
John Zulaufb7578302022-05-19 13:50:18 -06003495    // transition, under the theory of "most recent access". If the resource access *isn't* safe
John Zulauf86356ca2020-10-19 11:46:41 -06003496    // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
3497    // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufb7578302022-05-19 13:50:18 -06003498 if (layout_transition || scope.WriteInScope(barrier, *this)) {
John Zulauf89311b42020-09-29 16:28:47 -06003499 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003500 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003501 if (layout_transition) {
3502 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3503 }
John Zulaufa0a98292020-09-18 09:30:10 -06003504 }
John Zulauf89311b42020-09-29 16:28:47 -06003505    // Track layout transition as pending as we can't modify last_write until all barriers are processed
3506 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003507
John Zulauf89311b42020-09-29 16:28:47 -06003508 if (!pending_layout_transition) {
John Zulaufb7578302022-05-19 13:50:18 -06003509 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/chains
3510 // don't need to be tracked as we're just going to clear them.
John Zulauf434c4e62022-05-19 16:03:56 -06003511 VkPipelineStageFlags2 stages_in_scope = VK_PIPELINE_STAGE_2_NONE;
3512
John Zulaufab7756b2020-12-29 16:10:16 -07003513 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003514 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufb7578302022-05-19 13:50:18 -06003515 if (scope.ReadInScope(barrier, read_access)) {
John Zulauf434c4e62022-05-19 16:03:56 -06003516                // We'll apply the barrier in the next loop, because it's DRY'er to do it in one place.
3517 stages_in_scope |= read_access.stage;
3518 }
3519 }
3520
3521 for (auto &read_access : last_reads) {
3522 if (0 != ((read_access.stage | read_access.sync_stages) & stages_in_scope)) {
3523 // If this stage, or any stage known to be synchronized after it are in scope, apply the barrier to this read
3524 // NOTE: Forwarding barriers to known prior stages changes the sync_stages from shallow to deep, because the
3525 // barriers used to determine sync_stages have been propagated to all known earlier stages
John Zulaufc523bf62021-02-16 08:20:34 -07003526 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003527 }
3528 }
John Zulaufa0a98292020-09-18 09:30:10 -06003529 }
John Zulaufa0a98292020-09-18 09:30:10 -06003530}
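// Illustrative sketch (hypothetical masks): the two passes above matter because sync_stages is
// transitive. Gathering stages_in_scope first, then testing (stage | sync_stages), lets a barrier
// whose source scope names only FRAGMENT_SHADER also chain off a VERTEX_SHADER read already known to
// happen-before the fragment read:
//
//   VkPipelineStageFlags2 stages_in_scope = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;  // pass 1 result
//   VkPipelineStageFlags2 vertex_stage = VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT;
//   VkPipelineStageFlags2 vertex_sync_stages = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
//   bool chains = ((vertex_stage | vertex_sync_stages) & stages_in_scope) != 0;  // true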
3531
John Zulauf14940722021-04-12 15:19:02 -06003532void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag tag) {
John Zulauf89311b42020-09-29 16:28:47 -06003533 if (pending_layout_transition) {
John Zulauf4fa68462021-04-26 21:04:22 -06003534 // SetWrite clobbers the last_reads array, and thus we don't have to clear the read_state out.
John Zulauf89311b42020-09-29 16:28:47 -06003535 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003536 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf4fa68462021-04-26 21:04:22 -06003537 TouchupFirstForLayoutTransition(tag, pending_layout_ordering_);
3538 pending_layout_ordering_ = OrderingBarrier();
John Zulauf89311b42020-09-29 16:28:47 -06003539 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003540 }
John Zulauf89311b42020-09-29 16:28:47 -06003541
3542 // Apply the accumulate execution barriers (and thus update chaining information)
John Zulauf4fa68462021-04-26 21:04:22 -06003543 // for layout transition, last_reads is reset by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003544 for (auto &read_access : last_reads) {
3545 read_access.barriers |= read_access.pending_dep_chain;
3546 read_execution_barriers |= read_access.barriers;
3547 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003548 }
3549
3550 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3551 write_dependency_chain |= pending_write_dep_chain;
3552 write_barriers |= pending_write_barriers;
3553 pending_write_dep_chain = 0;
3554 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003555}
3556
John Zulaufecf4ac52022-06-06 10:08:42 -06003557// Assumes signal queue != wait queue
3558void ResourceAccessState::ApplySemaphore(const SemaphoreScope &signal, const SemaphoreScope wait) {
3559 // Semaphores only guarantee the first scope of the signal is before the second scope of the wait.
3560 // If any access isn't in the first scope, there are no guarantees, thus those barriers are cleared
3561 assert(signal.queue != wait.queue);
3562 for (auto &read_access : last_reads) {
3563 if (read_access.ReadInQueueScopeOrChain(signal.queue, signal.exec_scope)) {
3564 // Deflects WAR on wait queue
3565 read_access.barriers = wait.exec_scope;
3566 } else {
3567 // Leave sync stages alone. Update method will clear unsynchronized stages on subsequent reads as needed.
3568 read_access.barriers = VK_PIPELINE_STAGE_2_NONE;
3569 }
3570 }
3571 if (WriteInQueueSourceScopeOrChain(signal.queue, signal.exec_scope, signal.valid_accesses)) {
3572 // Will deflect RAW wait queue, WAW needs a chained barrier on wait queue
3573 read_execution_barriers = wait.exec_scope;
3574 write_barriers = wait.valid_accesses;
3575 } else {
3576 read_execution_barriers = VK_PIPELINE_STAGE_2_NONE;
3577 write_barriers.reset();
3578 }
3579 write_dependency_chain = read_execution_barriers;
3580}
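// Illustrative sketch (hypothetical masks): a semaphore only guarantees signal-first-scope
// happens-before wait-second-scope, so an access in the signal scope has its barrier replaced by the
// wait scope, while one outside the signal scope keeps no guarantee at all:
//
//   VkPipelineStageFlags2 wait_scope = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT;
//   VkPipelineStageFlags2 in_scope_read_barriers = wait_scope;                    // deflects WAR on the wait queue
//   VkPipelineStageFlags2 out_of_scope_read_barriers = VK_PIPELINE_STAGE_2_NONE;  // nothing is promised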
3581
John Zulauf3da08bb2022-08-01 17:56:56 -06003582bool ResourceAccessState::QueueTagPredicate::operator()(QueueId usage_queue, ResourceUsageTag usage_tag) const {
3583 return (usage_queue == queue) && (usage_tag <= tag);
John Zulauf1d5f9c12022-05-13 14:51:08 -06003584}
3585
John Zulauf3da08bb2022-08-01 17:56:56 -06003586bool ResourceAccessState::QueuePredicate::operator()(QueueId usage_queue, ResourceUsageTag) const { return queue == usage_queue; }
John Zulauf1d5f9c12022-05-13 14:51:08 -06003587
John Zulauf3da08bb2022-08-01 17:56:56 -06003588bool ResourceAccessState::TagPredicate::operator()(QueueId, ResourceUsageTag usage_tag) const { return tag <= usage_tag; }
John Zulauf1d5f9c12022-05-13 14:51:08 -06003589
3590// Return if the resulting state is "empty"
3591template <typename Pred>
3592bool ResourceAccessState::ApplyQueueTagWait(Pred &&queue_tag_test) {
3593 VkPipelineStageFlags2KHR sync_reads = VK_PIPELINE_STAGE_2_NONE;
3594
3595 // Use the predicate to build a mask of the read stages we are synchronizing
3596 // Use the sync_stages to also detect reads known to be before any synchronized reads (first pass)
John Zulauf1d5f9c12022-05-13 14:51:08 -06003597 for (auto &read_access : last_reads) {
John Zulauf434c4e62022-05-19 16:03:56 -06003598 if (queue_tag_test(read_access.queue, read_access.tag)) {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003599            // If we know this stage is before any stage we're syncing, or if the predicate tells us that we are waited for...
3600 sync_reads |= read_access.stage;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003601 }
3602 }
3603
John Zulauf434c4e62022-05-19 16:03:56 -06003604    // Now that we know the reads directly in scope, we just need to go over the list again to pick up the "known earlier" stages.
3605 // NOTE: sync_stages is "deep" catching all stages synchronized after it because we forward barriers
3606 uint32_t unsync_count = 0;
3607 for (auto &read_access : last_reads) {
3608 if (0 != ((read_access.stage | read_access.sync_stages) & sync_reads)) {
3609 // This is redundant in the "stage" case, but avoids a second branch to get an accurate count
3610 sync_reads |= read_access.stage;
3611 } else {
3612 ++unsync_count;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003613 }
3614 }
3615
3616 if (unsync_count) {
3617 if (sync_reads) {
3618             // When we have some remaining unsynchronized reads, we have to rewrite the last_reads array.
3619 ReadStates unsync_reads;
3620 unsync_reads.reserve(unsync_count);
3621 VkPipelineStageFlags2KHR unsync_read_stages = VK_PIPELINE_STAGE_2_NONE;
3622 for (auto &read_access : last_reads) {
3623 if (0 == (read_access.stage & sync_reads)) {
3624 unsync_reads.emplace_back(read_access);
3625 unsync_read_stages |= read_access.stage;
3626 }
3627 }
3628 last_read_stages = unsync_read_stages;
3629 last_reads = std::move(unsync_reads);
3630 }
3631 } else {
3632 // Nothing remains (or it was empty to begin with)
3633 ClearRead();
3634 }
3635
3636 bool all_clear = last_reads.size() == 0;
3637 if (last_write.any()) {
3638 if (queue_tag_test(write_queue, write_tag) || sync_reads) {
3639             // Clear any predicated write, or the write from any access with synchronized reads.
3640             // This could drop RAW detection, but only if the synchronized reads were RAW hazards, and given
3641             // the MRR approach to reporting, this is consistent with other drops, especially since fixing the
3642             // RAW with the sync_reads stages would preclude a subsequent RAW.
3643 ClearWrite();
3644 } else {
3645 all_clear = false;
3646 }
3647 }
3648 return all_clear;
3649}
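// Illustrative usage sketch (hypothetical call site; the member order of the predicate's aggregate
// initialization is assumed): the predicate parameter selects which accesses a wait synchronizes,
// e.g. "everything on queue q up to tag t" for a fence wait:
//
//   ResourceAccessState::QueueTagPredicate pred{q, t};
//   bool empty = access_state.ApplyQueueTagWait(pred);  // true if nothing unsynchronized remains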
3650
John Zulaufae842002021-04-15 18:20:55 -06003651bool ResourceAccessState::FirstAccessInTagRange(const ResourceUsageRange &tag_range) const {
3652 if (!first_accesses_.size()) return false;
3653 const ResourceUsageRange first_access_range = {first_accesses_.front().tag, first_accesses_.back().tag + 1};
3654 return tag_range.intersects(first_access_range);
3655}
3656
John Zulauf1d5f9c12022-05-13 14:51:08 -06003657void ResourceAccessState::OffsetTag(ResourceUsageTag offset) {
3658 if (last_write.any()) write_tag += offset;
3659 for (auto &read_access : last_reads) {
3660 read_access.tag += offset;
3661 }
3662 for (auto &first : first_accesses_) {
3663 first.tag += offset;
3664 }
3665}
3666
3667ResourceAccessState::ResourceAccessState()
3668 : write_barriers(~SyncStageAccessFlags(0)),
3669 write_dependency_chain(0),
3670 write_tag(),
3671 write_queue(QueueSyncState::kQueueIdInvalid),
3672 last_write(0),
3673 input_attachment_read(false),
3674 last_read_stages(0),
3675 read_execution_barriers(0),
3676 pending_write_dep_chain(0),
3677 pending_layout_transition(false),
3678 pending_write_barriers(0),
3679 pending_layout_ordering_(),
3680 first_accesses_(),
3681 first_read_stages_(0U),
3682 first_write_layout_ordering_() {}
3683
John Zulauf59e25072020-07-17 10:55:21 -06003684// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07003685VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
3686 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003687
John Zulaufab7756b2020-12-29 16:10:16 -07003688 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003689 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003690 barriers = read_access.barriers;
3691 break;
John Zulauf59e25072020-07-17 10:55:21 -06003692 }
3693 }
John Zulauf4285ee92020-09-23 10:20:52 -06003694
John Zulauf59e25072020-07-17 10:55:21 -06003695 return barriers;
3696}
3697
John Zulauf1d5f9c12022-05-13 14:51:08 -06003698void ResourceAccessState::SetQueueId(QueueId id) {
3699 for (auto &read_access : last_reads) {
3700 if (read_access.queue == QueueSyncState::kQueueIdInvalid) {
3701 read_access.queue = id;
3702 }
3703 }
3704 if (last_write.any() && (write_queue == QueueSyncState::kQueueIdInvalid)) {
3705 write_queue = id;
3706 }
3707}
3708
John Zulauf00119522022-05-23 19:07:42 -06003709bool ResourceAccessState::WriteInChain(VkPipelineStageFlags2KHR src_exec_scope) const {
3710 return 0 != (write_dependency_chain & src_exec_scope);
3711}
3712
3713bool ResourceAccessState::WriteInScope(const SyncStageAccessFlags &src_access_scope) const {
3714 return (src_access_scope & last_write).any();
3715}
3716
John Zulaufec943ec2022-06-29 07:52:56 -06003717bool ResourceAccessState::WriteBarrierInScope(const SyncStageAccessFlags &src_access_scope) const {
3718 return (write_barriers & src_access_scope).any();
3719}
3720
John Zulaufb7578302022-05-19 13:50:18 -06003721bool ResourceAccessState::WriteInSourceScopeOrChain(VkPipelineStageFlags2KHR src_exec_scope,
3722 SyncStageAccessFlags src_access_scope) const {
John Zulauf00119522022-05-23 19:07:42 -06003723 return WriteInChain(src_exec_scope) || WriteInScope(src_access_scope);
3724}
3725
3726bool ResourceAccessState::WriteInQueueSourceScopeOrChain(QueueId queue, VkPipelineStageFlags2KHR src_exec_scope,
3727 SyncStageAccessFlags src_access_scope) const {
3728 return WriteInChain(src_exec_scope) || ((queue == write_queue) && WriteInScope(src_access_scope));
John Zulaufb7578302022-05-19 13:50:18 -06003729}
3730
John Zulaufe0757ba2022-06-10 16:51:45 -06003731bool ResourceAccessState::WriteInEventScope(VkPipelineStageFlags2KHR src_exec_scope, const SyncStageAccessFlags &src_access_scope,
3732 QueueId scope_queue, ResourceUsageTag scope_tag) const {
John Zulaufb7578302022-05-19 13:50:18 -06003733 // The scope logic for events is, if we're asking, the resource usage was flagged as "in the first execution scope" at
3734 // the time of the SetEvent, thus all we need check is whether the access is the same one (i.e. before the scope tag
3735 // in order to know if it's in the excecution scope
John Zulaufe0757ba2022-06-10 16:51:45 -06003736 return (write_tag < scope_tag) && WriteInQueueSourceScopeOrChain(scope_queue, src_exec_scope, src_access_scope);
John Zulaufb7578302022-05-19 13:50:18 -06003737}
3738
John Zulaufec943ec2022-06-29 07:52:56 -06003739bool ResourceAccessState::WriteInChainedScope(VkPipelineStageFlags2KHR src_exec_scope,
3740 const SyncStageAccessFlags &src_access_scope) const {
3741 return WriteInChain(src_exec_scope) && WriteBarrierInScope(src_access_scope);
3742}
3743
John Zulauf8a7b03d2022-09-20 11:41:19 -06003744// As ReadStates must be unique by stage, this is as good a sort as needed
3745bool operator<(const ResourceAccessState::ReadState &lhs, const ResourceAccessState::ReadState &rhs) {
3746 return lhs.stage < rhs.stage;
3747}
3748
3749void ResourceAccessState::Normalize() {
3750 if (!last_write.any()) {
3751 ClearWrite();
3752 }
3753 if (!last_reads.size()) {
3754 ClearRead();
3755 } else {
3756 // Sort the reads in stage order for consistent comparisons
3757 std::sort(last_reads.begin(), last_reads.end());
3758 for (auto &read_access : last_reads) {
3759 read_access.Normalize();
3760 }
3761 }
3762
3763 ClearPending();
3764 ClearFirstUse();
3765}
3766
3767void ResourceAccessState::GatherReferencedTags(ResourceUsageTagSet &used) const {
3768 if (last_write.any()) {
3769 used.insert(write_tag);
3770 }
3771
3772 for (const auto &read_access : last_reads) {
3773 used.insert(read_access.tag);
3774 }
3775}
3776
John Zulaufcb7e1672022-05-04 13:46:08 -06003777bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003778 assert(IsRead(usage));
3779 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3780 // * the previous reads are not hazards, and thus last_write must be visible and available to
3781 // any reads that happen after.
3782 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3783     //  the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003784 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003785}
3786
John Zulaufec943ec2022-06-29 07:52:56 -06003787VkPipelineStageFlags2 ResourceAccessState::GetOrderedStages(QueueId queue_id, const OrderingBarrier &ordering) const {
3788 // At apply queue submission order limits on the effect of ordering
3789 VkPipelineStageFlags2 non_qso_stages = VK_PIPELINE_STAGE_2_NONE;
3790 if (queue_id != QueueSyncState::kQueueIdInvalid) {
3791 for (const auto &read_access : last_reads) {
3792 if (read_access.queue != queue_id) {
3793 non_qso_stages |= read_access.stage;
3794 }
3795 }
3796 }
John Zulauf4285ee92020-09-23 10:20:52 -06003797    // Whether the stages are in the ordering scope only matters if the current write is ordered
John Zulaufec943ec2022-06-29 07:52:56 -06003798 const VkPipelineStageFlags2 read_stages_in_qso = last_read_stages & ~non_qso_stages;
3799 VkPipelineStageFlags2 ordered_stages = read_stages_in_qso & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06003800    // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003801 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003802 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003803        // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07003804 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06003805 }
3806
3807 return ordered_stages;
3808}
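// Illustrative sketch (hypothetical masks): ordering only reaches reads on the submitting queue.
// With last_read_stages = VERTEX | COMPUTE, where the COMPUTE read came from another queue:
//
//   VkPipelineStageFlags2 non_qso = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT;  // other-queue reads
//   VkPipelineStageFlags2 in_qso =
//       (VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT) & ~non_qso;
//   VkPipelineStageFlags2 ordered = in_qso & ordering_exec_scope;  // the compute read is never ordered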
3809
John Zulauf14940722021-04-12 15:19:02 -06003810void ResourceAccessState::UpdateFirst(const ResourceUsageTag tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003811 // Only record until we record a write.
3812 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003813 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07003814 if (0 == (usage_stage & first_read_stages_)) {
3815 // If this is a read we haven't seen or a write, record.
John Zulauf4fa68462021-04-26 21:04:22 -06003816 // We always need to know what stages were found prior to write
John Zulauffaea0ee2021-01-14 14:01:32 -07003817 first_read_stages_ |= usage_stage;
John Zulauf4fa68462021-04-26 21:04:22 -06003818 if (0 == (read_execution_barriers & usage_stage)) {
3819 // If this stage isn't masked then we add it (since writes map to usage_stage 0, this also records writes)
3820 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3821 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003822 }
3823 }
3824}
3825
John Zulauf4fa68462021-04-26 21:04:22 -06003826void ResourceAccessState::TouchupFirstForLayoutTransition(ResourceUsageTag tag, const OrderingBarrier &layout_ordering) {
3827 // Only call this after recording an image layout transition
3828 assert(first_accesses_.size());
3829 if (first_accesses_.back().tag == tag) {
3830     // If this layout transition is the first write, add the additional ordering rules that guard the ILT
Samuel Iglesias Gonsálvez9b4660b2021-10-21 08:50:39 +02003831 assert(first_accesses_.back().usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
John Zulauf4fa68462021-04-26 21:04:22 -06003832 first_write_layout_ordering_ = layout_ordering;
3833 }
3834}
3835
John Zulauf1d5f9c12022-05-13 14:51:08 -06003836ResourceAccessState::ReadState::ReadState(VkPipelineStageFlags2KHR stage_, SyncStageAccessFlags access_,
3837 VkPipelineStageFlags2KHR barriers_, ResourceUsageTag tag_)
3838 : stage(stage_),
3839 access(access_),
3840 barriers(barriers_),
3841 sync_stages(VK_PIPELINE_STAGE_2_NONE),
3842 tag(tag_),
3843 queue(QueueSyncState::kQueueIdInvalid),
3844 pending_dep_chain(VK_PIPELINE_STAGE_2_NONE) {}
3845
John Zulaufee984022022-04-13 16:39:50 -06003846void ResourceAccessState::ReadState::Set(VkPipelineStageFlags2KHR stage_, const SyncStageAccessFlags &access_,
3847 VkPipelineStageFlags2KHR barriers_, ResourceUsageTag tag_) {
3848 stage = stage_;
3849 access = access_;
3850 barriers = barriers_;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003851 sync_stages = VK_PIPELINE_STAGE_2_NONE;
John Zulaufee984022022-04-13 16:39:50 -06003852 tag = tag_;
John Zulauf1d5f9c12022-05-13 14:51:08 -06003853 pending_dep_chain = VK_PIPELINE_STAGE_2_NONE; // If this is a new read, we aren't applying a barrier set.
John Zulaufee984022022-04-13 16:39:50 -06003854}
3855
John Zulauf00119522022-05-23 19:07:42 -06003856// Scope test including "queue submission order" effects. Specifically, accesses from a different queue are not
3857// considered to be in "queue submission order" with barriers, events, or semaphore signalling, but any barriers
3858 // that have been applied (via semaphore) to those accesses can be chained off of.
3859bool ResourceAccessState::ReadState::ReadInQueueScopeOrChain(QueueId scope_queue, VkPipelineStageFlags2 exec_scope) const {
3860 VkPipelineStageFlags2 effective_stages = barriers | ((scope_queue == queue) ? stage : VK_PIPELINE_STAGE_2_NONE);
3861 return (exec_scope & effective_stages) != 0;
3862}
3863
John Zulauf697c0e12022-04-19 16:31:12 -06003864ResourceUsageRange SyncValidator::ReserveGlobalTagRange(size_t tag_count) const {
3865 ResourceUsageRange reserve;
3866 reserve.begin = tag_limit_.fetch_add(tag_count);
3867 reserve.end = reserve.begin + tag_count;
3868 return reserve;
3869}
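// Illustrative sketch (hypothetical standalone code): fetch_add hands each caller a disjoint,
// monotonically increasing half-open range without a lock, which is why the const method above can
// safely reserve from the atomic tag_limit_ even with concurrent submitters:
//
//   std::atomic<size_t> limit{0};
//   auto reserve = [&limit](size_t count) {
//       size_t begin = limit.fetch_add(count);
//       return std::make_pair(begin, begin + count);  // [begin, begin + count)
//   };
//   auto r0 = reserve(3);  // {0, 3}
//   auto r1 = reserve(2);  // {3, 5} -- never overlaps r0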
3870
John Zulauf3da08bb2022-08-01 17:56:56 -06003871void SyncValidator::ApplyTaggedWait(QueueId queue_id, ResourceUsageTag tag) {
3872 // We need to go through every queue batch context and clear all accesses this wait synchronizes
3873 // As usual -- two groups, the "last batch" and the signaled semaphores
3874 // NOTE: Since ApplyTaggedWait crawls through every usage in every ResourceAccessState in the AccessContext of *every*
3875 // QueueBatchContext, track which we've done to avoid duplicate traversals
3876 QueueBatchContext::BatchSet queue_batch_contexts = GetQueueBatchSnapshot();
3877 for (auto &batch : queue_batch_contexts) {
3878 batch->ApplyTaggedWait(queue_id, tag);
3879 }
3880}
3881
3882void SyncValidator::UpdateFenceWaitInfo(VkFence fence, QueueId queue_id, ResourceUsageTag tag) {
3883 if (fence != VK_NULL_HANDLE) {
3884 // Overwrite the current fence wait information
3885 // NOTE: Not doing fence usage validation here, leaving that in CoreChecks intentionally
3886 auto fence_state = Get<FENCE_STATE>(fence);
3887 waitable_fences_[fence] = {fence_state, tag, queue_id};
3888 }
3889}
3890
3891void SyncValidator::WaitForFence(VkFence fence) {
3892 auto fence_it = waitable_fences_.find(fence);
3893 if (fence_it != waitable_fences_.end()) {
3894 // The fence may no longer be waitable for several valid reasons.
3895 FenceSyncState &wait_for = fence_it->second;
3896 ApplyTaggedWait(wait_for.queue_id, wait_for.tag);
3897 waitable_fences_.erase(fence_it);
3898 }
3899}
3900
John Zulaufbbda4572022-04-19 16:20:45 -06003901const QueueSyncState *SyncValidator::GetQueueSyncState(VkQueue queue) const {
3902 return GetMappedPlainFromShared(queue_sync_states_, queue);
3903}
3904
3905QueueSyncState *SyncValidator::GetQueueSyncState(VkQueue queue) { return GetMappedPlainFromShared(queue_sync_states_, queue); }
3906
3907std::shared_ptr<const QueueSyncState> SyncValidator::GetQueueSyncStateShared(VkQueue queue) const {
3908 return GetMapped(queue_sync_states_, queue, []() { return std::shared_ptr<QueueSyncState>(); });
3909}
3910
3911std::shared_ptr<QueueSyncState> SyncValidator::GetQueueSyncStateShared(VkQueue queue) {
3912 return GetMapped(queue_sync_states_, queue, []() { return std::shared_ptr<QueueSyncState>(); });
3913}
3914
John Zulaufe0757ba2022-06-10 16:51:45 -06003915template <typename T>
3916struct GetBatchTraits {};
3917template <>
3918struct GetBatchTraits<std::shared_ptr<QueueSyncState>> {
3919 using Batch = std::shared_ptr<QueueBatchContext>;
3920 static Batch Get(const std::shared_ptr<QueueSyncState> &qss) { return qss ? qss->LastBatch() : Batch(); }
3921};
3922
3923template <>
3924struct GetBatchTraits<std::shared_ptr<SignaledSemaphores::Signal>> {
3925 using Batch = std::shared_ptr<QueueBatchContext>;
3926 static Batch Get(const std::shared_ptr<SignaledSemaphores::Signal> &sig) { return sig ? sig->batch : Batch(); }
3927};
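// Illustrative sketch (hypothetical standalone analogue): the traits idiom above, reduced to a
// minimal example -- one generic loop, with per-type "how to extract" specializations:
//
//   template <typename T> struct Extract;  // primary template intentionally undefined
//   template <> struct Extract<int> { static int Get(int v) { return v; } };
//   template <> struct Extract<std::pair<int, int>> {
//       static int Get(const std::pair<int, int> &p) { return p.second; }
//   };
//   template <typename C> int Sum(const C &c) {
//       int total = 0;
//       for (const auto &e : c) total += Extract<typename C::value_type>::Get(e);
//       return total;
//   }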
3928
3929template <typename BatchSet, typename Map, typename Predicate>
3930static BatchSet GetQueueBatchSnapshotImpl(const Map &map, Predicate &&pred) {
John Zulauf1d5f9c12022-05-13 14:51:08 -06003931 BatchSet snapshot;
John Zulaufe0757ba2022-06-10 16:51:45 -06003932 for (auto &entry : map) {
3933 // Intentional copy
3934 auto batch = GetBatchTraits<typename Map::mapped_type>::Get(entry.second);
John Zulauf1d5f9c12022-05-13 14:51:08 -06003935 if (batch && pred(batch)) snapshot.emplace(std::move(batch));
John Zulauf697c0e12022-04-19 16:31:12 -06003936 }
John Zulauf1d5f9c12022-05-13 14:51:08 -06003937 return snapshot;
3938}
3939
3940template <typename Predicate>
3941QueueBatchContext::ConstBatchSet SyncValidator::GetQueueLastBatchSnapshot(Predicate &&pred) const {
John Zulaufe0757ba2022-06-10 16:51:45 -06003942 return GetQueueBatchSnapshotImpl<QueueBatchContext::ConstBatchSet>(queue_sync_states_, std::forward<Predicate>(pred));
John Zulauf1d5f9c12022-05-13 14:51:08 -06003943}
3944
3945template <typename Predicate>
3946QueueBatchContext::BatchSet SyncValidator::GetQueueLastBatchSnapshot(Predicate &&pred) {
John Zulaufe0757ba2022-06-10 16:51:45 -06003947 return GetQueueBatchSnapshotImpl<QueueBatchContext::BatchSet>(queue_sync_states_, std::forward<Predicate>(pred));
3948}
3949
3950QueueBatchContext::BatchSet SyncValidator::GetQueueBatchSnapshot() {
3951 QueueBatchContext::BatchSet snapshot = GetQueueLastBatchSnapshot();
3952 auto append = [&snapshot](const std::shared_ptr<QueueBatchContext> batch) {
3953 if (batch && !layer_data::Contains(snapshot, batch)) {
3954 snapshot.emplace(batch);
3955 }
3956 return false;
3957 };
3958 GetQueueBatchSnapshotImpl<QueueBatchContext::BatchSet>(signaled_semaphores_, append);
3959 return snapshot;
John Zulauf697c0e12022-04-19 16:31:12 -06003960}
3961
John Zulaufa8700a52022-08-18 16:22:08 -06003962struct QueueSubmitCmdState {
3963 std::shared_ptr<const QueueSyncState> queue;
3964 std::shared_ptr<QueueBatchContext> last_batch;
3965 std::string submit_func_name;
John Zulaufa8700a52022-08-18 16:22:08 -06003966 SignaledSemaphores signaled;
John Zulauf8a7b03d2022-09-20 11:41:19 -06003967 QueueSubmitCmdState(const char *func_name, const SignaledSemaphores &parent_semaphores)
3968 : submit_func_name(func_name), signaled(parent_semaphores) {}
John Zulaufa8700a52022-08-18 16:22:08 -06003969};
3970
3971bool QueueBatchContext::DoQueueSubmitValidate(const SyncValidator &sync_state, QueueSubmitCmdState &cmd_state,
3972 const VkSubmitInfo2 &batch_info) {
3973 bool skip = false;
3974
3975     // For each command buffer in the batch...
3976 for (const auto &cb : command_buffers_) {
John Zulauf8a7b03d2022-09-20 11:41:19 -06003977 if (cb.cb->GetTagLimit() == 0) {
3978 batch_.cb_index++;
3979 continue; // Skip empty CB's but also skip the unused index for correct reporting
3980 }
John Zulaufa8700a52022-08-18 16:22:08 -06003981 skip |= cb.cb->ValidateFirstUse(*this, cmd_state.submit_func_name.c_str(), cb.index);
3982
3983         // The barriers have already been applied in ValidateFirstUse
3984 ResourceUsageRange tag_range = ImportRecordedAccessLog(*cb.cb);
3985 ResolveSubmittedCommandBuffer(*cb.cb->GetCurrentAccessContext(), tag_range.begin);
3986 }
3987 return skip;
3988}
3989
John Zulaufcb7e1672022-05-04 13:46:08 -06003990bool SignaledSemaphores::SignalSemaphore(const std::shared_ptr<const SEMAPHORE_STATE> &sem_state,
3991 const std::shared_ptr<QueueBatchContext> &batch,
3992 const VkSemaphoreSubmitInfo &signal_info) {
John Zulaufecf4ac52022-06-06 10:08:42 -06003993 assert(batch);
John Zulaufcb7e1672022-05-04 13:46:08 -06003994 const SyncExecScope exec_scope =
3995 SyncExecScope::MakeSrc(batch->GetQueueFlags(), signal_info.stageMask, VK_PIPELINE_STAGE_2_HOST_BIT);
3996 const VkSemaphore sem = sem_state->semaphore();
3997 auto signal_it = signaled_.find(sem);
3998 std::shared_ptr<Signal> insert_signal;
3999 if (signal_it == signaled_.end()) {
4000 if (prev_) {
4001 auto prev_sig = GetMapped(prev_->signaled_, sem_state->semaphore(), []() { return std::shared_ptr<Signal>(); });
4002 if (prev_sig) {
4003 // The is an invalid signal, as this semaphore is already signaled... copy the prev state (as prev_ is const)
4004 insert_signal = std::make_shared<Signal>(*prev_sig);
4005 }
4006 }
4007 auto insert_pair = signaled_.emplace(sem, std::move(insert_signal));
4008 signal_it = insert_pair.first;
John Zulauf697c0e12022-04-19 16:31:12 -06004009 }
John Zulaufcb7e1672022-05-04 13:46:08 -06004010
4011 bool success = false;
4012 if (!signal_it->second) {
4013 signal_it->second = std::make_shared<Signal>(sem_state, batch, exec_scope);
4014 success = true;
4015 }
4016
4017 return success;
4018}
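// Illustrative sketch (conceptual trace, not an API transcript): the signaled-semaphore table acts as
// a one-slot mailbox per semaphore, with a const parent consulted for batch-scoped lookups:
//
//   SignalSemaphore(sem_state, batch, signal_info);  // false if sem already holds an unconsumed signal
//   auto sig = Unsignal(sem);                        // consumes the signal; falls back to prev_ if absent locally
//   Import(sem, std::move(sig));                     // record phase: push the leaf state back into the shared table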
4019
John Zulaufecf4ac52022-06-06 10:08:42 -06004020std::shared_ptr<const SignaledSemaphores::Signal> SignaledSemaphores::Unsignal(VkSemaphore sem) {
4021 std::shared_ptr<const Signal> unsignaled;
John Zulaufcb7e1672022-05-04 13:46:08 -06004022 const auto found_it = signaled_.find(sem);
4023 if (found_it != signaled_.end()) {
4024 // Move the unsignaled singal out from the signaled list, but keep the shared_ptr as the caller needs the contents for
4025 // a bit.
4026 unsignaled = std::move(found_it->second);
4027 if (!prev_) {
4028             // No parent, no need to keep the entry
4029 // IFF (prev_) leave the entry in the leaf table as we use it to export unsignal to prev_ during record phase
4030 signaled_.erase(found_it);
4031 }
4032 } else if (prev_) {
4033 // We can't unsignal prev_ because it's const * by design.
4034 // We put in an empty placeholder
4035 signaled_.emplace(sem, std::shared_ptr<Signal>());
4036 unsignaled = GetPrev(sem);
4037 }
4038     // NOTE: No else clause, because if we didn't find it and there's no previous, this indicates an error
4039     // that CoreChecks should have reported.
4040
4041     // If unsignaled is null, there was a missing pending semaphore, and that's also an issue CoreChecks reports
John Zulauf697c0e12022-04-19 16:31:12 -06004042 return unsignaled;
4043}
4044
John Zulaufcb7e1672022-05-04 13:46:08 -06004045void SignaledSemaphores::Import(VkSemaphore sem, std::shared_ptr<Signal> &&from) {
4046 // Overwrite the s tate with the last state from this
4047 if (from) {
4048 assert(sem == from->sem_state->semaphore());
4049 signaled_[sem] = std::move(from);
4050 } else {
4051 signaled_.erase(sem);
4052 }
4053}
4054
4055void SignaledSemaphores::Reset() {
4056 signaled_.clear();
4057 prev_ = nullptr;
4058}
4059
John Zulaufea943c52022-02-22 11:05:17 -07004060std::shared_ptr<CommandBufferAccessContext> SyncValidator::AccessContextFactory(VkCommandBuffer command_buffer) {
4061 // If we don't have one, make it.
4062 auto cb_state = Get<CMD_BUFFER_STATE>(command_buffer);
4063 assert(cb_state.get());
4064 auto queue_flags = cb_state->GetQueueFlags();
4065 return std::make_shared<CommandBufferAccessContext>(*this, cb_state, queue_flags);
4066}
4067
John Zulaufcb7e1672022-05-04 13:46:08 -06004068std::shared_ptr<CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) {
John Zulaufea943c52022-02-22 11:05:17 -07004069 return GetMappedInsert(cb_access_state, command_buffer,
4070 [this, command_buffer]() { return AccessContextFactory(command_buffer); });
4071}
4072
4073std::shared_ptr<const CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) const {
4074 return GetMapped(cb_access_state, command_buffer, []() { return std::shared_ptr<CommandBufferAccessContext>(); });
4075}
4076
4077const CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) const {
4078 return GetMappedPlainFromShared(cb_access_state, command_buffer);
4079}
4080
4081CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) {
4082 return GetAccessContextShared(command_buffer).get();
4083}
4084
4085CommandBufferAccessContext *SyncValidator::GetAccessContextNoInsert(VkCommandBuffer command_buffer) {
4086 return GetMappedPlainFromShared(cb_access_state, command_buffer);
4087}
4088
John Zulaufd1f85d42020-04-15 12:23:15 -06004089void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06004090 auto *access_context = GetAccessContextNoInsert(command_buffer);
4091 if (access_context) {
4092 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06004093 }
4094}
4095
John Zulaufd1f85d42020-04-15 12:23:15 -06004096void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
4097 auto access_found = cb_access_state.find(command_buffer);
4098 if (access_found != cb_access_state.end()) {
4099 access_found->second->Reset();
John Zulauf4fa68462021-04-26 21:04:22 -06004100 access_found->second->MarkDestroyed();
John Zulaufd1f85d42020-04-15 12:23:15 -06004101 cb_access_state.erase(access_found);
4102 }
4103}
4104
John Zulauf9cb530d2019-09-30 14:14:10 -06004105bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
4106 uint32_t regionCount, const VkBufferCopy *pRegions) const {
4107 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06004108 const auto *cb_context = GetAccessContext(commandBuffer);
4109 assert(cb_context);
4110 if (!cb_context) return skip;
4111 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06004112
John Zulauf3d84f1b2020-03-09 13:33:25 -06004113 // If we have no previous accesses, we have no hazards
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004114 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
4115 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004116
4117 for (uint32_t region = 0; region < regionCount; region++) {
4118 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06004119 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004120 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004121 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004122 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004123 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004124 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004125 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004126 cb_context->FormatHazard(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06004127 }
John Zulauf9cb530d2019-09-30 14:14:10 -06004128 }
John Zulauf16adfc92020-04-08 10:28:33 -06004129 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004130 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004131 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004132 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004133 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004134 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004135 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004136 cb_context->FormatHazard(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06004137 }
4138 }
4139 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06004140 }
4141 return skip;
4142}
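// Illustrative sketch (a conceptual summary of the code above and below, not new functionality):
// every transfer command follows the same PreCallValidate / PreCallRecord split -- first detect
// hazards against current state, then update state with the new accesses under a fresh tag:
//
//   // validate, per region:
//   //   context->DetectHazard(*src, SYNC_COPY_TRANSFER_READ, src_range);   // e.g. read-after-write
//   //   context->DetectHazard(*dst, SYNC_COPY_TRANSFER_WRITE, dst_range);  // e.g. write-after-read/write
//   // record, per region:
//   //   context->UpdateAccessState(*src, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
//   //   context->UpdateAccessState(*dst, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);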
4143
4144void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
4145 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06004146 auto *cb_context = GetAccessContext(commandBuffer);
4147 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06004148 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004149 auto *context = cb_context->GetCurrentAccessContext();
4150
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004151 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
4152 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06004153
4154 for (uint32_t region = 0; region < regionCount; region++) {
4155 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06004156 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004157 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004158 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06004159 }
John Zulauf16adfc92020-04-08 10:28:33 -06004160 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004161 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004162 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07004163 }
4164 }
4165}
4166
John Zulauf4a6105a2020-11-17 15:11:05 -07004167void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4168 // Clear out events from the command buffer contexts
4169 for (auto &cb_context : cb_access_state) {
4170 cb_context.second->RecordDestroyEvent(event);
4171 }
4172}
4173
Tony-LunarGef035472021-11-02 10:23:33 -06004174bool SyncValidator::ValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos,
4175 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04004176 bool skip = false;
4177 const auto *cb_context = GetAccessContext(commandBuffer);
4178 assert(cb_context);
4179 if (!cb_context) return skip;
4180 const auto *context = cb_context->GetCurrentAccessContext();
4181
4182 // If we have no previous accesses, we have no hazards
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004183 auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
4184 auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
Jeff Leger178b1e52020-10-05 12:22:23 -04004185
4186 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
4187 const auto &copy_region = pCopyBufferInfos->pRegions[region];
4188 if (src_buffer) {
4189 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004190 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04004191 if (hazard.hazard) {
4192 // TODO -- add tag information to log msg when useful.
sjfricke0bea06e2022-06-05 09:22:26 +09004193 skip |=
4194 LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
4195 "%s(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
4196 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
4197 region, cb_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004198 }
4199 }
4200 if (dst_buffer && !skip) {
4201 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004202 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04004203 if (hazard.hazard) {
sjfricke0bea06e2022-06-05 09:22:26 +09004204 skip |=
4205 LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
4206 "%s(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
4207 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
4208 region, cb_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004209 }
4210 }
4211 if (skip) break;
4212 }
4213 return skip;
4214}
4215
Tony-LunarGef035472021-11-02 10:23:33 -06004216bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
4217 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
4218 return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
4219}
4220
4221bool SyncValidator::PreCallValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) const {
4222 return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
4223}
4224
4225void SyncValidator::RecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos, CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04004226 auto *cb_context = GetAccessContext(commandBuffer);
4227 assert(cb_context);
Tony-LunarGef035472021-11-02 10:23:33 -06004228 const auto tag = cb_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04004229 auto *context = cb_context->GetCurrentAccessContext();
4230
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004231 auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
4232 auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
Jeff Leger178b1e52020-10-05 12:22:23 -04004233
4234 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
4235 const auto &copy_region = pCopyBufferInfos->pRegions[region];
4236 if (src_buffer) {
4237 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004238 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004239 }
4240 if (dst_buffer) {
4241 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004242 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004243 }
4244 }
4245}
4246
Tony-LunarGef035472021-11-02 10:23:33 -06004247void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
4248 RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
4249}
4250
4251void SyncValidator::PreCallRecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) {
4252 RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
4253}
4254
John Zulauf5c5e88d2019-12-26 11:22:02 -07004255bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4256 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4257 const VkImageCopy *pRegions) const {
4258 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06004259 const auto *cb_access_context = GetAccessContext(commandBuffer);
4260 assert(cb_access_context);
4261 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07004262
John Zulauf3d84f1b2020-03-09 13:33:25 -06004263 const auto *context = cb_access_context->GetCurrentAccessContext();
4264 assert(context);
4265 if (!context) return skip;
4266
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004267 auto src_image = Get<IMAGE_STATE>(srcImage);
4268 auto dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004269 for (uint32_t region = 0; region < regionCount; region++) {
4270 const auto &copy_region = pRegions[region];
4271 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004272 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004273 copy_region.srcOffset, copy_region.extent, false);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004274 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004275 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004276 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004277 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004278 cb_access_context->FormatHazard(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07004279 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06004280 }
4281
4282 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004283 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004284 copy_region.dstOffset, copy_region.extent, false);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004285 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004286 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004287 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004288 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauf397e68b2022-04-19 11:44:07 -06004289 cb_access_context->FormatHazard(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07004290 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07004291 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07004292 }
4293 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06004294
John Zulauf5c5e88d2019-12-26 11:22:02 -07004295 return skip;
4296}
4297
4298void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4299 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4300 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06004301 auto *cb_access_context = GetAccessContext(commandBuffer);
4302 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06004303 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004304 auto *context = cb_access_context->GetCurrentAccessContext();
4305 assert(context);
4306
Jeremy Gebben9f537102021-10-05 16:37:12 -06004307 auto src_image = Get<IMAGE_STATE>(srcImage);
4308 auto dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07004309
4310 for (uint32_t region = 0; region < regionCount; region++) {
4311 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06004312 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004313 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004314 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07004315 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06004316 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004317 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
ziga-lunarg73746512022-03-23 23:08:17 +01004318 copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06004319 }
4320 }
4321}
4322
Tony-LunarGb61514a2021-11-02 12:36:51 -06004323bool SyncValidator::ValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo,
4324 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04004325 bool skip = false;
4326 const auto *cb_access_context = GetAccessContext(commandBuffer);
4327 assert(cb_access_context);
4328 if (!cb_access_context) return skip;
4329
4330 const auto *context = cb_access_context->GetCurrentAccessContext();
4331 assert(context);
4332 if (!context) return skip;
4333
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004334 auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
4335 auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
Tony-LunarGb61514a2021-11-02 12:36:51 -06004336
Jeff Leger178b1e52020-10-05 12:22:23 -04004337 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
4338 const auto &copy_region = pCopyImageInfo->pRegions[region];
4339 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004340 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004341 copy_region.srcOffset, copy_region.extent, false);
Jeff Leger178b1e52020-10-05 12:22:23 -04004342 if (hazard.hazard) {
4343 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
sjfricke0bea06e2022-06-05 09:22:26 +09004344 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
Jeff Leger178b1e52020-10-05 12:22:23 -04004345 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06004346 region, cb_access_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004347 }
4348 }
4349
4350 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004351 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Aitor Camachoe67f2c72022-06-08 14:41:58 +02004352 copy_region.dstOffset, copy_region.extent, false);
Jeff Leger178b1e52020-10-05 12:22:23 -04004353 if (hazard.hazard) {
4354 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
sjfricke0bea06e2022-06-05 09:22:26 +09004355 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
Jeff Leger178b1e52020-10-05 12:22:23 -04004356 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauf397e68b2022-04-19 11:44:07 -06004357 region, cb_access_context->FormatHazard(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004358 }
4359 if (skip) break;
4360 }
4361 }
4362
4363 return skip;
4364}
4365
bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
                                                    const VkCopyImageInfo2KHR *pCopyImageInfo) const {
    return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) const {
    return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
}

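// Record-side mirror of the validation above: each region's source read and destination write
// are stamped into the access context with this command's tag, so later commands in the buffer
// are checked against these transfer accesses.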
void SyncValidator::RecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo, CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
    auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);

    for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
        const auto &copy_region = pCopyImageInfo->pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
    RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
}

void SyncValidator::PreCallRecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) {
    RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
}

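// Pipeline barriers are modeled as SyncOp objects. The validate path constructs the op and
// checks it against the command buffer's access context without modifying anything; the record
// path uses RecordSyncOp, which both applies the barrier to the context and retains the op
// (e.g. for replay when the command buffer is executed or submitted).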
bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
                                           dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
                                           bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                           pImageMemoryBarriers);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                    uint32_t bufferMemoryBarrierCount,
                                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                    uint32_t imageMemoryBarrierCount,
                                                    const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(),
                                                           srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                           pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                           imageMemoryBarrierCount, pImageMemoryBarriers);
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
                                                          const VkDependencyInfoKHR *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier2(VkCommandBuffer commandBuffer,
                                                       const VkDependencyInfo *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(),
                                                           *pDependencyInfo);
}

void SyncValidator::PreCallRecordCmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(),
                                                           *pDependencyInfo);
}

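// Device creation hooks the state-tracker callbacks this validator depends on and builds one
// QueueSyncState per VkQueue, each with its queue family's capability flags and a unique,
// monotonically assigned QueueId.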
void SyncValidator::CreateDevice(const VkDeviceCreateInfo *pCreateInfo) {
    // The state tracker sets up the device state
    StateTracker::CreateDevice(pCreateInfo);

    // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
    // refactor would be messier without.
    // TODO: Find a good way to do this hooklessly.
    SetCommandBufferResetCallback([this](VkCommandBuffer command_buffer) -> void { ResetCommandBufferCallback(command_buffer); });
    SetCommandBufferFreeCallback([this](VkCommandBuffer command_buffer) -> void { FreeCommandBufferCallback(command_buffer); });

    QueueId queue_id = QueueSyncState::kQueueIdBase;
    ForEachShared<QUEUE_STATE>([this, &queue_id](const std::shared_ptr<QUEUE_STATE> &queue_state) {
        auto queue_flags = physical_device_state->queue_family_properties[queue_state->queueFamilyIndex].queueFlags;
        std::shared_ptr<QueueSyncState> queue_sync_state = std::make_shared<QueueSyncState>(queue_state, queue_flags, queue_id++);
        queue_sync_states_.emplace(std::make_pair(queue_state->Queue(), std::move(queue_sync_state)));
    });
}

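// Render pass begin follows the same SyncOp pattern: vkCmdBeginRenderPass and the *2/*2KHR
// variants normalize their arguments and funnel into shared helpers keyed by CMD_TYPE, which
// validate or record a SyncOpBeginRenderPass.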
bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                            const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd_type) const {
    bool skip = false;
    auto cb_context = GetAccessContext(commandBuffer);
    if (cb_context) {
        SyncOpBeginRenderPass sync_op(cmd_type, *this, pRenderPassBegin, pSubpassBeginInfo);
        skip = sync_op.Validate(*cb_context);
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      VkSubpassContents contents) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                       const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                          const VkRenderPassBeginInfo *pRenderPassBegin,
                                                          const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
    return skip;
}

void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
                                                     VkResult result) {
    // The state tracker sets up the command buffer state
    StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);

    // Create/initialize the structure that tracks accesses at the command buffer scope.
    auto cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    cb_access_context->Reset();
}

void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd_type) {
    auto cb_context = GetAccessContext(commandBuffer);
    if (cb_context) {
        cb_context->RecordSyncOp<SyncOpBeginRenderPass>(cmd_type, *this, pRenderPassBegin, pSubpassBeginInfo);
    }
}

void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                         const VkRenderPassBeginInfo *pRenderPassBegin,
                                                         const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
}

bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                           const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd_type) const {
    bool skip = false;

    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    SyncOpNextSubpass sync_op(cmd_type, *this, pSubpassBeginInfo, pSubpassEndInfo);
    return sync_op.Validate(*cb_context);
}

bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
    // Convert to a NextSubpass2
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
    skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                      const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
    return skip;
}

bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                   const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
    return skip;
}

void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                         const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd_type) {
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpNextSubpass>(cmd_type, *this, pSubpassBeginInfo, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
}

void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                  const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
}

void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                     const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
}

bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
                                             CMD_TYPE cmd_type) const {
    bool skip = false;

    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpEndRenderPass sync_op(cmd_type, *this, pSubpassEndInfo);
    skip |= sync_op.Validate(*cb_context);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
    skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                        const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
    return skip;
}

void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
                                           CMD_TYPE cmd_type) {
    // Resolve all subpass contexts back into the command buffer context
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpEndRenderPass>(cmd_type, *this, pSubpassEndInfo);
}

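// Example (illustrative only): two back-to-back draws whose shaders both store to the same
// bound storage buffer or image produce a WAW in which the new usage bit equals the prior
// write's access; that benign self-overwrite pattern is what the heuristic below suppresses.
// A WAW between different access types (say, a transfer write over a shader write) is still
// reported.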
// Simple heuristic rule to detect WAW operations representing algorithmically safe or incremental
// updates to a resource that do not conflict at the byte level.
// TODO: Revisit this rule to see if it needs to be tighter or looser
// TODO: Add programmatic control over suppression heuristics
bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
    return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
}

void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
    StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
    StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
    StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}

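// Buffer<->image copies are templated on RegionType so that VkBufferImageCopy and the
// VkBufferImageCopy2 used by the *2/*2KHR entry points share one implementation. The buffer
// side of each region is treated as a linear range whose size is derived from the image extent
// and format by GetBufferSizeFromCopyImage; the image side is checked per
// subresource/offset/extent.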
template <typename RegionType>
bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                 VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
                                                 CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        HazardResult hazard;
        if (dst_image) {
            if (src_buffer) {
                ResourceAccessRange src_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
                hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
                if (hazard.hazard) {
                    // PHASE1 TODO -- add tag information to log msg when useful.
                    skip |=
                        LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
                }
            }

            hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
                                           copy_region.imageOffset, copy_region.imageExtent, false);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (skip) break;
        }
        if (skip) break;
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                        VkImageLayout dstImageLayout, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
                                        CMD_COPYBUFFERTOIMAGE);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
                                                            const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                                        pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                                        pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
                                                         const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                                        pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                                        pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
}

template <typename RegionType>
void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                               VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
                                               CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);

    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (dst_image) {
            if (src_buffer) {
                ResourceAccessRange src_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
                context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
            }
            context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                      VkImageLayout dstImageLayout, uint32_t regionCount,
                                                      const VkBufferImageCopy *pRegions) {
    StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
    RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, CMD_COPYBUFFERTOIMAGE);
}

void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
                                                          const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
    StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
    RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                               pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                               pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
}

void SyncValidator::PreCallRecordCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
                                                       const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) {
    StateTracker::PreCallRecordCmdCopyBufferToImage2(commandBuffer, pCopyBufferToImageInfo);
    RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                               pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                               pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
}

template <typename RegionType>
bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                 VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
                                                 CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
                                                copy_region.imageOffset, copy_region.imageExtent, false);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (dst_mem) {
                ResourceAccessRange dst_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
                hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
                if (hazard.hazard) {
                    skip |=
                        LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
                }
            }
        }
        if (skip) break;
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                        VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
                                        CMD_COPYIMAGETOBUFFER);
}

bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
                                                            const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
    return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                                        pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                                        pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
                                                         const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) const {
    return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                                        pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                                        pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
}

template <typename RegionType>
void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                               VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
                                               CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);

    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
            if (dst_buffer) {
                ResourceAccessRange dst_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
                context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
            }
        }
    }
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                      VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
    RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, CMD_COPYIMAGETOBUFFER);
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
                                                          const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
    StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
    RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                               pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                               pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
                                                       const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) {
    StateTracker::PreCallRecordCmdCopyImageToBuffer2(commandBuffer, pCopyImageToBufferInfo);
    RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
                               pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
                               pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
}

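// VkImageBlit expresses each region as a pair of opposite corners (srcOffsets[0]/[1] and
// dstOffsets[0]/[1]) that may be reversed on any axis to request mirroring, so the hazard
// bounds are normalized first: the per-axis minimum becomes the offset and the absolute
// difference the extent. For example, offsets (64,64,1)..(0,0,0) normalize to offset (0,0,0)
// with extent (64,64,1).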
template <typename RegionType>
bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                         VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                         const RegionType *pRegions, VkFilter filter, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const char *caller_name = CommandTypeString(cmd_type);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
                                 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
                                 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
            auto hazard =
                context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent, false);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", caller_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
        }

        if (dst_image) {
            VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
                                 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
                                 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
            auto hazard =
                context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent, false);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", caller_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageBlit *pRegions, VkFilter filter) const {
    return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
                                CMD_BLITIMAGE);
}

bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
                                                    const VkBlitImageInfo2KHR *pBlitImageInfo) const {
    return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                                pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                                pBlitImageInfo->filter, CMD_BLITIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdBlitImage2(VkCommandBuffer commandBuffer,
                                                 const VkBlitImageInfo2 *pBlitImageInfo) const {
    return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                                pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                                pBlitImageInfo->filter, CMD_BLITIMAGE2);
}

template <typename RegionType>
void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                       VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                       const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
                                 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
                                 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
            context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       blit_region.srcSubresource, offset, extent, tag);
        }
        if (dst_image) {
            VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
                                 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
                                 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
            VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
                                 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
            context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       blit_region.dstSubresource, offset, extent, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageBlit *pRegions, VkFilter filter) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
    StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                            pRegions, filter);
    RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
}

void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
    StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
    RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                       pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                       pBlitImageInfo->filter, tag);
}

void SyncValidator::PreCallRecordCmdBlitImage2(VkCommandBuffer commandBuffer, const VkBlitImageInfo2 *pBlitImageInfo) {
    StateTracker::PreCallRecordCmdBlitImage2(commandBuffer, pBlitImageInfo);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2);
    RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
                       pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
                       pBlitImageInfo->filter, tag);
}

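// Indirect draw/dispatch parameters are read by the GPU, so the parameter buffer is validated
// and recorded as SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ. When the structs are tightly
// packed (a single draw, or stride == struct size) one contiguous range is used; otherwise
// each of the drawCount strided structs is handled individually so the unrelated bytes
// between them are not flagged.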
bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
                                           VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
                                           const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
                                           CMD_TYPE cmd_type) const {
    bool skip = false;
    if (drawCount == 0) return skip;

    const char *caller_name = CommandTypeString(cmd_type);
    auto buf_state = Get<BUFFER_STATE>(buffer);
    VkDeviceSize size = struct_size;
    if (drawCount == 1 || stride == size) {
        if (drawCount > 1) size *= drawCount;
        const ResourceAccessRange range = MakeRange(offset, size);
        auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
        if (hazard.hazard) {
            skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                             "%s: Hazard %s for indirect %s in %s. Access info %s.", caller_name, string_SyncHazard(hazard.hazard),
                             report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
                             cb_context.FormatHazard(hazard).c_str());
        }
    } else {
        for (uint32_t i = 0; i < drawCount; ++i) {
            const ResourceAccessRange range = MakeRange(offset + i * stride, size);
            auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
            if (hazard.hazard) {
                skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for indirect %s in %s. Access info %s.", caller_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(buffer).c_str(),
                                 report_data->FormatHandle(commandBuffer).c_str(), cb_context.FormatHazard(hazard).c_str());
                break;
            }
        }
    }
    return skip;
}

void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag tag, const VkDeviceSize struct_size,
                                         const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
                                         uint32_t stride) {
    auto buf_state = Get<BUFFER_STATE>(buffer);
    VkDeviceSize size = struct_size;
    if (drawCount == 1 || stride == size) {
        if (drawCount > 1) size *= drawCount;
        const ResourceAccessRange range = MakeRange(offset, size);
        context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
    } else {
        for (uint32_t i = 0; i < drawCount; ++i) {
            const ResourceAccessRange range = MakeRange(offset + i * stride, size);
            context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
                                      tag);
        }
    }
}

bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
                                        VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                        CMD_TYPE cmd_type) const {
    bool skip = false;

    auto count_buf_state = Get<BUFFER_STATE>(buffer);
    const ResourceAccessRange range = MakeRange(offset, 4);  // the count is a single uint32_t
    auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
    if (hazard.hazard) {
        skip |= LogError(count_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
                         "%s: Hazard %s for countBuffer %s in %s. Access info %s.", CommandTypeString(cmd_type),
                         string_SyncHazard(hazard.hazard), report_data->FormatHandle(buffer).c_str(),
                         report_data->FormatHandle(commandBuffer).c_str(), cb_context.FormatHazard(hazard).c_str());
    }
    return skip;
}

void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag tag, VkBuffer buffer, VkDeviceSize offset) {
    auto count_buf_state = Get<BUFFER_STATE>(buffer);
    const ResourceAccessRange range = MakeRange(offset, 4);
    context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
}

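// The dispatch/draw entry points below share one shape: PreCallValidate* only detects and reports hazards against
// the current access context, while PreCallRecord* forwards to the StateTracker, tags the command, and records its
// accesses for later commands to collide with. A minimal sketch of the kind of app-side sequence this catches
// (hypothetical handles, no barrier between the write and the read):
//
//   vkCmdFillBuffer(cmd, buf, 0, VK_WHOLE_SIZE, 0);  // recorded here as a transfer write to buf
//   vkCmdDispatch(cmd, 1, 1, 1);                     // shader read of buf -> READ_AFTER_WRITE hazard reported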
bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCH);
    return skip;
}

void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
}

bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCHINDIRECT);
    skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
                                   1, sizeof(VkDispatchIndirectCommand), CMD_DISPATCHINDIRECT);
    return skip;
}

void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
    RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
}

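// Direct draws additionally validate the bound vertex (and index) buffers over the ranges the draw parameters
// actually reference, plus the subpass attachments the current pipeline touches.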
bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                           uint32_t firstVertex, uint32_t firstInstance) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAW);
    skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, CMD_DRAW);
    skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAW);
    return skip;
}

void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                         uint32_t firstVertex, uint32_t firstInstance) {
    StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
}

bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                                  uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXED);
    skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, CMD_DRAWINDEXED);
    skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAWINDEXED);
    return skip;
}

void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                                uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
}

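// For indirect draws the real draw parameters live in GPU memory and are unknown at record time, so vertex/index
// accesses can only be validated conservatively over the whole bound buffers; see the TODO comments below.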
bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                   uint32_t drawCount, uint32_t stride) const {
    bool skip = false;
    if (drawCount == 0) return skip;

    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDIRECT);
    skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAWINDIRECT);
    skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
                                   drawCount, stride, CMD_DRAWINDIRECT);

    // TODO: For now, we validate the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will validate the vertex buffer at queue submission time.
    skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, CMD_DRAWINDIRECT);
    return skip;
}

void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                 uint32_t drawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
    if (drawCount == 0) return;
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
    RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);

    // TODO: For now, we record the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will record the vertex buffer at queue submission time.
    cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                          uint32_t drawCount, uint32_t stride) const {
    bool skip = false;
    if (drawCount == 0) return skip;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXEDINDIRECT);
    skip |= cb_access_context->ValidateDrawSubpassAttachment(CMD_DRAWINDEXEDINDIRECT);
    skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
                                   offset, drawCount, stride, CMD_DRAWINDEXEDINDIRECT);

    // TODO: For now, we validate the whole index and vertex buffers, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will validate the index and vertex buffers at queue submission time.
    skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, CMD_DRAWINDEXEDINDIRECT);
    return skip;
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                        uint32_t drawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
    RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);

    // TODO: For now, we record the whole index and vertex buffers, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will record the index and vertex buffers at queue submission time.
    cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
}

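// The *Count variants read the effective draw count from countBuffer on the GPU. Validation therefore also checks
// the 4-byte count read, and checks the indirect structs out to maxDrawCount as the worst case.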
bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                 uint32_t stride, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, cmd_type);
    skip |= cb_access_context->ValidateDrawSubpassAttachment(cmd_type);
    skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
                                   maxDrawCount, stride, cmd_type);
    skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, cmd_type);

    // TODO: For now, we validate the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will validate the vertex buffer at queue submission time.
    skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, cmd_type);
    return skip;
}

bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                        VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                        uint32_t stride) const {
    return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                        CMD_DRAWINDIRECTCOUNT);
}

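// Note: recording covers only a single indirect struct (drawCount of 1) rather than maxDrawCount, presumably because
// the count actually consumed is only known at execution time. The count buffer read is recorded separately.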
void SyncValidator::RecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                               VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                               uint32_t stride, CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
    RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
    RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);

    // TODO: For now, we record the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will record the vertex buffer at queue submission time.
    cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
}

void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                      VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                      uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
                                                    stride);
    RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                               CMD_DRAWINDIRECTCOUNT);
}

bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                           VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                           uint32_t maxDrawCount, uint32_t stride) const {
    return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                        CMD_DRAWINDIRECTCOUNTKHR);
}

void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                         VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                         uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
                                                       stride);
    RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                               CMD_DRAWINDIRECTCOUNTKHR);
}

bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                           VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                           uint32_t maxDrawCount, uint32_t stride) const {
    return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                        CMD_DRAWINDIRECTCOUNTAMD);
}

void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                         VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                         uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
                                                       stride);
    RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                               CMD_DRAWINDIRECTCOUNTAMD);
}

bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                        VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                        uint32_t stride, CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, cmd_type);
    skip |= cb_access_context->ValidateDrawSubpassAttachment(cmd_type);
    skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
                                   offset, maxDrawCount, stride, cmd_type);
    skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, cmd_type);

    // TODO: For now, we validate the whole index and vertex buffers, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will validate the index and vertex buffers at queue submission time.
    skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, cmd_type);
    return skip;
}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                               VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                               uint32_t maxDrawCount, uint32_t stride) const {
    return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                               CMD_DRAWINDEXEDINDIRECTCOUNT);
}

void SyncValidator::RecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                      VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                      uint32_t stride, CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
    RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
    RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);

    // TODO: For now, we record the whole index and vertex buffers, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can still change up to queue submission.
    // In the future we will record the index and vertex buffers at queue submission time.
    cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                             VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                             uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                           maxDrawCount, stride);
    RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                      CMD_DRAWINDEXEDINDIRECTCOUNT);
}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                                  VkDeviceSize offset, VkBuffer countBuffer,
                                                                  VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                                  uint32_t stride) const {
    return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                               CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                                VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                                uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                              maxDrawCount, stride);
    RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                      CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                                  VkDeviceSize offset, VkBuffer countBuffer,
                                                                  VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                                  uint32_t stride) const {
    return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                               CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                                VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                                uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                              maxDrawCount, stride);
    RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                      CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
}

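// Clear commands are treated as SYNC_CLEAR_TRANSFER_WRITE to the image. Each VkImageSubresourceRange is checked and
// recorded independently, which lets the error message name the offending range index.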
bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                      const VkClearColorValue *pColor, uint32_t rangeCount,
                                                      const VkImageSubresourceRange *pRanges) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, false);
            if (hazard.hazard) {
                skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                    const VkClearColorValue *pColor, uint32_t rangeCount,
                                                    const VkImageSubresourceRange *pRanges) {
    StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
                                                             VkImageLayout imageLayout,
                                                             const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                             const VkImageSubresourceRange *pRanges) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, false);
            if (hazard.hazard) {
                skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                           const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                           const VkImageSubresourceRange *pRanges) {
    StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
        }
    }
}

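// vkCmdCopyQueryPoolResults is tracked as a transfer write covering stride * queryCount bytes from dstOffset; the
// query pool reads themselves are not yet tracked (see the TODO below).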
bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
                                                           uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
                                                           VkDeviceSize dstOffset, VkDeviceSize stride,
                                                           VkQueryResultFlags flags) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |=
                LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                         "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                         report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
        }
    }

    // TODO: Track VkQueryPool
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                         uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                         VkDeviceSize stride, VkQueryResultFlags flags) {
    StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                       stride, flags);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }

    // TODO: Track VkQueryPool
}

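// vkCmdFillBuffer accepts VK_WHOLE_SIZE, so the buffer-aware MakeRange overload is used here to resolve the write
// range against the buffer's actual size (contrast with vkCmdUpdateBuffer below, where VK_WHOLE_SIZE is not allowed).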
bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                 VkDeviceSize size, uint32_t data) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                             report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                               VkDeviceSize size, uint32_t data) {
    StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                   VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                   const VkImageResolve *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &resolve_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
                                                resolve_region.srcOffset, resolve_region.extent, false);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
        }

        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
                                                resolve_region.dstOffset, resolve_region.extent, false);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatHazard(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                 const VkImageResolve *pRegions) {
    StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                               pRegions);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &resolve_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
        }
    }
}

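// The core and KHR flavors of vkCmdResolveImage2 share one implementation, with cmd_type distinguishing them in
// error messages and command tagging; note both record paths currently forward to the KHR state-tracker hook.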
bool SyncValidator::ValidateCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
                                             CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
    auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);

    for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
        const auto &resolve_region = pResolveImageInfo->pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
                                                resolve_region.srcOffset, resolve_region.extent, false);
            if (hazard.hazard) {
                skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
                                 region, cb_access_context->FormatHazard(hazard).c_str());
            }
        }

        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
                                                resolve_region.dstOffset, resolve_region.extent, false);
            if (hazard.hazard) {
                skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", CommandTypeString(cmd_type),
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
                                 region, cb_access_context->FormatHazard(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
                                                       const VkResolveImageInfo2KHR *pResolveImageInfo) const {
    return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdResolveImage2(VkCommandBuffer commandBuffer,
                                                    const VkResolveImageInfo2 *pResolveImageInfo) const {
    return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
}

void SyncValidator::RecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
                                           CMD_TYPE cmd_type) {
    StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
    auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);

    for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
        const auto &resolve_region = pResolveImageInfo->pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
                                                     const VkResolveImageInfo2KHR *pResolveImageInfo) {
    RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
}

void SyncValidator::PreCallRecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2 *pResolveImageInfo) {
    RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
}

bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize dataSize, const void *pData) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        // VK_WHOLE_SIZE not allowed
        const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                             report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                 VkDeviceSize dataSize, const void *pData) {
    StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        // VK_WHOLE_SIZE not allowed
        const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                           VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |=
                LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                         "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                         report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatHazard(hazard).c_str());
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                         VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
    StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

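// Event commands are handled through SyncOp objects: validation constructs a transient SyncOpSetEvent/SyncOpResetEvent
// and calls Validate(), while the Record path stores the op via RecordSyncOp so it can be replayed later (e.g. for
// secondary command buffers and queue submission).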
bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    const auto *access_context = cb_context->GetCurrentAccessContext();
    assert(access_context);
    if (!access_context) return skip;

    SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask, nullptr);
    return set_event_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask,
                                             cb_context->GetCurrentAccessContext());
}

bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
                                                   const VkDependencyInfoKHR *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context || !pDependencyInfo) return skip;

    const auto *access_context = cb_context->GetCurrentAccessContext();
    assert(access_context);
    if (!access_context) return skip;

    SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo, nullptr);
    return set_event_op.Validate(*cb_context);
}

bool SyncValidator::PreCallValidateCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
                                                const VkDependencyInfo *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context || !pDependencyInfo) return skip;

    SyncOpSetEvent set_event_op(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo, nullptr);
    return set_event_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
                                                  const VkDependencyInfoKHR *pDependencyInfo) {
    StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context || !pDependencyInfo) return;

    cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo,
                                             cb_context->GetCurrentAccessContext());
}

void SyncValidator::PostCallRecordCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
                                               const VkDependencyInfo *pDependencyInfo) {
    StateTracker::PostCallRecordCmdSetEvent2(commandBuffer, event, pDependencyInfo);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context || !pDependencyInfo) return;

    cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo,
                                             cb_context->GetCurrentAccessContext());
}

bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
                                                 VkPipelineStageFlags stageMask) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
    return reset_event_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
}

bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
                                                     VkPipelineStageFlags2KHR stageMask) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
    return reset_event_op.Validate(*cb_context);
}

bool SyncValidator::PreCallValidateCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
                                                  VkPipelineStageFlags2 stageMask) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpResetEvent reset_event_op(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
    return reset_event_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
                                                    VkPipelineStageFlags2KHR stageMask) {
    StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
}

void SyncValidator::PostCallRecordCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask) {
    StateTracker::PostCallRecordCmdResetEvent2(commandBuffer, event, stageMask);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
}

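// vkCmdWaitEvents comes in three flavors: the original call, which applies one set of stage masks and
// barrier arrays to every event, and the 2/2KHR variants, which carry one VkDependencyInfo per event.
// SyncOpWaitEvents stores either layout; the barrier_set_incr stride in its DoValidate/ReplayRecord
// below walks a single shared barrier set or one set per event accordingly.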
bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                                 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                 uint32_t bufferMemoryBarrierCount,
                                                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                 uint32_t imageMemoryBarrierCount,
                                                 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
                                    dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                    pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return wait_events_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                                uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                uint32_t bufferMemoryBarrierCount,
                                                const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                uint32_t imageMemoryBarrierCount,
                                                const VkImageMemoryBarrier *pImageMemoryBarriers) {
    StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
                                              pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                              imageMemoryBarrierCount, pImageMemoryBarriers);

    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpWaitEvents>(
        CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
        pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                     const VkDependencyInfoKHR *pDependencyInfos) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
    skip |= wait_events_op.Validate(*cb_context);
    return skip;
}

void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                    const VkDependencyInfoKHR *pDependencyInfos) {
    StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);

    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
                                               pDependencyInfos);
}

bool SyncValidator::PreCallValidateCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                  const VkDependencyInfo *pDependencyInfos) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
    skip |= wait_events_op.Validate(*cb_context);
    return skip;
}

void SyncValidator::PostCallRecordCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                 const VkDependencyInfo *pDependencyInfos) {
    StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);

    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
                                               pDependencyInfos);
}

void SyncEventState::ResetFirstScope() {
    first_scope.reset();
    scope = SyncExecScope();
    first_scope_tag = 0;
}

// Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
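// Summary of the ignore reasons checked below, in order:
//   SetVsWait2       -- a vkCmdWaitEvents2* waiting on an event last set by the original vkCmdSetEvent
//   ResetWaitRace /
//   Reset2WaitRace   -- wait after reset with no intervening execution barrier
//   SetRace          -- the set itself was unsynchronized, so the first scope is undefined
//   MissingStageBits -- srcStageMask is missing bits from the set's stageMask
//   MissingSetEvent  -- no set has established a first scope at all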
SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd_type, VkPipelineStageFlags2KHR srcStageMask) const {
    IgnoreReason reason = NotIgnored;

    if ((CMD_WAITEVENTS2KHR == cmd_type || CMD_WAITEVENTS2 == cmd_type) && (CMD_SETEVENT == last_command)) {
        reason = SetVsWait2;
    } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR) && !HasBarrier(0U, 0U)) {
        reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
    } else if (unsynchronized_set) {
        reason = SetRace;
    } else if (first_scope) {
        const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
        // Note: the "no missing bits" case is the only path that leaves the reason as NotIgnored
        if (missing_bits) reason = MissingStageBits;
    } else {
        reason = MissingSetEvent;
    }

    return reason;
}

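// True when the wait/reset is ordered after the event's last command: either there was no prior command,
// or the queried stage mask or accumulated barriers include ALL_COMMANDS, or the accumulated barriers
// intersect the requested execution scope.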
bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
    bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                       (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
    return has_barrier;
}

void SyncEventState::AddReferencedTags(ResourceUsageTagSet &referenced) const {
    if (first_scope) {
        first_scope->AddReferencedTags(referenced);
    }
}

SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                               VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                               VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
                               const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                               const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                               const VkImageMemoryBarrier *pImageMemoryBarriers)
    : SyncOpBase(cmd_type), barriers_(1) {
    auto &barrier_set = barriers_[0];
    barrier_set.dependency_flags = dependencyFlags;
    barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
    barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
    // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
    barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
                                   pMemoryBarriers);
    barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
                                         bufferMemoryBarrierCount, pBufferMemoryBarriers);
    barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
                                        imageMemoryBarrierCount, pImageMemoryBarriers);
}

SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
                               const VkDependencyInfoKHR *dep_infos)
    : SyncOpBase(cmd_type), barriers_(event_count) {
    for (uint32_t i = 0; i < event_count; i++) {
        const auto &dep_info = dep_infos[i];
        auto &barrier_set = barriers_[i];
        barrier_set.dependency_flags = dep_info.dependencyFlags;
        auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
        barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
        barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
        // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
        barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
                                       dep_info.pMemoryBarriers);
        barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
                                             dep_info.pBufferMemoryBarriers);
        barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
                                            dep_info.pImageMemoryBarriers);
    }
}

SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                             VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                             VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
                                             const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                                             const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                                             const VkImageMemoryBarrier *pImageMemoryBarriers)
    : SyncOpBarriers(cmd_type, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                     pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                     pImageMemoryBarriers) {}

SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                             const VkDependencyInfoKHR &dep_info)
    : SyncOpBarriers(cmd_type, sync_state, queue_flags, 1, &dep_info) {}

bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto *context = cb_context.GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;
    assert(barriers_.size() == 1);  // PipelineBarriers only support a single barrier set.

    // Validate Image Layout transitions
    const auto &barrier_set = barriers_[0];
    for (const auto &image_barrier : barrier_set.image_memory_barriers) {
        if (image_barrier.new_layout == image_barrier.old_layout) continue;  // Only interested in layout transitions at this point.
        const auto *image_state = image_barrier.image.get();
        if (!image_state) continue;
        const auto hazard = context->DetectImageBarrierHazard(image_barrier);
        if (hazard.hazard) {
            // PHASE1 TODO -- add tag information to log msg when useful.
            const auto &sync_state = cb_context.GetSyncState();
            const auto image_handle = image_state->image();
            skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
                                        "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
                                        string_SyncHazard(hazard.hazard), image_barrier.index,
                                        sync_state.report_data->FormatHandle(image_handle).c_str(),
                                        cb_context.FormatHazard(hazard).c_str());
        }
    }
    return skip;
}

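// Factory pattern shared by the barrier-application templates below: each SyncOp supplies a factory that
// knows how to build (a) the per-barrier update functors and (b) the range generators for buffers, images,
// and the global (full-range) case. For pipeline barriers the ranges are unfiltered; the wait-events
// factory further down restricts everything to the event's first scope.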
struct SyncOpPipelineBarrierFunctorFactory {
    using BarrierOpFunctor = PipelineBarrierOp;
    using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
    using GlobalBarrierOpFunctor = PipelineBarrierOp;
    using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
    using BufferRange = ResourceAccessRange;
    using ImageRange = subresource_adapter::ImageRangeGenerator;
    using GlobalRange = ResourceAccessRange;

    ApplyFunctor MakeApplyFunctor(QueueId queue_id, const SyncBarrier &barrier, bool layout_transition) const {
        return ApplyFunctor(BarrierOpFunctor(queue_id, barrier, layout_transition));
    }
    GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
        return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
    }
    GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(QueueId queue_id, const SyncBarrier &barrier) const {
        return GlobalBarrierOpFunctor(queue_id, barrier, false);
    }

    BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
        if (!SimpleBinding(buffer)) return ResourceAccessRange();
        const auto base_address = ResourceBaseAddress(buffer);
        return (range + base_address);
    }
    ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
        if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();

        const auto base_address = ResourceBaseAddress(image);
        subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address, false);
        return range_gen;
    }
    GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
};

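// Apply the per-resource barriers one at a time (each is restricted to its own buffer/image range), then
// the global memory barriers as a single batched functor walked across every access address type.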
template <typename Barriers, typename FunctorFactory>
void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const QueueId queue_id,
                                   const ResourceUsageTag tag, AccessContext *context) {
    for (const auto &barrier : barriers) {
        const auto *state = barrier.GetState();
        if (state) {
            auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
            auto update_action = factory.MakeApplyFunctor(queue_id, barrier.barrier, barrier.IsLayoutTransition());
            auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
            UpdateMemoryAccessState(accesses, update_action, &range_gen);
        }
    }
}

template <typename Barriers, typename FunctorFactory>
void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const QueueId queue_id,
                                         const ResourceUsageTag tag, AccessContext *access_context) {
    auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
    for (const auto &barrier : barriers) {
        barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(queue_id, barrier));
    }
    for (const auto address_type : kAddressTypes) {
        auto range_gen = factory.MakeGlobalRangeGen(address_type);
        UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
    }
}

ResourceUsageTag SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) {
    const auto tag = cb_context->NextCommandTag(cmd_type_);
    ReplayRecord(*cb_context, tag);
    return tag;
}

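// Record and queue-submit replay share this path: Record (above) just allocates a tag and calls
// ReplayRecord on the command buffer's own execution context.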
void SyncOpPipelineBarrier::ReplayRecord(CommandExecutionContext &exec_context, const ResourceUsageTag tag) const {
    SyncOpPipelineBarrierFunctorFactory factory;
    // Pipeline barriers only have a single barrier set, unlike WaitEvents2
    assert(barriers_.size() == 1);
    const auto &barrier_set = barriers_[0];
    if (!exec_context.ValidForSyncOps()) return;

    SyncEventsContext *events_context = exec_context.GetCurrentEventsContext();
    AccessContext *access_context = exec_context.GetCurrentAccessContext();
    const auto queue_id = exec_context.GetQueueId();
    ApplyBarriers(barrier_set.buffer_memory_barriers, factory, queue_id, tag, access_context);
    ApplyBarriers(barrier_set.image_memory_barriers, factory, queue_id, tag, access_context);
    ApplyGlobalBarriers(barrier_set.memory_barriers, factory, queue_id, tag, access_context);
    if (barrier_set.single_exec_scope) {
        events_context->ApplyBarrier(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, tag);
    } else {
        for (const auto &barrier : barrier_set.memory_barriers) {
            events_context->ApplyBarrier(barrier.src_exec_scope, barrier.dst_exec_scope, tag);
        }
    }
}

bool SyncOpPipelineBarrier::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                           ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    // No Validation for replay, as the layout transition accesses are checked directly, and the src*Mask ordering is captured
    // with first access information.
    return false;
}

void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
                                                    VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
                                                    const VkMemoryBarrier *barriers) {
    memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
    for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
        const auto &barrier = barriers[barrier_index];
        SyncBarrier sync_barrier(barrier, src, dst);
        memory_barriers.emplace_back(sync_barrier);
    }
    if (0 == memory_barrier_count) {
        // If there are no global memory barriers, force an exec barrier
        memory_barriers.emplace_back(SyncBarrier(src, dst));
    }
    single_exec_scope = true;
}

void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
                                                          const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
                                                          uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
    buffer_memory_barriers.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
        if (buffer) {
            const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
            const auto range = MakeRange(barrier.offset, barrier_size);
            const SyncBarrier sync_barrier(barrier, src, dst);
            buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
        } else {
            buffer_memory_barriers.emplace_back();
        }
    }
}

void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
                                                    uint32_t memory_barrier_count, const VkMemoryBarrier2 *barriers) {
    memory_barriers.reserve(memory_barrier_count);
    for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
        const auto &barrier = barriers[barrier_index];
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
        auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
        SyncBarrier sync_barrier(barrier, src, dst);
        memory_barriers.emplace_back(sync_barrier);
    }
    single_exec_scope = false;
}

void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                                          VkDependencyFlags dependencyFlags, uint32_t barrier_count,
                                                          const VkBufferMemoryBarrier2 *barriers) {
    buffer_memory_barriers.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
        auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
        auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
        if (buffer) {
            const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
            const auto range = MakeRange(barrier.offset, barrier_size);
            const SyncBarrier sync_barrier(barrier, src, dst);
            buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
        } else {
            buffer_memory_barriers.emplace_back();
        }
    }
}

void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
                                                         const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
                                                         uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
    image_memory_barriers.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
        if (image) {
            auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
            const SyncBarrier sync_barrier(barrier, src, dst);
            image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
        } else {
            image_memory_barriers.emplace_back();
            image_memory_barriers.back().index = index;  // Just in case we're interested in the ones we skipped.
        }
    }
}

void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                                         VkDependencyFlags dependencyFlags, uint32_t barrier_count,
                                                         const VkImageMemoryBarrier2 *barriers) {
    image_memory_barriers.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
        auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
        auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
        if (image) {
            auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
            const SyncBarrier sync_barrier(barrier, src, dst);
            image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
        } else {
            image_memory_barriers.emplace_back();
            image_memory_barriers.back().index = index;  // Just in case we're interested in the ones we skipped.
        }
    }
}

SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                   uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
                                   VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
                                   const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                                   const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                                   const VkImageMemoryBarrier *pImageMemoryBarriers)
    : SyncOpBarriers(cmd_type, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
                     pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                     pImageMemoryBarriers) {
    MakeEventsList(sync_state, eventCount, pEvents);
}

SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                   uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
    : SyncOpBarriers(cmd_type, sync_state, queue_flags, eventCount, pDependencyInfo) {
    MakeEventsList(sync_state, eventCount, pEvents);
    assert(events_.size() == barriers_.size());  // Just so nobody gets clever and decides to cull the event or barrier arrays
}

const char *const SyncOpWaitEvents::kIgnored = "Wait operation is ignored for this event.";

bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto &sync_state = cb_context.GetSyncState();
    const auto command_buffer_handle = cb_context.GetCBState().commandBuffer();

    // This is only interesting at record and not replay (Execute/Submit) time.
    for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
        const auto &barrier_set = barriers_[barrier_set_index];
        if (barrier_set.single_exec_scope) {
            if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
                const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
                skip = sync_state.LogInfo(command_buffer_handle, vuid,
                                          "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
                                          string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
            } else {
                const auto &barriers = barrier_set.memory_barriers;
                for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
                    const auto &barrier = barriers[barrier_index];
                    if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
                        const std::string vuid =
                            std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
                        skip =
                            sync_state.LogInfo(command_buffer_handle, vuid,
                                               "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
                                               CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
                                               "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
                    }
                }
            }
        }
    }

    // The rest is common to record time and replay time.
    skip |= DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
    return skip;
}

bool SyncOpWaitEvents::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
    bool skip = false;
    const auto &sync_state = exec_context.GetSyncState();
    const QueueId queue_id = exec_context.GetQueueId();

    VkPipelineStageFlags2KHR event_stage_masks = 0U;
    VkPipelineStageFlags2KHR barrier_mask_params = 0U;
    bool events_not_found = false;
    const auto *events_context = exec_context.GetCurrentEventsContext();
    assert(events_context);
    size_t barrier_set_index = 0;
    size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
    for (const auto &event : events_) {
        const auto *sync_event = events_context->Get(event.get());
        const auto &barrier_set = barriers_[barrier_set_index];
        if (!sync_event) {
            // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
            //              or solve this with replay creating the SyncEventState in the queue context... also this will be a
            //              new validation error... wait without previously submitted set event...
            events_not_found = true;  // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
            barrier_set_index += barrier_set_incr;
            continue;  // Core, Lifetimes, or Param check needs to catch invalid events.
        }

        // For replay calls, don't revalidate "same command buffer" events
        if (sync_event->last_command_tag > base_tag) continue;

        const auto event_handle = sync_event->event->event();
        // TODO add "destroyed" checks

        if (sync_event->first_scope) {
            // Only accumulate barrier and event stages if there is a pending set in the current context
            barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
            event_stage_masks |= sync_event->scope.mask_param;
        }

        const auto &src_exec_scope = barrier_set.src_exec_scope;

        const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_type_, src_exec_scope.mask_param);
        if (ignore_reason) {
            switch (ignore_reason) {
                case SyncEventState::ResetWaitRace:
                case SyncEventState::Reset2WaitRace: {
                    // Four permutations of Reset and Wait calls...
                    const char *vuid =
                        (cmd_type_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
                    if (ignore_reason == SyncEventState::Reset2WaitRace) {
                        vuid = (cmd_type_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2-event-03831"
                                                             : "VUID-vkCmdResetEvent2-event-03832";
                    }
                    const char *const message =
                        "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
                    skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
                                                sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
                                                CommandTypeString(sync_event->last_command), kIgnored);
                    break;
                }
                case SyncEventState::SetRace: {
                    // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus ignored
                    // for this event
                    const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
                    const char *const message =
                        "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
                    const char *const reason = "First synchronization scope is undefined.";
                    skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
                                                sync_state.report_data->FormatHandle(event_handle).c_str(),
                                                CommandTypeString(sync_event->last_command), reason, kIgnored);
                    break;
                }
                case SyncEventState::MissingStageBits: {
                    const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
                    // Issue error message that event waited for is not in wait events scope
                    const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
                    const char *const message = "%s: %s stageMask 0x%" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
                                                ". Bits missing from srcStageMask %s. %s";
                    skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
                                                sync_state.report_data->FormatHandle(event_handle).c_str(),
                                                sync_event->scope.mask_param, src_exec_scope.mask_param,
                                                sync_utils::StringPipelineStageFlags(missing_bits).c_str(), kIgnored);
                    break;
                }
                case SyncEventState::SetVsWait2: {
                    skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2-pEvents-03837",
                                                "%s: Follows set of %s by %s. Disallowed.", CmdName(),
                                                sync_state.report_data->FormatHandle(event_handle).c_str(),
                                                CommandTypeString(sync_event->last_command));
                    break;
                }
                case SyncEventState::MissingSetEvent: {
                    // TODO: There are conditions at queue submit time where we can definitively say that
                    // a missing set event is an error. Add those if not captured in CoreChecks
                    break;
                }
                default:
                    assert(ignore_reason == SyncEventState::NotIgnored);
            }
        } else if (barrier_set.image_memory_barriers.size()) {
            const auto &image_memory_barriers = barrier_set.image_memory_barriers;
            const auto *context = exec_context.GetCurrentAccessContext();
            assert(context);
            for (const auto &image_memory_barrier : image_memory_barriers) {
                if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
                const auto *image_state = image_memory_barrier.image.get();
                if (!image_state) continue;
                const auto &subresource_range = image_memory_barrier.range;
                const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
                const auto hazard = context->DetectImageBarrierHazard(*image_state, subresource_range, sync_event->scope.exec_scope,
                                                                      src_access_scope, queue_id, *sync_event,
                                                                      AccessContext::DetectOptions::kDetectAll);
                if (hazard.hazard) {
                    skip |= sync_state.LogError(image_state->image(), string_SyncHazardVUID(hazard.hazard),
                                                "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
                                                string_SyncHazard(hazard.hazard), image_memory_barrier.index,
                                                sync_state.report_data->FormatHandle(image_state->image()).c_str(),
                                                exec_context.FormatHazard(hazard).c_str());
                    break;
                }
            }
        }
        // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2 VUID - vkCmdWaitEvents2KHR - pEvents -
        // 03839
        barrier_set_index += barrier_set_incr;
    }

    // Note that we can't check for HOST in pEvents as we don't track that set event type
    const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
    if (extra_stage_bits) {
        // Issue error message that event waited for is not in wait events scope
        // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
        const char *const vuid =
            (CMD_WAITEVENTS == cmd_type_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2-pEvents-03838";
        const char *const message =
            "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
        const auto handle = exec_context.Handle();
        if (events_not_found) {
            skip |= sync_state.LogInfo(handle, vuid, message, CmdName(), barrier_mask_params,
                                       sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
                                       " vkCmdSetEvent may be in previously submitted command buffer.");
        } else {
            skip |= sync_state.LogError(handle, vuid, message, CmdName(), barrier_mask_params,
                                        sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
        }
    }
    return skip;
}

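// Wait-events variant of the functor factory: RestrictToEvent narrows each barrier's source execution and
// access scope to what the event actually captured at set time, and every range generator is filtered
// through the event's first scope, so only accesses the set-event saw are affected by the wait.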
struct SyncOpWaitEventsFunctorFactory {
    using BarrierOpFunctor = WaitEventBarrierOp;
    using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
    using GlobalBarrierOpFunctor = WaitEventBarrierOp;
    using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
    using BufferRange = EventSimpleRangeGenerator;
    using ImageRange = EventImageRangeGenerator;
    using GlobalRange = EventSimpleRangeGenerator;

    // Need to restrict to only valid exec and access scope for this event
    // Pass by value is intentional to get a copy we can change without modifying the passed barrier
    SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
        barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
        barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
        return barrier;
    }
    ApplyFunctor MakeApplyFunctor(QueueId queue_id, const SyncBarrier &barrier_arg, bool layout_transition) const {
        auto barrier = RestrictToEvent(barrier_arg);
        return ApplyFunctor(BarrierOpFunctor(queue_id, sync_event->first_scope_tag, barrier, layout_transition));
    }
    GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
        return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
    }
    GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const QueueId queue_id, const SyncBarrier &barrier_arg) const {
        auto barrier = RestrictToEvent(barrier_arg);
        return GlobalBarrierOpFunctor(queue_id, sync_event->first_scope_tag, barrier, false);
    }

    BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
        const AccessAddressType address_type = GetAccessAddressType(buffer);
        const auto base_address = ResourceBaseAddress(buffer);
        ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
        EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
        return filtered_range_gen;
    }
    ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
        if (!SimpleBinding(image)) return ImageRange();
        const auto address_type = GetAccessAddressType(image);
        const auto base_address = ResourceBaseAddress(image);
        subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), subresource_range, base_address,
                                                                 false);
        EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);

        return filtered_range_gen;
    }
    GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
        return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
    }
    SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
    SyncEventState *sync_event;
};

ResourceUsageTag SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) {
    const auto tag = cb_context->NextCommandTag(cmd_type_);

    ReplayRecord(*cb_context, tag);
    return tag;
}

void SyncOpWaitEvents::ReplayRecord(CommandExecutionContext &exec_context, ResourceUsageTag tag) const {
    // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
    // all accesses. Can instead import for all first_scopes, or a union of them, if this becomes a performance/memory issue,
    // but with no idea of the performance of the union, nor of whether it even matters... take the simplest approach here.
    if (!exec_context.ValidForSyncOps()) return;
    AccessContext *access_context = exec_context.GetCurrentAccessContext();
    SyncEventsContext *events_context = exec_context.GetCurrentEventsContext();
    const QueueId queue_id = exec_context.GetQueueId();

    access_context->ResolvePreviousAccesses();

    size_t barrier_set_index = 0;
    size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
    assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
    for (auto &event_shared : events_) {
        if (!event_shared.get()) continue;
        auto *sync_event = events_context->GetFromShared(event_shared);

        sync_event->last_command = cmd_type_;
        sync_event->last_command_tag = tag;

        const auto &barrier_set = barriers_[barrier_set_index];
        const auto &dst = barrier_set.dst_exec_scope;
        if (!sync_event->IsIgnoredByWait(cmd_type_, barrier_set.src_exec_scope.mask_param)) {
            // These apply barriers one at a time as they are restricted to the resource ranges specified per each barrier,
            // but do not update the dependency chain information (but set the "pending" state) // s.t. the order independence
            // of the barriers is maintained.
            SyncOpWaitEventsFunctorFactory factory(sync_event);
            ApplyBarriers(barrier_set.buffer_memory_barriers, factory, queue_id, tag, access_context);
            ApplyBarriers(barrier_set.image_memory_barriers, factory, queue_id, tag, access_context);
            ApplyGlobalBarriers(barrier_set.memory_barriers, factory, queue_id, tag, access_context);

            // Apply the global barrier to the event itself (for race condition tracking)
            // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
            sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
            sync_event->barriers |= dst.exec_scope;
        } else {
            // We ignored this wait, so we don't have any effective synchronization barriers for it.
            sync_event->barriers = 0U;
        }
        barrier_set_index += barrier_set_incr;
    }

    // Apply the pending barriers
    ResolvePendingBarrierFunctor apply_pending_action(tag);
    access_context->ApplyToContext(apply_pending_action);
}

bool SyncOpWaitEvents::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                      ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    return DoValidate(exec_context, base_tag);
}

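// As with the AMD marker write earlier in this file, the 2AMD variant models the marker as a 4-byte
// transfer write at dstOffset and checks it for hazards against the current access context.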
bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
                                                            VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdWriteBufferMarker2AMD: Hazard %s for dstBuffer %s. Access info %s.",
                             string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
                             cb_access_context->FormatHazard(hazard).c_str());
        }
    }
    return skip;
}

void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
    events_.reserve(event_count);
    for (uint32_t event_index = 0; event_index < event_count; event_index++) {
        events_.emplace_back(sync_state.Get<EVENT_STATE>(events[event_index]));
    }
}

SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
                                   VkPipelineStageFlags2KHR stageMask)
    : SyncOpBase(cmd_type),
      event_(sync_state.Get<EVENT_STATE>(event)),
      exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}

bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
    return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
}

bool SyncOpResetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
    auto *events_context = exec_context.GetCurrentEventsContext();
    assert(events_context);
    bool skip = false;
    if (!events_context) return skip;

    const auto &sync_state = exec_context.GetSyncState();
    const auto *sync_event = events_context->Get(event_);
    if (!sync_event) return skip;  // Core, Lifetimes, or Param check needs to catch invalid events.

    if (sync_event->last_command_tag > base_tag) return skip;  // if we validated this in recording of the secondary, don't repeat

    const char *const set_wait =
        "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
        "hazards.";
    const char *message = set_wait;  // Only one message this call.
    if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
        const char *vuid = nullptr;
        switch (sync_event->last_command) {
            case CMD_SETEVENT:
            case CMD_SETEVENT2KHR:
            case CMD_SETEVENT2:
                // Needs a barrier between set and reset
                vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
                break;
            case CMD_WAITEVENTS:
            case CMD_WAITEVENTS2:
            case CMD_WAITEVENTS2KHR: {
                // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask)
                vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
                break;
            }
            default:
                // The only other valid last commands are a prior reset or "none".
                assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
                       (sync_event->last_command == CMD_RESETEVENT2) || (sync_event->last_command == CMD_RESETEVENT2KHR));
                break;
        }
        if (vuid) {
            skip |= sync_state.LogError(event_->event(), vuid, message, CmdName(),
                                        sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
                                        CommandTypeString(sync_event->last_command));
        }
    }
    return skip;
}

ResourceUsageTag SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) {
    const auto tag = cb_context->NextCommandTag(cmd_type_);
    ReplayRecord(*cb_context, tag);
    return tag;
}

bool SyncOpResetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                      ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    return DoValidate(exec_context, base_tag);
}

void SyncOpResetEvent::ReplayRecord(CommandExecutionContext &exec_context, ResourceUsageTag tag) const {
    if (!exec_context.ValidForSyncOps()) return;
    SyncEventsContext *events_context = exec_context.GetCurrentEventsContext();

    auto *sync_event = events_context->GetFromShared(event_);
    if (!sync_event) return;  // Core, Lifetimes, or Param check needs to catch invalid events.

    // Update the event state
    sync_event->last_command = cmd_type_;
    sync_event->last_command_tag = tag;
    sync_event->unsynchronized_set = CMD_NONE;
    sync_event->ResetFirstScope();
    sync_event->barriers = 0U;
}

SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
                               VkPipelineStageFlags2KHR stageMask, const AccessContext *access_context)
    : SyncOpBase(cmd_type),
      event_(sync_state.Get<EVENT_STATE>(event)),
      recorded_context_(),
      src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
      dep_info_() {
    // Snapshot the current access_context for later inspection at wait time.
    // NOTE: This appears brute force, but given that we only save a "first-last" model of access history, the current
    // access context (including barrier state for chaining) won't necessarily contain the needed information at Wait
    // or Submit time.
    if (access_context) {
        recorded_context_ = std::make_shared<const AccessContext>(*access_context);
    }
}

SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd_type, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
                               const VkDependencyInfoKHR &dep_info, const AccessContext *access_context)
    : SyncOpBase(cmd_type),
      event_(sync_state.Get<EVENT_STATE>(event)),
      recorded_context_(),
      src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
      dep_info_(new safe_VkDependencyInfo(&dep_info)) {
    if (access_context) {
        recorded_context_ = std::make_shared<const AccessContext>(*access_context);
    }
}

bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
    return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
}

bool SyncOpSetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                    ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    return DoValidate(exec_context, base_tag);
}

bool SyncOpSetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
    bool skip = false;

    const auto &sync_state = exec_context.GetSyncState();
    auto *events_context = exec_context.GetCurrentEventsContext();
    assert(events_context);
    if (!events_context) return skip;

    const auto *sync_event = events_context->Get(event_);
    if (!sync_event) return skip;  // Core, Lifetimes, or Param check needs to catch invalid events.

    if (sync_event->last_command_tag >= base_tag) return skip;  // for replay we don't want to revalidate internal "last command"

    const char *const reset_set =
        "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
        "hazards.";
    const char *const wait =
        "%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";

    if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
        const char *vuid_stem = nullptr;
        const char *message = nullptr;
        switch (sync_event->last_command) {
            case CMD_RESETEVENT:
            case CMD_RESETEVENT2KHR:
            case CMD_RESETEVENT2:
                // Needs a barrier between reset and set
                vuid_stem = "-missingbarrier-reset";
                message = reset_set;
                break;
            case CMD_SETEVENT:
            case CMD_SETEVENT2KHR:
            case CMD_SETEVENT2:
                // Needs a barrier between set and set
                vuid_stem = "-missingbarrier-set";
                message = reset_set;
                break;
            case CMD_WAITEVENTS:
            case CMD_WAITEVENTS2:
            case CMD_WAITEVENTS2KHR:
                // Needs a barrier or is in second execution scope
                vuid_stem = "-missingbarrier-wait";
                message = wait;
                break;
            default:
                // The only other valid last command is "none".
                assert(sync_event->last_command == CMD_NONE);
                break;
        }
        if (vuid_stem) {
            assert(nullptr != message);
            std::string vuid("SYNC-");
            vuid.append(CmdName()).append(vuid_stem);
            skip |= sync_state.LogError(event_->event(), vuid.c_str(), message, CmdName(),
                                        sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
                                        CommandTypeString(sync_event->last_command));
        }
    }

    return skip;
}

ResourceUsageTag SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) {
    const auto tag = cb_context->NextCommandTag(cmd_type_);
    auto *events_context = cb_context->GetCurrentEventsContext();
    const QueueId queue_id = cb_context->GetQueueId();
    assert(recorded_context_);
    if (recorded_context_ && events_context) {
        DoRecord(queue_id, tag, recorded_context_, events_context);
    }
    return tag;
}

void SyncOpSetEvent::ReplayRecord(CommandExecutionContext &exec_context, ResourceUsageTag tag) const {
    // Create a copy of the current context, and merge in the state snapshot at record set event time
    // Note: we mustn't change the recorded context copy, as a given CB could be submitted more than once (in general)
    if (!exec_context.ValidForSyncOps()) return;
    SyncEventsContext *events_context = exec_context.GetCurrentEventsContext();
    AccessContext *access_context = exec_context.GetCurrentAccessContext();
    const QueueId queue_id = exec_context.GetQueueId();

    // Note: merged_context is a copy of the access_context, combined with the recorded context
    auto merged_context = std::make_shared<AccessContext>(*access_context);
    merged_context->ResolveFromContext(QueueTagOffsetBarrierAction(queue_id, tag), *recorded_context_);
    merged_context->Trim();  // Ensure the copy is minimal and normalized
    DoRecord(queue_id, tag, merged_context, events_context);
}

void SyncOpSetEvent::DoRecord(QueueId queue_id, ResourceUsageTag tag, const std::shared_ptr<const AccessContext> &access_context,
                              SyncEventsContext *events_context) const {
    auto *sync_event = events_context->GetFromShared(event_);
    if (!sync_event) return;  // Core, Lifetimes, or Param check needs to catch invalid events.

    // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined
    //       and we're issuing errors re: missing barriers between event commands, which if the user fixes would fix
    //       any issues caused by naive scope setting here.

    // What happens with two SetEvents is that one cannot know what group of operations will be waited for.
    // Given:
    //     Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
    // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.

    if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
        sync_event->unsynchronized_set = sync_event->last_command;
        sync_event->ResetFirstScope();
    } else if (!sync_event->first_scope) {
        // We only set the scope if there isn't one
        sync_event->scope = src_exec_scope_;

        // Save the shared_ptr to the copy of the access_context present at set time (sent to us by the caller)
        sync_event->first_scope = access_context;
        sync_event->unsynchronized_set = CMD_NONE;
        sync_event->first_scope_tag = tag;
    }
    // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
    sync_event->last_command = cmd_type_;
    sync_event->last_command_tag = tag;
    sync_event->barriers = 0U;
}

SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd_type, const SyncValidator &sync_state,
                                             const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo)
    : SyncOpBase(cmd_type), rp_context_(nullptr) {
    if (pRenderPassBegin) {
        rp_state_ = sync_state.Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
        renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
        auto fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
        if (fb_state) {
            shared_attachments_ = sync_state.GetAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
            // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer
            // copy. Note that this is safe to persist as long as shared_attachments_ is not cleared.
            attachments_.reserve(shared_attachments_.size());
            for (const auto &attachment : shared_attachments_) {
                attachments_.emplace_back(attachment.get());
            }
        }
        if (pSubpassBeginInfo) {
            subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
        }
    }
}

bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
    // Check if any of the layout transitions are hazardous.... but we don't have the renderpass context to work with, so we
    // construct a temporary one below.
    bool skip = false;

    assert(rp_state_.get());
    if (nullptr == rp_state_.get()) return skip;
    auto &rp_state = *rp_state_.get();

    const uint32_t subpass = 0;

    // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
    // hasn't happened yet)
    const std::vector<AccessContext> empty_context_vector;
    AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
                               cb_context.GetCurrentAccessContext());

    // Validate attachment operations
    if (attachments_.size() == 0) return skip;
    const auto &render_area = renderpass_begin_info_.renderArea;

    // Since there isn't a valid RenderPassAccessContext until Record, we need to create the view/generator list... we could
    // limit this by predicating on whether subpass 0 uses the attachment, if it is too expensive to create the full list
    // redundantly here. More broadly we could look at thread specific state shared between Validate and Record, as is done
    // for other heavyweight operations (though it's currently a messy approach).
    AttachmentViewGenVector view_gens = RenderPassAccessContext::CreateAttachmentViewGen(render_area, attachments_);
    skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, view_gens, cmd_type_);

    // Validate load operations if there were no layout transition hazards
    if (!skip) {
        temp_context.RecordLayoutTransitions(rp_state, subpass, view_gens, kInvalidTag);
        skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, view_gens, cmd_type_);
    }

    return skip;
}

ResourceUsageTag SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) {
    assert(rp_state_.get());
    if (nullptr == rp_state_.get()) return cb_context->NextCommandTag(cmd_type_);
    const ResourceUsageTag begin_tag =
        cb_context->RecordBeginRenderPass(cmd_type_, *rp_state_.get(), renderpass_begin_info_.renderArea, attachments_);

    // Note: this state update must be after RecordBeginRenderPass as there is no current render pass until that function runs
    rp_context_ = cb_context->GetCurrentRenderPassContext();

    return begin_tag;
}

bool SyncOpBeginRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                           ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    return false;
}

void SyncOpBeginRenderPass::ReplayRecord(CommandExecutionContext &exec_context, ResourceUsageTag tag) const {
    // Need to update the exec_context's state (which for RenderPass operations *must* be a QueueBatchContext, as
    // render pass operations are not allowed in secondary command buffers).
    const QueueId queue_id = exec_context.GetQueueId();
    assert(queue_id != QueueSyncState::kQueueIdInvalid);  // Renderpass replay only valid at submit (not exec) time
    if (queue_id == QueueSyncState::kQueueIdInvalid) return;

    exec_context.BeginRenderPassReplay(*this, tag);
}

SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd_type, const SyncValidator &sync_state,
                                     const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo)
    : SyncOpBase(cmd_type) {
    if (pSubpassBeginInfo) {
        subpass_begin_info_.initialize(pSubpassBeginInfo);
    }
    if (pSubpassEndInfo) {
        subpass_end_info_.initialize(pSubpassEndInfo);
    }
}

bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
    if (!renderpass_context) return skip;

    skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), cmd_type_);
    return skip;
}

ResourceUsageTag SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) {
    return cb_context->RecordNextSubpass(cmd_type_);
}

bool SyncOpNextSubpass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                       ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    return false;
}

SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd_type, const SyncValidator &sync_state,
                                         const VkSubpassEndInfo *pSubpassEndInfo)
    : SyncOpBase(cmd_type) {
    if (pSubpassEndInfo) {
        subpass_end_info_.initialize(pSubpassEndInfo);
    }
}

void SyncOpNextSubpass::ReplayRecord(CommandExecutionContext &exec_context, ResourceUsageTag tag) const {
    exec_context.NextSubpassReplay();
}

bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();

    if (!renderpass_context) return skip;
    skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), cmd_type_);
    return skip;
}

ResourceUsageTag SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) {
    return cb_context->RecordEndRenderPass(cmd_type_);
}

bool SyncOpEndRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
                                         ResourceUsageTag base_tag, CommandExecutionContext &exec_context) const {
    return false;
}

void SyncOpEndRenderPass::ReplayRecord(CommandExecutionContext &exec_context, ResourceUsageTag tag) const {
    exec_context.EndRenderPassReplay();
}

void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
                                                          VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
    StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

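// Secondary command buffers are validated by replaying each recorded context against a proxy copy of the
// primary's access context, so hazards between the secondaries (and against prior primary work) are detected
// without mutating the primary's real state during the validate phase.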
bool SyncValidator::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
                                                      const VkCommandBuffer *pCommandBuffers) const {
    bool skip = StateTracker::PreCallValidateCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);

    // Heavyweight, but we need a proxy copy of the active command buffer access context
    CommandBufferAccessContext proxy_cb_context(*cb_context, CommandBufferAccessContext::AsProxyContext());

    // Make working copies of the access and events contexts
    for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
        proxy_cb_context.NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);

        const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
        if (!recorded_cb_context) continue;

        const auto *recorded_context = recorded_cb_context->GetCurrentAccessContext();
        assert(recorded_context);
        skip |= recorded_cb_context->ValidateFirstUse(proxy_cb_context, "vkCmdExecuteCommands", cb_index);

        // The barriers have already been applied in ValidateFirstUse
        ResourceUsageRange tag_range = proxy_cb_context.ImportRecordedAccessLog(*recorded_cb_context);
        proxy_cb_context.ResolveExecutedCommandBuffer(*recorded_context, tag_range.begin);
    }

    return skip;
}

void SyncValidator::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
                                                    const VkCommandBuffer *pCommandBuffers) {
    StateTracker::PreCallRecordCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
        cb_context->NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
        const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
        if (!recorded_cb_context) continue;
        cb_context->RecordExecutedCommandBuffer(*recorded_cb_context);
    }
}

void SyncValidator::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
    StateTracker::PostCallRecordQueueWaitIdle(queue, result);
    if ((result != VK_SUCCESS) || (!enabled[sync_validation_queue_submit]) || (queue == VK_NULL_HANDLE)) return;

    const auto queue_state = GetQueueSyncStateShared(queue);
    if (!queue_state) return;  // Invalid queue
    QueueId waited_queue = queue_state->GetQueueId();
    ApplyTaggedWait(waited_queue, ResourceUsageRecord::kMaxIndex);

    // Eliminate waitable fences from the waited queue.
    layer_data::EraseIf(waitable_fences_, [waited_queue](const SignaledFence &sf) { return sf.second.queue_id == waited_queue; });
}

void SyncValidator::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
    StateTracker::PostCallRecordDeviceWaitIdle(device, result);

    QueueBatchContext::BatchSet queue_batch_contexts = GetQueueBatchSnapshot();
    for (auto &batch : queue_batch_contexts) {
        batch->ApplyDeviceWait();
    }

    // As we've waited for everything on the device, any pending fence waits are mooted.
    waitable_fences_.clear();
}

template <>
thread_local layer_data::optional<QueueSubmitCmdState> layer_data::TlsGuard<QueueSubmitCmdState>::payload_{};

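// TlsGuard hands per-call state from the PreCallValidate phase to the matching PostCallRecord phase on the
// same thread, via the thread_local payload declared above. A sketch of the pattern used by the functions
// below:
//
//     // Validate phase: construct the payload
//     layer_data::TlsGuard<QueueSubmitCmdState> cmd_state(&skip, func_name, signaled_semaphores_);
//     // Record phase: a default-constructed guard reclaims the payload, cleaning it up on scope exit
//     layer_data::TlsGuard<QueueSubmitCmdState> cmd_state;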
bool SyncValidator::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                               VkFence fence) const {
    SubmitInfoConverter submit_info(submitCount, pSubmits);
    return ValidateQueueSubmit(queue, submitCount, submit_info.info2s.data(), fence, "vkQueueSubmit");
}

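// Common validation for all queue submit entry points. The VkSubmitInfo path (vkQueueSubmit above) is first
// converted to VkSubmitInfo2 form by SubmitInfoConverter, so a single code path handles every submit flavor.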
bool SyncValidator::ValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2 *pSubmits, VkFence fence,
                                        const char *func_name) const {
    bool skip = false;

    // Since this early return is above the TlsGuard, the Record phase must also be.
    if (!enabled[sync_validation_queue_submit]) return skip;

    layer_data::TlsGuard<QueueSubmitCmdState> cmd_state(&skip, func_name, signaled_semaphores_);
    cmd_state->queue = GetQueueSyncStateShared(queue);
    if (!cmd_state->queue) return skip;  // Invalid Queue

    // The submit id is a mutable atomic which is not recoverable on a skip == true condition
    uint64_t submit_id = cmd_state->queue->ReserveSubmitId();

    // Verify each submit batch.
    // Since the last batch from the queue state is const, we need to track the last_batch separately from the
    // most recently created batch
    std::shared_ptr<const QueueBatchContext> last_batch = cmd_state->queue->LastBatch();
    std::shared_ptr<QueueBatchContext> batch;
    for (uint32_t batch_idx = 0; batch_idx < submitCount; batch_idx++) {
        const VkSubmitInfo2 &submit = pSubmits[batch_idx];
        batch = std::make_shared<QueueBatchContext>(*this, *cmd_state->queue, submit_id, batch_idx);
        batch->SetupCommandBufferInfo(submit);
        batch->SetupAccessContext(last_batch, submit, cmd_state->signaled);

        // Skip import and validation of empty batches
        if (batch->GetTagRange().size()) {
            batch->SetupBatchTags();
            skip |= batch->DoQueueSubmitValidate(*this, *cmd_state, submit);
        }

        // Empty batches could have semaphores, though.
        for (uint32_t sem_idx = 0; sem_idx < submit.signalSemaphoreInfoCount; ++sem_idx) {
            const VkSemaphoreSubmitInfo &semaphore_info = submit.pSignalSemaphoreInfos[sem_idx];
            // Make a copy of the state, signal the copy and pend it...
            auto sem_state = Get<SEMAPHORE_STATE>(semaphore_info.semaphore);
            if (!sem_state) continue;
            cmd_state->signaled.SignalSemaphore(sem_state, batch, semaphore_info);
        }
        // Unless the previous batch was referenced by a signal, the QueueBatchContext will self destruct; as we resolve
        // previous batches, we can let any contexts we've fully referenced go.
        last_batch = batch;
    }
    // The most recently created batch will become the queue's "last batch" in the record phase
    if (batch) {
        cmd_state->last_batch = std::move(batch);
    }

    // Note that if we skip, guard cleans up for us, but cannot release the reserved tag range
    return skip;
}

void SyncValidator::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
                                              VkResult result) {
    StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);

    RecordQueueSubmit(queue, fence, result);
}

void SyncValidator::RecordQueueSubmit(VkQueue queue, VkFence fence, VkResult result) {
    // If this return is above the TlsGuard, then the Validate phase return must also be.
    if (!enabled[sync_validation_queue_submit]) return;  // Queue submit validation must be affirmatively enabled

    // The earliest return (when enabled) must be *after* the TlsGuard, as it is the TlsGuard that cleans up the cmd_state
    // static payload
    layer_data::TlsGuard<QueueSubmitCmdState> cmd_state;

    if (VK_SUCCESS != result) return;  // dispatched QueueSubmit failed
    if (!cmd_state->queue) return;     // Validation couldn't find a valid queue object

    // Don't need to look up the queue state again, but we need a non-const version
    std::shared_ptr<QueueSyncState> queue_state = std::const_pointer_cast<QueueSyncState>(std::move(cmd_state->queue));

    // Import into the global state the semaphores we signaled on the cmd_state QueueBatchContexts.
    // NOTE: All conserved QueueBatchContexts need to have their access logs reset to use the global logger, and the only
    // conserved QBC's are those referenced by unwaited signals and the last batch.
    for (auto &sig_sem : cmd_state->signaled) {
        if (sig_sem.second && sig_sem.second->batch) {
            auto &sig_batch = sig_sem.second->batch;
            // Batches retained for a signaled semaphore don't need to retain event data, unless it's the last batch in the
            // submit
            if (sig_batch != cmd_state->last_batch) {
                sig_batch->ResetEventsContext();
                // Make sure that retained batches are minimal, and trim after the events context has been cleared.
                sig_batch->Trim();
            }
        }
        signaled_semaphores_.Import(sig_sem.first, std::move(sig_sem.second));
    }
    cmd_state->signaled.Reset();

    // Update the queue to point to the last batch from the submit
    if (cmd_state->last_batch) {
        // Clean up the events data in the previous last batch on the queue, as only the subsequent batches have valid use for
        // them, and the QueueBatchContext::Setup calls have been copying them along from batch to batch during submit.
        auto last_batch = queue_state->LastBatch();
        if (last_batch) {
            last_batch->ResetEventsContext();
        }
        cmd_state->last_batch->Trim();
        queue_state->SetLastBatch(std::move(cmd_state->last_batch));
    }

    ResourceUsageRange fence_tag_range = ReserveGlobalTagRange(1U);
    UpdateFenceWaitInfo(fence, queue_state->GetQueueId(), fence_tag_range.begin);
}

bool SyncValidator::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
                                                   VkFence fence) const {
    return ValidateQueueSubmit(queue, submitCount, pSubmits, fence, "vkQueueSubmit2KHR");
}

bool SyncValidator::PreCallValidateQueueSubmit2(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
                                                VkFence fence) const {
    return ValidateQueueSubmit(queue, submitCount, pSubmits, fence, "vkQueueSubmit2");
}

void SyncValidator::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits,
                                                  VkFence fence, VkResult result) {
    StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result);
    RecordQueueSubmit(queue, fence, result);
}

void SyncValidator::PostCallRecordQueueSubmit2(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence,
                                               VkResult result) {
    StateTracker::PostCallRecordQueueSubmit2(queue, submitCount, pSubmits, fence, result);
    RecordQueueSubmit(queue, fence, result);
}

void SyncValidator::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
    StateTracker::PostCallRecordGetFenceStatus(device, fence, result);
    if (!enabled[sync_validation_queue_submit]) return;
    if (result == VK_SUCCESS) {
        // fence is signaled, mark it as waited for
        WaitForFence(fence);
    }
}

void SyncValidator::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                                uint64_t timeout, VkResult result) {
    StateTracker::PostCallRecordWaitForFences(device, fenceCount, pFences, waitAll, timeout, result);
    if (!enabled[sync_validation_queue_submit]) return;
    if ((result == VK_SUCCESS) && ((VK_TRUE == waitAll) || (1 == fenceCount))) {
        // We can only know the pFences have signaled if we waited for all of them, or there was only one of them
        for (uint32_t i = 0; i < fenceCount; i++) {
            WaitForFence(pFences[i]);
        }
    }
}

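// AttachmentViewGen pre-builds the ImageRangeGen variants an attachment access may need: the full view
// subresource range, the render-area-restricted range, and (for combined depth/stencil views) depth-only
// and stencil-only render-area ranges, so later validation can select the appropriate generator via
// GetRangeGen() without re-deriving it.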
AttachmentViewGen::AttachmentViewGen(const IMAGE_VIEW_STATE *view, const VkOffset3D &offset, const VkExtent3D &extent)
    : view_(view), view_mask_(), gen_store_() {
    if (!view_ || !view_->image_state || !SimpleBinding(*view_->image_state)) return;
    const IMAGE_STATE &image_state = *view_->image_state.get();
    const auto base_address = ResourceBaseAddress(image_state);
    const auto *encoder = image_state.fragment_encoder.get();
    if (!encoder) return;
    // Get offset and extent for the view, accounting for possible depth slicing
    const VkOffset3D zero_offset = view->GetOffset();
    const VkExtent3D &image_extent = view->GetExtent();
    // Intentional copy
    VkImageSubresourceRange subres_range = view_->normalized_subresource_range;
    view_mask_ = subres_range.aspectMask;
    gen_store_[Gen::kViewSubresource].emplace(*encoder, subres_range, zero_offset, image_extent, base_address,
                                              view->IsDepthSliced());
    gen_store_[Gen::kRenderArea].emplace(*encoder, subres_range, offset, extent, base_address, view->IsDepthSliced());

    const auto depth = view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT;
    if (depth && (depth != view_mask_)) {
        subres_range.aspectMask = depth;
        gen_store_[Gen::kDepthOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address, view->IsDepthSliced());
    }
    const auto stencil = view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT;
    if (stencil && (stencil != view_mask_)) {
        subres_range.aspectMask = stencil;
        gen_store_[Gen::kStencilOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address,
                                                        view->IsDepthSliced());
    }
}

const ImageRangeGen *AttachmentViewGen::GetRangeGen(AttachmentViewGen::Gen gen_type) const {
    const ImageRangeGen *got = nullptr;
    switch (gen_type) {
        case kViewSubresource:
            got = &gen_store_[kViewSubresource];
            break;
        case kRenderArea:
            got = &gen_store_[kRenderArea];
            break;
        case kDepthOnlyRenderArea:
            got =
                (view_mask_ == VK_IMAGE_ASPECT_DEPTH_BIT) ? &gen_store_[Gen::kRenderArea] : &gen_store_[Gen::kDepthOnlyRenderArea];
            break;
        case kStencilOnlyRenderArea:
            got = (view_mask_ == VK_IMAGE_ASPECT_STENCIL_BIT) ? &gen_store_[Gen::kRenderArea]
                                                              : &gen_store_[Gen::kStencilOnlyRenderArea];
            break;
        default:
            assert(got);
    }
    return got;
}

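// Selects which pre-built render-area range generator applies to a depth/stencil attachment operation:
//     depth_op && stencil_op  -> kRenderArea (both aspects)
//     depth_op only           -> kDepthOnlyRenderArea
//     stencil_op only         -> kStencilOnlyRenderArea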
AttachmentViewGen::Gen AttachmentViewGen::GetDepthStencilRenderAreaGenType(bool depth_op, bool stencil_op) const {
    assert(IsValid());
    assert(view_mask_ & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
    if (depth_op) {
        assert(view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT);
        if (stencil_op) {
            assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
            return kRenderArea;
        }
        return kDepthOnlyRenderArea;
    }
    if (stencil_op) {
        assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
        return kStencilOnlyRenderArea;
    }

    assert(depth_op || stencil_op);
    return kRenderArea;
}

AccessAddressType AttachmentViewGen::GetAddressType() const { return AccessContext::ImageAddressType(*view_->image_state); }

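// Apply an execution barrier to every event in the context: any event whose effective barriers chain with the
// barrier's src scope (or an ALL_COMMANDS barrier) has the dst scope added to its effective barriers.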
void SyncEventsContext::ApplyBarrier(const SyncExecScope &src, const SyncExecScope &dst, ResourceUsageTag tag) {
    const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
    for (auto &event_pair : map_) {
        assert(event_pair.second);  // Shouldn't be storing empty
        auto &sync_event = *event_pair.second;
        // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls,
        // but only if the event's last command occurred before the tag
        if (((sync_event.barriers & src.exec_scope) || all_commands_bit) && (sync_event.last_command_tag <= tag)) {
            sync_event.barriers |= dst.exec_scope;
            sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        }
    }
}

void SyncEventsContext::ApplyTaggedWait(VkQueueFlags queue_flags, ResourceUsageTag tag) {
    const SyncExecScope src_scope =
        SyncExecScope::MakeSrc(queue_flags, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_2_HOST_BIT);
    const SyncExecScope dst_scope = SyncExecScope::MakeDst(queue_flags, VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT);
    ApplyBarrier(src_scope, dst_scope, tag);
}

SyncEventsContext &SyncEventsContext::DeepCopy(const SyncEventsContext &from) {
    // We need a deep copy of the const context to update during validation phase
    for (const auto &event : from.map_) {
        map_.emplace(event.first, std::make_shared<SyncEventState>(*event.second));
    }
    return *this;
}

void SyncEventsContext::AddReferencedTags(ResourceUsageTagSet &referenced) const {
    for (const auto &event : map_) {
        const std::shared_ptr<const SyncEventState> &event_state = event.second;
        if (event_state) {
            event_state->AddReferencedTags(referenced);
        }
    }
}

QueueBatchContext::QueueBatchContext(const SyncValidator &sync_state, const QueueSyncState &queue_state, uint64_t submit_index,
                                     uint32_t batch_index)
    : CommandExecutionContext(&sync_state),
      queue_state_(&queue_state),
      tag_range_(0, 0),
      current_access_context_(&access_context_),
      batch_log_(),
      batch_(queue_state, submit_index, batch_index) {}

void QueueBatchContext::Trim() {
    // Clean up unneeded access context contents and log information
    access_context_.Trim();

    ResourceUsageTagSet used_tags;
    access_context_.AddReferencedTags(used_tags);

    // Note: AccessContexts in the SyncEventsState are trimmed when created.
    events_context_.AddReferencedTags(used_tags);

    // Only conserve AccessLog references that are referenced by used_tags
    batch_log_.Trim(used_tags);
}

void QueueBatchContext::ResolveSubmittedCommandBuffer(const AccessContext &recorded_context, ResourceUsageTag offset) {
    GetCurrentAccessContext()->ResolveFromContext(QueueTagOffsetBarrierAction(GetQueueId(), offset), recorded_context);
}

VulkanTypedHandle QueueBatchContext::Handle() const { return queue_state_->Handle(); }

void QueueBatchContext::ApplyTaggedWait(QueueId queue_id, ResourceUsageTag tag) {
    ResourceAccessState::QueueTagPredicate predicate{queue_id, tag};
    access_context_.EraseIf([&predicate](ResourceAccessRangeMap::value_type &access) {
        // Apply..Wait returns true if the waited access is empty...
        return access.second.ApplyQueueTagWait(predicate);
    });

    if (queue_id == GetQueueId()) {
        events_context_.ApplyTaggedWait(GetQueueFlags(), tag);
    }
}

// Clear all accesses
void QueueBatchContext::ApplyDeviceWait() {
    access_context_.Reset();
    events_context_.ApplyTaggedWait(GetQueueFlags(), ResourceUsageRecord::kMaxIndex);
}

HazardResult QueueBatchContext::DetectFirstUseHazard(const ResourceUsageRange &tag_range) {
    // Queue batch handling requires dealing with renderpass state and picking the correct access context
    if (rp_replay_) {
        return rp_replay_.replay_context->DetectFirstUseHazard(GetQueueId(), tag_range, *current_access_context_);
    }
    return current_replay_->GetCurrentAccessContext()->DetectFirstUseHazard(GetQueueId(), tag_range, access_context_);
}

void QueueBatchContext::BeginRenderPassReplay(const SyncOpBeginRenderPass &begin_op, const ResourceUsageTag tag) {
    current_access_context_ = rp_replay_.Begin(GetQueueFlags(), begin_op, access_context_);
    current_access_context_->ResolvePreviousAccesses();
}

void QueueBatchContext::NextSubpassReplay() {
    current_access_context_ = rp_replay_.Next();
    current_access_context_->ResolvePreviousAccesses();
}

void QueueBatchContext::EndRenderPassReplay() {
    rp_replay_.End(access_context_);
    current_access_context_ = &access_context_;
}

AccessContext *QueueBatchContext::RenderPassReplayState::Begin(VkQueueFlags queue_flags, const SyncOpBeginRenderPass &begin_op_,
                                                               const AccessContext &external_context) {
    Reset();

    begin_op = &begin_op_;
    subpass = 0;

    const RenderPassAccessContext *rp_context = begin_op->GetRenderPassAccessContext();
    assert(rp_context);
    replay_context = &rp_context->GetContexts()[0];

    InitSubpassContexts(queue_flags, *rp_context->GetRenderPassState(), &external_context, subpass_contexts);
    return &subpass_contexts[0];
}

AccessContext *QueueBatchContext::RenderPassReplayState::Next() {
    subpass++;

    const RenderPassAccessContext *rp_context = begin_op->GetRenderPassAccessContext();

    replay_context = &rp_context->GetContexts()[subpass];
    return &subpass_contexts[subpass];
}

void QueueBatchContext::RenderPassReplayState::End(AccessContext &external_context) {
    external_context.ResolveChildContexts(subpass_contexts);
    Reset();
}

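// Barrier action used for cross-queue semaphore waits: applies the semaphore's signal/wait scopes to each
// access, in contrast to the all-access memory barrier used for same-queue waits (see
// QueueBatchContext::ResolveOneWaitSemaphore below).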
class ApplySemaphoreBarrierAction {
  public:
    ApplySemaphoreBarrierAction(const SemaphoreScope &signal, const SemaphoreScope &wait) : signal_(signal), wait_(wait) {}
    void operator()(ResourceAccessState *access) const { access->ApplySemaphore(signal_, wait_); }

  private:
    const SemaphoreScope &signal_;
    const SemaphoreScope wait_;
};

std::shared_ptr<QueueBatchContext> QueueBatchContext::ResolveOneWaitSemaphore(VkSemaphore sem, VkPipelineStageFlags2 wait_mask,
                                                                              SignaledSemaphores &signaled) {
    auto sem_state = sync_state_->Get<SEMAPHORE_STATE>(sem);
    if (!sem_state) return nullptr;  // Semaphore validity is handled by CoreChecks

    // When signal state goes out of scope, the signal information will be dropped, as Unsignal has released ownership.
    auto signal_state = signaled.Unsignal(sem);
    if (!signal_state) return nullptr;  // Invalid signal, skip it.

    assert(signal_state->batch);

    const SemaphoreScope &signal_scope = signal_state->first_scope;
    const auto queue_flags = queue_state_->GetQueueFlags();
    SemaphoreScope wait_scope{GetQueueId(), SyncExecScope::MakeDst(queue_flags, wait_mask)};
    if (signal_scope.queue == wait_scope.queue) {
        // If signal queue == wait queue, signal is treated as a memory barrier with an access scope equal to the
        // valid accesses for the sync scope.
        SyncBarrier sem_barrier(signal_scope, wait_scope, SyncBarrier::AllAccess());
        const BatchBarrierOp sem_barrier_op(wait_scope.queue, sem_barrier);
        access_context_.ResolveFromContext(sem_barrier_op, signal_state->batch->access_context_);
        events_context_.ApplyBarrier(sem_barrier.src_exec_scope, sem_barrier.dst_exec_scope, ResourceUsageRecord::kMaxIndex);
    } else {
        ApplySemaphoreBarrierAction sem_op(signal_scope, wait_scope);
        access_context_.ResolveFromContext(sem_op, signal_state->batch->access_context_);
    }
    // Cannot move from the signal state because it could be from the const global state, and C++ doesn't
    // enforce deep constness.
    return signal_state->batch;
}

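// Builds this batch's starting access context: copies the event state from the previous batch on this queue,
// resolves in each batch waited on (applying each semaphore's effective barriers), adds a non-barriered
// "submit order" import of the previous batch if no semaphore already referenced it, and snapshots the other
// queues' last batches as async contexts for cross-queue hazard detection.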
John Zulaufa8700a52022-08-18 16:22:08 -06007747void QueueBatchContext::SetupAccessContext(const std::shared_ptr<const QueueBatchContext> &prev, const VkSubmitInfo2 &submit_info,
John Zulaufcb7e1672022-05-04 13:46:08 -06007748 SignaledSemaphores &signaled) {
John Zulaufe0757ba2022-06-10 16:51:45 -06007749 // Copy in the event state from the previous batch (on this queue)
7750 if (prev) {
7751 events_context_.DeepCopy(prev->events_context_);
7752 }
7753
John Zulaufecf4ac52022-06-06 10:08:42 -06007754 // Import (resolve) the batches that are waited on, with the semaphore's effective barriers applied
7755 layer_data::unordered_set<std::shared_ptr<const QueueBatchContext>> batches_resolved;
John Zulaufa8700a52022-08-18 16:22:08 -06007756 const uint32_t wait_count = submit_info.waitSemaphoreInfoCount;
7757 const VkSemaphoreSubmitInfo *wait_infos = submit_info.pWaitSemaphoreInfos;
7758 for (const auto &wait_info : layer_data::make_span(wait_infos, wait_count)) {
7759 std::shared_ptr<QueueBatchContext> resolved = ResolveOneWaitSemaphore(wait_info.semaphore, wait_info.stageMask, signaled);
John Zulaufecf4ac52022-06-06 10:08:42 -06007760 if (resolved) {
7761 batches_resolved.emplace(std::move(resolved));
7762 }
John Zulaufa8700a52022-08-18 16:22:08 -06007763 }
John Zulauf697c0e12022-04-19 16:31:12 -06007764
John Zulaufecf4ac52022-06-06 10:08:42 -06007765 // If there are no semaphores to the previous batch, make sure a "submit order" non-barriered import is done
    if (prev && !layer_data::Contains(batches_resolved, prev)) {
        access_context_.ResolveFromContext(NoopBarrierAction(), prev->access_context_);
        batches_resolved.emplace(prev);
    }

    // Get all the log information for the resolved contexts
    for (const auto &batch : batches_resolved) {
        batch_log_.Import(batch->batch_log_);
    }

    // Gather async context information for hazard checks and retain the QueueBatchContexts for the async batches
    async_batches_ =
        sync_state_->GetQueueLastBatchSnapshot([&batches_resolved](const std::shared_ptr<const QueueBatchContext> &batch) {
            return !layer_data::Contains(batches_resolved, batch);
        });
    for (const auto &async_batch : async_batches_) {
        access_context_.AddAsyncContext(async_batch->GetCurrentAccessContext());
        // We need to snapshot the async log information for async hazard reporting
        batch_log_.Import(async_batch->batch_log_);
    }
}
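
// Sketch of the resulting resolve graph for one batch (illustrative only, based on the imports
// performed above; not validator output):
//   prev batch (same queue)        --NoopBarrierAction ("submit order")--> this batch
//   waited-on batch (any queue)    --semaphore effective barrier--------> this batch
//   remaining last-batches         --> async_batches_ (checked as unordered/async accesses)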

void QueueBatchContext::SetupCommandBufferInfo(const VkSubmitInfo2 &submit_info) {
    // Create the list of command buffers to submit
    const uint32_t cb_count = submit_info.commandBufferInfoCount;
    const VkCommandBufferSubmitInfo *const cb_infos = submit_info.pCommandBufferInfos;
    command_buffers_.reserve(cb_count);

    for (const auto &cb_info : layer_data::make_span(cb_infos, cb_count)) {
        auto cb_context = sync_state_->GetAccessContextShared(cb_info.commandBuffer);
        if (cb_context) {
            tag_range_.end += cb_context->GetTagLimit();
            command_buffers_.emplace_back(static_cast<uint32_t>(&cb_info - cb_infos), std::move(cb_context));
        }
    }
}
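
// Example (assumed counts, for illustration): submitting two command buffers with tag limits 10
// and 4 grows tag_range_ to a size-14 batch-local range; SetupBatchTags() later rebases that
// range onto the global tag space reserved via sync_state_->ReserveGlobalTagRange().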

// Look up the usage information from the local or global logger
std::string QueueBatchContext::FormatUsage(ResourceUsageTag tag) const {
    std::stringstream out;
    BatchAccessLog::AccessRecord access = batch_log_[tag];
    if (access.IsValid()) {
        const BatchAccessLog::BatchRecord &batch = *access.batch;
        const ResourceUsageRecord &record = *access.record;
        // Queue and Batch information
        out << SyncNodeFormatter(*sync_state_, batch.queue->GetQueueState());
        out << ", submit: " << batch.submit_index << ", batch: " << batch.batch_index;

        // Command buffer usage information
        out << ", " << record;
        out << ", " << SyncNodeFormatter(*sync_state_, record.cb_state);
        out << ", reset_no: " << std::to_string(record.reset_count);
    }
    return out.str();
}
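
// The resulting message fragment has the general shape (illustrative; handle values are assumed,
// and the queue/command buffer portions come from SyncNodeFormatter and the record's stream
// operator):
//   "VkQueue 0x..., submit: 2, batch: 0, <usage record>, VkCommandBuffer 0x..., reset_no: 1"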

VkQueueFlags QueueBatchContext::GetQueueFlags() const { return queue_state_->GetQueueFlags(); }

QueueId QueueBatchContext::GetQueueId() const {
    QueueId id = queue_state_ ? queue_state_->GetQueueId() : QueueSyncState::kQueueIdInvalid;
    return id;
}

void QueueBatchContext::SetupBatchTags() {
    // Need new global tags for all accesses... the Reserve updates a mutable atomic
    ResourceUsageRange global_tags = sync_state_->ReserveGlobalTagRange(GetTagRange().size());
    SetTagBias(global_tags.begin);
}

void QueueBatchContext::InsertRecordedAccessLogEntries(const CommandBufferAccessContext &submitted_cb) {
    const ResourceUsageTag end_tag = batch_log_.Import(batch_, submitted_cb);
    batch_.bias = end_tag;
    batch_.cb_index++;
}

void QueueBatchContext::SetTagBias(ResourceUsageTag bias) {
    const auto size = tag_range_.size();
    tag_range_.begin = bias;
    tag_range_.end = bias + size;
    access_context_.SetStartTag(bias);
    batch_.bias = bias;
}
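
// Worked example (assumed numbers): if this batch spans 14 tags locally and ReserveGlobalTagRange
// returns [100, 114), SetTagBias(100) shifts tag_range_ to [100, 114), starts the access context
// at tag 100, and biases the batch log so that a command buffer's local tag 3 resolves to global
// tag 103.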

// Since we're updating the QueueSync state, this is Record phase and the access log needs to point to the global one
// Batch Contexts saved during signaling have their AccessLog reset when the pending signals are signaled.
// NOTE: By design, QueueBatchContexts that are neither last, nor referenced by a signal are abandoned as unowned, since
// the contexts Resolve all history from all previous contexts when created
void QueueSyncState::SetLastBatch(std::shared_ptr<QueueBatchContext> &&last) {
    last_batch_ = std::move(last);
}

// Note that the function is const, but updates the mutable submit_index to allow Validate to create correct tagging for command
// invocation scope state.
// Given that queue submits are supposed to be externally synchronized for the same queue, this should be safe without being
// atomic... but as the ops are per submit, the performance cost is negligible for the peace of mind.
uint64_t QueueSyncState::ReserveSubmitId() const { return submit_index_.fetch_add(1); }

// This is a const method, force the returned value to be const
std::shared_ptr<const SignaledSemaphores::Signal> SignaledSemaphores::GetPrev(VkSemaphore sem) const {
    std::shared_ptr<Signal> prev_state;
    if (prev_) {
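        // Note: the default factory returns the still-empty prev_state, so a lookup miss in
        // prev_->signaled_ leaves prev_state null rather than default-constructing a Signal.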
        prev_state = GetMapped(prev_->signaled_, sem, [&prev_state]() { return prev_state; });
    }
    return prev_state;
}

SignaledSemaphores::Signal::Signal(const std::shared_ptr<const SEMAPHORE_STATE> &sem_state_,
                                   const std::shared_ptr<QueueBatchContext> &batch_, const SyncExecScope &exec_scope_)
    : sem_state(sem_state_), batch(batch_), first_scope({batch->GetQueueId(), exec_scope_}) {
    // Illegal to create a signal from no batch or an invalid semaphore... caller must ensure validity
    assert(batch);
    assert(sem_state);
}

FenceSyncState::FenceSyncState() : fence(), tag(kInvalidTag), queue_id(QueueSyncState::kQueueIdInvalid) {}

VkSemaphoreSubmitInfo SubmitInfoConverter::BatchStore::WaitSemaphore(const VkSubmitInfo &info, uint32_t index) {
    auto semaphore_info = lvl_init_struct<VkSemaphoreSubmitInfo>();
    semaphore_info.semaphore = info.pWaitSemaphores[index];
    semaphore_info.stageMask = info.pWaitDstStageMask[index];
    return semaphore_info;
}

VkCommandBufferSubmitInfo SubmitInfoConverter::BatchStore::CommandBuffer(const VkSubmitInfo &info, uint32_t index) {
    auto cb_info = lvl_init_struct<VkCommandBufferSubmitInfo>();
    cb_info.commandBuffer = info.pCommandBuffers[index];
    return cb_info;
}

VkSemaphoreSubmitInfo SubmitInfoConverter::BatchStore::SignalSemaphore(const VkSubmitInfo &info, uint32_t index) {
    auto semaphore_info = lvl_init_struct<VkSemaphoreSubmitInfo>();
    semaphore_info.semaphore = info.pSignalSemaphores[index];
    semaphore_info.stageMask = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
    return semaphore_info;
}

SubmitInfoConverter::BatchStore::BatchStore(const VkSubmitInfo &info) {
    info2 = lvl_init_struct<VkSubmitInfo2>();

    info2.waitSemaphoreInfoCount = info.waitSemaphoreCount;
    waits.reserve(info2.waitSemaphoreInfoCount);
    for (uint32_t i = 0; i < info2.waitSemaphoreInfoCount; ++i) {
        waits.emplace_back(WaitSemaphore(info, i));
    }
    info2.pWaitSemaphoreInfos = waits.data();

    info2.commandBufferInfoCount = info.commandBufferCount;
    cbs.reserve(info2.commandBufferInfoCount);
    for (uint32_t i = 0; i < info2.commandBufferInfoCount; ++i) {
        cbs.emplace_back(CommandBuffer(info, i));
    }
    info2.pCommandBufferInfos = cbs.data();

    info2.signalSemaphoreInfoCount = info.signalSemaphoreCount;
    signals.reserve(info2.signalSemaphoreInfoCount);
    for (uint32_t i = 0; i < info2.signalSemaphoreInfoCount; ++i) {
        signals.emplace_back(SignalSemaphore(info, i));
    }
    info2.pSignalSemaphoreInfos = signals.data();
}

SubmitInfoConverter::SubmitInfoConverter(uint32_t count, const VkSubmitInfo *infos) {
    info_store.reserve(count);
    info2s.reserve(count);
    for (uint32_t batch = 0; batch < count; ++batch) {
        info_store.emplace_back(infos[batch]);
        info2s.emplace_back(info_store.back().info2);
    }
}
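
// Usage sketch (hypothetical caller; the function names here are assumptions for illustration,
// not the validator's API). The converter lets VkSubmitInfo batches share the VkSubmitInfo2
// recording path, with signal stage masks pinned to BOTTOM_OF_PIPE as above:
//     void RecordQueueSubmit(VkQueue queue, uint32_t submit_count, const VkSubmitInfo *submits) {
//         SubmitInfoConverter converter(submit_count, submits);
//         // converter.info2s holds one synthesized VkSubmitInfo2 per submitted batch
//         RecordQueueSubmit2(queue, static_cast<uint32_t>(converter.info2s.size()), converter.info2s.data());
//     }
// Note the reserve(count) in the constructor: it prevents info_store reallocation, so the
// pointers each info2 takes into its BatchStore's vectors remain valid for the converter's lifetime.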

ResourceUsageTag BatchAccessLog::Import(const BatchRecord &batch, const CommandBufferAccessContext &cb_access) {
    ResourceUsageTag bias = batch.bias;
    ResourceUsageTag tag_limit = bias + cb_access.GetTagLimit();
    ResourceUsageRange import_range = {bias, tag_limit};
    log_map_.insert(std::make_pair(import_range, CBSubmitLog(batch, cb_access)));
    return tag_limit;
}

void BatchAccessLog::Import(const BatchAccessLog &other) {
    for (const auto &entry : other.log_map_) {
        log_map_.insert(entry);
    }
}
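
// Note (an assumption based on how tags are allocated): plain insert suffices here because
// global tag ranges are reserved per batch, so ranges imported from different logs are expected
// to be disjoint.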

// Trim: Remove any unreferenced AccessLog ranges from a BatchAccessLog
//
// In order to contain memory growth in the AccessLog information regarding prior submitted command buffers,
// the Trim call removes any AccessLog references that do not correspond to any tags in use. The set of referenced tags,
// used_tags, is generated by scanning the AccessContext and EventContext of the containing QueueBatchContext.
//
// Upon return the BatchAccessLog should only contain references to the AccessLog information needed by the
// containing parent QueueBatchContext.
//
// The algorithm used is another example of the "parallel iteration" pattern common within SyncVal. In this case we are
// traversing the ordered range_map containing the AccessLog references and the ordered set of tags in use.
//
// To efficiently perform the parallel iteration, optimizations within this function include:
// * when ranges are detected that have no tags referenced, all ranges between the last tag and the current tag are erased
// * when used tags prior to the current range are found, all tags up to the current range are skipped
// * when a tag is found within the current range, that range is skipped (and thus kept in the map), and further used tags
//   within the range are skipped.
//
// Note that for each subcase, any "next steps" logic is designed to be handled within the subsequent iteration -- meaning that
// each subcase simply handles the specifics of the current update/skip/erase action needed, and leaves the iterators in a
// sensible state for the top of loop... intentionally eliding special case handling.
void BatchAccessLog::Trim(const ResourceUsageTagSet &used_tags) {
    auto current_tag = used_tags.cbegin();
    const auto end_tag = used_tags.cend();
    auto current_map_range = log_map_.begin();
    const auto end_map = log_map_.end();

    while (current_map_range != end_map) {
        if (current_tag == end_tag) {
            // We're out of tags, the rest of the map isn't referenced, so erase it
            current_map_range = log_map_.erase(current_map_range, end_map);
        } else {
            auto &range = current_map_range->first;
            const ResourceUsageTag tag = *current_tag;
            if (tag < range.begin) {
                // Skip to the next tag potentially in range
                // if this is end_tag, we'll handle that next iteration
                current_tag = used_tags.lower_bound(range.begin);
            } else if (tag >= range.end) {
                // This tag is beyond the current range, delete all ranges between current_map_range,
                // and the next that includes the tag. Next is not erased.
                auto next_used = log_map_.lower_bound(ResourceUsageRange(tag, tag + 1));
                current_map_range = log_map_.erase(current_map_range, next_used);
            } else {
                // Skip the rest of the tags in this range
                // If this is end, the next iteration will handle
                current_tag = used_tags.lower_bound(range.end);

                // This is a range we will keep, advance to the next. Next iteration handles end condition
                ++current_map_range;
            }
        }
    }
}
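
// Worked example of Trim (assumed contents): with log_map_ ranges {[0,10), [10,20), [20,30)}
// and used_tags = {12, 27}:
//   - at [0,10): 12 >= 10, so every range before the one containing 12 is erased ([0,10) goes)
//   - at [10,20): 12 falls inside, so the range is kept; current_tag skips ahead to 27
//   - at [20,30): 27 falls inside, so the range is kept; the map ends as {[10,20), [20,30)}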

BatchAccessLog::AccessRecord BatchAccessLog::operator[](ResourceUsageTag tag) const {
    auto found_log = log_map_.find(tag);
    if (found_log != log_map_.cend()) {
        return found_log->second[tag];
    }
    assert("tag not found" == nullptr);
    return AccessRecord();
}
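// Maps a global tag back to the originating command buffer's access log entry. Example (assumed
// values): with batch_.bias == 100, tag 103 reads entry 3 of the submitted command buffer's log.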
BatchAccessLog::AccessRecord BatchAccessLog::CBSubmitLog::operator[](ResourceUsageTag tag) const {
    assert(tag >= batch_.bias);
    const size_t index = tag - batch_.bias;
    assert(log_);
    assert(index < log_->size());
    return AccessRecord{&batch_, &(*log_)[index]};