/* Copyright (c) 2019-2022 The Khronos Group Inc.
 * Copyright (c) 2019-2022 Valve Corporation
 * Copyright (c) 2019-2022 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"

// Utilities to DRY up Get... calls
template <typename Map, typename Key = typename Map::key_type, typename RetVal = layer_data::optional<typename Map::mapped_type>>
RetVal GetMappedOptional(const Map &map, const Key &key) {
    RetVal ret_val;
    auto it = map.find(key);
    if (it != map.cend()) {
        ret_val.emplace(it->second);
    }
    return ret_val;
}
template <typename Map, typename Fn>
typename Map::mapped_type GetMapped(const Map &map, const typename Map::key_type &key, Fn &&default_factory) {
    auto value = GetMappedOptional(map, key);
    return (value) ? *value : default_factory();
}

template <typename Map, typename Fn>
typename Map::mapped_type GetMappedInsert(Map &map, const typename Map::key_type &key, Fn &&default_factory) {
    auto value = GetMappedOptional(map, key);
    if (value) {
        return *value;
    }
    auto insert_it = map.emplace(std::make_pair(key, default_factory()));
    assert(insert_it.second);

    return insert_it.first->second;
}

template <typename Map, typename Key = typename Map::key_type, typename Mapped = typename Map::mapped_type,
          typename Value = typename Mapped::element_type>
Value *GetMappedPlainFromShared(const Map &map, const Key &key) {
    auto value = GetMappedOptional<Map, Key>(map, key);
    if (value) return value->get();
    return nullptr;
}
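
// Illustrative usage of the helpers above (comment-only sketch; the map and key names here are hypothetical,
// not taken from this file):
//     std::map<VkBuffer, std::shared_ptr<BUFFER_STATE>> buffer_map;
//     auto opt = GetMappedOptional(buffer_map, buffer);   // layer_data::optional<std::shared_ptr<BUFFER_STATE>>
//     auto state = GetMapped(buffer_map, buffer, []() { return std::shared_ptr<BUFFER_STATE>(); });
//     BUFFER_STATE *plain = GetMappedPlainFromShared(buffer_map, buffer);  // nullptr when the key is absent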

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.Binding(); }

static bool SimpleBinding(const IMAGE_STATE &image_state) {
    bool simple =
        SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.IsSwapchainImage() || image_state.bind_swapchain;

    // If it's not simple we must have an encoder.
    assert(!simple || image_state.fragment_encoder.get());
    return simple;
}

static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
static const std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; };
static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
    return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
}

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::READ_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_READ:
            return true;
            break;
        default:
            assert(0);
    }
    return false;
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}

static std::string string_UsageTag(const ResourceUsageRecord &tag) {
    std::stringstream out;

    out << "command: " << CommandTypeString(tag.command);
    out << ", seq_no: " << tag.seq_num;
    if (tag.sub_command != 0) {
        out << ", subcmd: " << tag.sub_command;
    }
    return out.str();
}
static std::string string_UsageIndex(SyncStageAccessIndex usage_index) {
    const char *stage_access_name = "INVALID_STAGE_ACCESS";
    if (usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size())) {
        stage_access_name = syncStageAccessInfoByStageAccessIndex[usage_index].name;
    }
    return std::string(stage_access_name);
}

struct NoopBarrierAction {
    explicit NoopBarrierAction() {}
    void operator()(ResourceAccessState *access) const {}
    const bool layout_transition = false;
};

// NOTE: Make sure the proxy doesn't outlive from, as the proxy is pointing directly to access contexts owned by from.
CommandBufferAccessContext::CommandBufferAccessContext(const CommandBufferAccessContext &from, AsProxyContext dummy)
    : CommandBufferAccessContext(from.sync_state_) {
    // Copy only the needed fields out of from for a temporary, proxy command buffer context
    cb_state_ = from.cb_state_;
    queue_flags_ = from.queue_flags_;
    destroyed_ = from.destroyed_;
    access_log_ = from.access_log_;  // potentially large, but no choice given tagging lookup.
    command_number_ = from.command_number_;
    subcommand_number_ = from.subcommand_number_;
    reset_count_ = from.reset_count_;

    const auto *from_context = from.GetCurrentAccessContext();
    assert(from_context);

    // Construct a fully resolved single access context out of from
    const NoopBarrierAction noop_barrier;
    for (AccessAddressType address_type : kAddressTypes) {
        from_context->ResolveAccessRange(address_type, kFullRange, noop_barrier,
                                         &cb_access_context_.GetAccessStateMap(address_type), nullptr);
    }
    // The proxy has flattened the current render pass context (if any), but the async contexts are needed for hazard detection
    cb_access_context_.ImportAsyncContexts(*from_context);

    events_context_ = from.events_context_;

    // We don't want to copy the full render_pass_context_ history just for the proxy.
}

std::string CommandBufferAccessContext::FormatUsage(const ResourceUsageTag tag) const {
    std::stringstream out;
    assert(tag < access_log_.size());
    const auto &record = access_log_[tag];
    out << string_UsageTag(record);
    if (record.cb_state != cb_state_.get()) {
        out << ", command_buffer: " << sync_state_->report_data->FormatHandle(record.cb_state->commandBuffer()).c_str();
        if (record.cb_state->Destroyed()) {
            out << " (destroyed)";
        }

    }
    out << ", reset_no: " << std::to_string(record.reset_count);
    return out.str();
}
std::string CommandBufferAccessContext::FormatUsage(const ResourceFirstAccess &access) const {
    std::stringstream out;
    out << "(recorded_usage: " << string_UsageIndex(access.usage_index);
    out << ", " << FormatUsage(access.tag) << ")";
    return out.str();
}

std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
    const auto &tag = hazard.tag;
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    std::stringstream out;
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(";
    if (!hazard.recorded_access.get()) {
        // If we have a recorded usage, the usage is reported from the recorded context's point of view
        out << "usage: " << usage_info.name << ", ";
    }
    out << "prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }

    if (tag < access_log_.size()) {
        out << ", " << FormatUsage(tag) << ")";
    }
    return out.str();
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};

// Sometimes we have an internal access conflict; we use kInvalidTag to set and detect this in temporary/proxy contexts
static const ResourceUsageTag kInvalidTag(ResourceUsageRecord::kMaxIndex);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) { return bindable.GetFakeBaseAddress(); }

inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility.  These are each written as "one time through" generators.
//
// Usage:
//  Constructor() -- initializes the generator to point to the begin of the space declared.
//  *  -- the current range of the generator; an empty range signifies end
//  ++ -- advance to the next non-empty range (or end)
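
// For example (illustrative sketch only; RangeGen and UseRange are placeholders), the intended traversal over any of
// these generators is:
//     for (RangeGen gen(/* ... */); gen->non_empty(); ++gen) {
//         UseRange(*gen);
//     }
// i.e. construct, consume the current (non-empty) range, and advance until the generator yields an empty range.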
370
371// A wrapper for a single range with the same semantics as the actual generators below
372template <typename KeyType>
373class SingleRangeGenerator {
374 public:
375 SingleRangeGenerator(const KeyType &range) : current_(range) {}
John Zulaufd5115702021-01-18 12:34:33 -0700376 const KeyType &operator*() const { return current_; }
377 const KeyType *operator->() const { return &current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700378 SingleRangeGenerator &operator++() {
379 current_ = KeyType(); // just one real range
380 return *this;
381 }
382
383 bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }
384
385 private:
386 SingleRangeGenerator() = default;
387 const KeyType range_;
388 KeyType current_;
389};
390
John Zulaufae842002021-04-15 18:20:55 -0600391// Generate the ranges that are the intersection of range and the entries in the RangeMap
392template <typename RangeMap, typename KeyType = typename RangeMap::key_type>
393class MapRangesRangeGenerator {
John Zulauf4a6105a2020-11-17 15:11:05 -0700394 public:
John Zulaufd5115702021-01-18 12:34:33 -0700395 // Default constructed is safe to dereference for "empty" test, but for no other operation.
John Zulaufae842002021-04-15 18:20:55 -0600396 MapRangesRangeGenerator() : range_(), map_(nullptr), map_pos_(), current_() {
John Zulaufd5115702021-01-18 12:34:33 -0700397 // Default construction for KeyType *must* be empty range
398 assert(current_.empty());
399 }
John Zulaufae842002021-04-15 18:20:55 -0600400 MapRangesRangeGenerator(const RangeMap &filter, const KeyType &range) : range_(range), map_(&filter), map_pos_(), current_() {
John Zulauf4a6105a2020-11-17 15:11:05 -0700401 SeekBegin();
402 }
John Zulaufae842002021-04-15 18:20:55 -0600403 MapRangesRangeGenerator(const MapRangesRangeGenerator &from) = default;
John Zulaufd5115702021-01-18 12:34:33 -0700404
John Zulauf4a6105a2020-11-17 15:11:05 -0700405 const KeyType &operator*() const { return current_; }
406 const KeyType *operator->() const { return &current_; }
John Zulaufae842002021-04-15 18:20:55 -0600407 MapRangesRangeGenerator &operator++() {
408 ++map_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700409 UpdateCurrent();
410 return *this;
411 }
412
John Zulaufae842002021-04-15 18:20:55 -0600413 bool operator==(const MapRangesRangeGenerator &other) const { return current_ == other.current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700414
John Zulaufae842002021-04-15 18:20:55 -0600415 protected:
John Zulauf4a6105a2020-11-17 15:11:05 -0700416 void UpdateCurrent() {
John Zulaufae842002021-04-15 18:20:55 -0600417 if (map_pos_ != map_->cend()) {
418 current_ = range_ & map_pos_->first;
John Zulauf4a6105a2020-11-17 15:11:05 -0700419 } else {
420 current_ = KeyType();
421 }
422 }
423 void SeekBegin() {
John Zulaufae842002021-04-15 18:20:55 -0600424 map_pos_ = map_->lower_bound(range_);
John Zulauf4a6105a2020-11-17 15:11:05 -0700425 UpdateCurrent();
426 }
John Zulaufae842002021-04-15 18:20:55 -0600427
428 // Adding this functionality here, to avoid gratuitous Base:: qualifiers in the derived class
429 // Note: Not exposed in this classes public interface to encourage using a consistent ++/empty generator semantic
430 template <typename Pred>
431 MapRangesRangeGenerator &PredicatedIncrement(Pred &pred) {
432 do {
433 ++map_pos_;
434 } while (map_pos_ != map_->cend() && map_pos_->first.intersects(range_) && !pred(map_pos_));
435 UpdateCurrent();
436 return *this;
437 }
438
John Zulauf4a6105a2020-11-17 15:11:05 -0700439 const KeyType range_;
John Zulaufae842002021-04-15 18:20:55 -0600440 const RangeMap *map_;
441 typename RangeMap::const_iterator map_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700442 KeyType current_;
443};
John Zulaufd5115702021-01-18 12:34:33 -0700444using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
John Zulaufae842002021-04-15 18:20:55 -0600445using EventSimpleRangeGenerator = MapRangesRangeGenerator<SyncEventState::ScopeMap>;

// Generate the ranges for entries meeting the predicate that are the intersection of range and the entries in the RangeMap
template <typename RangeMap, typename Predicate, typename KeyType = typename RangeMap::key_type>
class PredicatedMapRangesRangeGenerator : public MapRangesRangeGenerator<RangeMap, KeyType> {
  public:
    using Base = MapRangesRangeGenerator<RangeMap, KeyType>;
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    PredicatedMapRangesRangeGenerator() : Base(), pred_() {}
    PredicatedMapRangesRangeGenerator(const RangeMap &filter, const KeyType &range, Predicate pred)
        : Base(filter, range), pred_(pred) {}
    PredicatedMapRangesRangeGenerator(const PredicatedMapRangesRangeGenerator &from) = default;

    PredicatedMapRangesRangeGenerator &operator++() {
        Base::PredicatedIncrement(pred_);
        return *this;
    }

  protected:
    Predicate pred_;
};

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
// Templated to allow for different Range generators or map sources...
template <typename RangeMap, typename RangeGen, typename KeyType = typename RangeMap::key_type>
class FilteredGeneratorGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    FilteredGeneratorGenerator(const RangeMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++gen_;
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *gen_; }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++gen_;
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    const RangeMap *filter_;
    RangeGen gen_;
    typename RangeMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;

ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}
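
// Worked example (illustrative values only): GetBufferRange(256, 1024, 2, 4, 16) starts at 256 + 2 * 16 = 288 and
// spans 4 * 16 = 64 bytes, i.e. [288, 352); with count == UINT32_MAX the range instead extends to the end of the
// buffer, i.e. [288, 1024).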

SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
    // Because if a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard is impossible.
    if (descriptor_data.is_writable) {
        return stage_access->second.storage_write;
    }
    // TODO: sampled_read
    return stage_access->second.storage_read;
}

bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}

// Traverse the attachment resolves for a specific subpass, and do action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
                      uint32_t subpass) {
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an in-use color attachment and a matching in-use resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ,
                       SyncOrdering::kColorAttachment);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
                       SyncOrdering::kColorAttachment);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        AttachmentViewGen::Gen gen_type = AttachmentViewGen::Gen::kRenderArea;
        if (resolve_depth && resolve_stencil) {
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            gen_type = AttachmentViewGen::Gen::kDepthOnlyRenderArea;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate all stencil only
            gen_type = AttachmentViewGen::Gen::kStencilOnlyRenderArea;
            aspect_string = "stencil";
        }

        if (aspect_string) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at], gen_type,
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at], gen_type,
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandExecutionContext &exec_context, const char *func_name)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          exec_context_(exec_context),
          func_name_(func_name),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage,
                    SyncOrdering ordering_rule) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view_gen, gen_type, current_usage, ordering_rule);
        if (hazard.hazard) {
            skip_ |=
                exec_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
                                                      "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
                                                      " to resolve attachment %" PRIu32 ". Access info %s.",
                                                      func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
                                                      attachment_name, src_at, dst_at, exec_context_.FormatUsage(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandExecutionContext &exec_context_;
    const char *func_name_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, ResourceUsageTag tag) : context_(context), tag_(tag) {}
    void operator()(const char *, const char *, uint32_t, uint32_t, const AttachmentViewGen &view_gen,
                    AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view_gen, gen_type, current_usage, ordering_rule, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag tag_) {
    access_state = layer_data::make_unique<const ResourceAccessState>(*access_state_);
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

void HazardResult::AddRecordedAccess(const ResourceFirstAccess &first_access) {
    recorded_access = layer_data::make_unique<const ResourceFirstAccess>(first_access);
}

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
    prev_.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (has_barrier_from_external) {
        // Store the barrier from external with the rest, but save pointer for "by subpass" lookups.
        prev_.emplace_back(external_context, queue_flags, subpass_dep.barrier_from_external);
        src_external_ = &prev_.back();
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (const auto &access : accesses) {
            action(address_type, access);
        }
    }
}

// A recursive range walker for hazard detection, first for the current context and then (DetectHazardRecur) to walk
// the DAG of the contexts (for example subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto the_end = accesses.cend();  // End is not invalidated
    auto pos = accesses.lower_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    while (pos != the_end && pos->first.begin < range.end) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap.  If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
        ++pos;
    }

    if (detect_prev) {
        // Detect in the trailing empty as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    auto pos = accesses.lower_bound(range);
    const auto the_end = accesses.end();

    HazardResult hazard;
    while (pos != the_end && pos->first.begin < range.end) {
        hazard = detector.DetectAsync(pos, start_tag_);
        if (hazard.hazard) break;
        ++pos;
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct ApplyTrackbackStackAction {
    explicit ApplyTrackbackStackAction(const std::vector<SyncBarrier> &barriers_,
                                       const ResourceAccessStateFunction *previous_barrier_ = nullptr)
        : barriers(barriers_), previous_barrier(previous_barrier_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        // NOTE: We can use the invalid tag, as these barriers do not include layout transitions (would assert in SetWrite)
        access->ApplyPendingBarriers(kInvalidTag);
        if (previous_barrier) {
            assert(bool(*previous_barrier));
            (*previous_barrier)(access);
        }
    }
    const std::vector<SyncBarrier> &barriers;
    const ResourceAccessStateFunction *previous_barrier;
};
920
921// Splits a single map entry into piece matching the entries in [first, last) the total range over [first, last) must be
922// contained with entry. Entry must be an iterator pointing to dest, first and last must be iterators pointing to a
923// *different* map from dest.
924// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
925// range [first, last)
926template <typename BarrierAction>
John Zulauf355e49b2020-04-24 15:11:15 -0600927static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
928 ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
John Zulaufb02c1eb2020-10-06 16:33:36 -0600929 BarrierAction &barrier_action) {
John Zulauf355e49b2020-04-24 15:11:15 -0600930 auto at = entry;
931 for (auto pos = first; pos != last; ++pos) {
932 // Every member of the input iterator range must fit within the remaining portion of entry
933 assert(at->first.includes(pos->first));
934 assert(at != dest->end());
935 // Trim up at to the same size as the entry to resolve
936 at = sparse_container::split(at, *dest, pos->first);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600937 auto access = pos->second; // intentional copy
938 barrier_action(&access);
John Zulauf355e49b2020-04-24 15:11:15 -0600939 at->second.Resolve(access);
940 ++at; // Go to the remaining unused section of entry
941 }
942}
943
John Zulaufa0a98292020-09-18 09:30:10 -0600944static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
945 SyncBarrier merged = {};
946 for (const auto &barrier : barriers) {
947 merged.Merge(barrier);
948 }
949 return merged;
950}

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
                                       ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
                                       bool recur_to_infill) const {
    if (!range.non_empty()) return;

    ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
    while (current->range.non_empty() && range.includes(current->range.begin)) {
        const auto current_range = current->range & range;
        if (current->pos_B->valid) {
            const auto &src_pos = current->pos_B->lower_bound;
            auto access = src_pos->second;  // intentional copy
            barrier_action(&access);

            if (current->pos_A->valid) {
                const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
                trimmed->second.Resolve(access);
                current.invalidate_A(trimmed);
            } else {
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the insert segment
            }
        } else {
            // we have to descend to fill this gap
            if (recur_to_infill) {
                ResourceAccessRange recurrence_range = current_range;
                // The current context is empty for the current range, so recur to fill the gap.
                // Since we will be recurring back up the DAG, expand the gap descent to cover the full range for which B
                // is not valid, to minimize that recurrence
                if (current->pos_B.at_end()) {
                    // Do the remainder here....
                    recurrence_range.end = range.end;
                } else {
                    // Recur only over the range until B becomes valid (within the limits of range).
                    recurrence_range.end = std::min(range.end, current->pos_B->lower_bound->first.begin);
                }
                ResolvePreviousAccessStack(type, recurrence_range, resolve_map, infill_state, barrier_action);

                // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
                // iteration of the outer while.

                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
                // not the end of the range is a gap.  For the seek to work, first we need to warn the parallel iterator
                // we stepped on the dest map
                const auto seek_to = recurrence_range.end - 1;  // The subtraction is safe as range can't be empty (loop condition)
                current.invalidate_A();  // Changes current->range
                current.seek(seek_to);
            } else if (!current->pos_A->valid && infill_state) {
                // If we didn't find anything in the current range, and we aren't recurring... we infill if required
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the correct segment after insert
            }
        }
        if (current->range.non_empty()) {
            ++current;
        }
    }

    // Infill if range goes past both the current and resolve map prior contents
    if (recur_to_infill && (current->range.end < range.end)) {
        ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
        ResolvePreviousAccessStack<BarrierAction>(type, trailing_fill_range, resolve_map, infill_state, barrier_action);
    }
}
1016
John Zulauf22aefed2021-03-11 18:14:35 -07001017template <typename BarrierAction>
1018void AccessContext::ResolvePreviousAccessStack(AccessAddressType type, const ResourceAccessRange &range,
1019 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1020 const BarrierAction &previous_barrier) const {
1021 ResourceAccessStateFunction stacked_barrier(std::ref(previous_barrier));
1022 ResolvePreviousAccess(type, range, descent_map, infill_state, &stacked_barrier);
1023}
1024
John Zulauf43cc7462020-12-03 12:33:12 -07001025void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
John Zulauf22aefed2021-03-11 18:14:35 -07001026 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1027 const ResourceAccessStateFunction *previous_barrier) const {
1028 if (prev_.size() == 0) {
John Zulauf5f13a792020-03-10 07:31:21 -06001029 if (range.non_empty() && infill_state) {
John Zulauf22aefed2021-03-11 18:14:35 -07001030            // Fill the empty portions of descent_map with the default_state, with the barrier function applied (if present)
1031 ResourceAccessState state_copy;
1032 if (previous_barrier) {
1033 assert(bool(*previous_barrier));
1034 state_copy = *infill_state;
1035 (*previous_barrier)(&state_copy);
1036 infill_state = &state_copy;
1037 }
1038 sparse_container::update_range_value(*descent_map, range, *infill_state,
1039 sparse_container::value_precedence::prefer_dest);
John Zulauf5f13a792020-03-10 07:31:21 -06001040 }
1041 } else {
1042 // Look for something to fill the gap further along.
1043 for (const auto &prev_dep : prev_) {
John Zulauf22aefed2021-03-11 18:14:35 -07001044 const ApplyTrackbackStackAction barrier_action(prev_dep.barriers, previous_barrier);
John Zulaufbb890452021-12-14 11:30:18 -07001045 prev_dep.source_subpass->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001046 }
John Zulauf5f13a792020-03-10 07:31:21 -06001047 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001048}
1049
John Zulauf4a6105a2020-11-17 15:11:05 -07001050// Non-lazy import of all accesses, WaitEvents needs this.
1051void AccessContext::ResolvePreviousAccesses() {
1052 ResourceAccessState default_state;
John Zulauf22aefed2021-03-11 18:14:35 -07001053 if (!prev_.size()) return; // If no previous contexts, nothing to do
1054
John Zulauf4a6105a2020-11-17 15:11:05 -07001055 for (const auto address_type : kAddressTypes) {
1056 ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
1057 }
1058}
1059
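// Choose the address space in which an image's accesses are tracked: kLinear for linear-tiled images, kIdealized (the
// synthetic layout produced by the image's fragment_encoder) otherwise.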
John Zulauf43cc7462020-12-03 12:33:12 -07001060AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
1061 return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
John Zulauf16adfc92020-04-08 10:28:33 -06001062}
1063
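// Map an attachment loadOp to the stage/access it implies: LOAD_OP_NONE_EXT implies no access, LOAD_OP_LOAD an
// attachment read, and anything else (CLEAR, DONT_CARE) is treated as an attachment write.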
John Zulauf1507ee42020-05-18 11:33:09 -06001064static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001065 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1066 ? SYNC_ACCESS_INDEX_NONE
1067 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
1068 : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001069 return stage_access;
1070}
1071static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001072 const auto stage_access =
1073 (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1074 ? SYNC_ACCESS_INDEX_NONE
1075 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
1076 : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001077 return stage_access;
1078}
1079
John Zulauf7635de32020-05-29 17:14:15 -06001080// Caller must manage returned pointer
1081static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001082 uint32_t subpass, const AttachmentViewGenVector &attachment_views) {
John Zulauf7635de32020-05-29 17:14:15 -06001083 auto *proxy = new AccessContext(context);
John Zulaufee984022022-04-13 16:39:50 -06001084 proxy->UpdateAttachmentResolveAccess(rp_state, attachment_views, subpass, kInvalidTag);
1085 proxy->UpdateAttachmentStoreAccess(rp_state, attachment_views, subpass, kInvalidTag);
John Zulauf7635de32020-05-29 17:14:15 -06001086 return proxy;
1087}
1088
John Zulaufb02c1eb2020-10-06 16:33:36 -06001089template <typename BarrierAction>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001090void AccessContext::ResolveAccessRange(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1091 BarrierAction &barrier_action, ResourceAccessRangeMap *descent_map,
1092 const ResourceAccessState *infill_state) const {
1093 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1094 if (!attachment_gen) return;
1095
1096 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1097 const AccessAddressType address_type = view_gen.GetAddressType();
1098 for (; range_gen->non_empty(); ++range_gen) {
1099 ResolveAccessRange(address_type, *range_gen, barrier_action, descent_map, infill_state);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001100 }
John Zulauf62f10592020-04-03 12:20:02 -06001101}
1102
John Zulauf7635de32020-05-29 17:14:15 -06001103// Layout transitions are handled as if they were occurring at the beginning of the next subpass
John Zulaufbb890452021-12-14 11:30:18 -07001104bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001105 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001106 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001107 bool skip = false;
John Zulauf7635de32020-05-29 17:14:15 -06001108    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
1109 // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
1110    // those effects have not been recorded yet.
1111 //
1112 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
1113 // to apply and only copy then, if this proves a hot spot.
1114 std::unique_ptr<AccessContext> proxy_for_prev;
1115 TrackBack proxy_track_back;
1116
John Zulauf355e49b2020-04-24 15:11:15 -06001117 const auto &transitions = rp_state.subpass_transitions[subpass];
1118 for (const auto &transition : transitions) {
John Zulauf7635de32020-05-29 17:14:15 -06001119 const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
1120
1121 const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
John Zulauf22aefed2021-03-11 18:14:35 -07001122 assert(track_back);
John Zulauf7635de32020-05-29 17:14:15 -06001123 if (prev_needs_proxy) {
1124 if (!proxy_for_prev) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001125 proxy_for_prev.reset(
John Zulaufbb890452021-12-14 11:30:18 -07001126 CreateStoreResolveProxyContext(*track_back->source_subpass, rp_state, transition.prev_pass, attachment_views));
John Zulauf7635de32020-05-29 17:14:15 -06001127 proxy_track_back = *track_back;
John Zulaufbb890452021-12-14 11:30:18 -07001128 proxy_track_back.source_subpass = proxy_for_prev.get();
John Zulauf7635de32020-05-29 17:14:15 -06001129 }
1130 track_back = &proxy_track_back;
1131 }
1132 auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
John Zulauf355e49b2020-04-24 15:11:15 -06001133 if (hazard.hazard) {
John Zulaufee984022022-04-13 16:39:50 -06001134 if (hazard.tag == kInvalidTag) {
John Zulaufbb890452021-12-14 11:30:18 -07001135 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001136 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1137 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1138 " image layout transition (old_layout: %s, new_layout: %s) after store/resolve operation in subpass %" PRIu32,
1139 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1140 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout), transition.prev_pass);
1141 } else {
John Zulaufbb890452021-12-14 11:30:18 -07001142 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001143 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1144 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1145 " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
1146 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1147 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulaufbb890452021-12-14 11:30:18 -07001148 exec_context.FormatUsage(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06001149 }
John Zulauf355e49b2020-04-24 15:11:15 -06001150 }
1151 }
1152 return skip;
1153}
1154
John Zulaufbb890452021-12-14 11:30:18 -07001155bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001156 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001157 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001158 bool skip = false;
1159 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufa0a98292020-09-18 09:30:10 -06001160
John Zulauf1507ee42020-05-18 11:33:09 -06001161 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1162 if (subpass == rp_state.attachment_first_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001163 const auto &view_gen = attachment_views[i];
1164 if (!view_gen.IsValid()) continue;
John Zulauf1507ee42020-05-18 11:33:09 -06001165 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001166
1167            // Need to check in the following way:
1168            // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1169 // vs. transition
1170 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1171 // for each aspect loaded.
1172
1173 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001174 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001175 const bool is_color = !(has_depth || has_stencil);
1176
1177 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001178 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001179
John Zulaufaff20662020-06-01 14:07:58 -06001180 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001181 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001182
John Zulaufb02c1eb2020-10-06 16:33:36 -06001183 bool checked_stencil = false;
John Zulauf57261402021-08-13 11:32:06 -06001184 if (is_color && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001185 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea, load_index, SyncOrdering::kColorAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001186 aspect = "color";
1187 } else {
John Zulauf57261402021-08-13 11:32:06 -06001188 if (has_depth && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001189 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_index,
1190 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001191 aspect = "depth";
1192 }
John Zulauf57261402021-08-13 11:32:06 -06001193 if (!hazard.hazard && has_stencil && (stencil_load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001194 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, stencil_load_index,
1195 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001196 aspect = "stencil";
1197 checked_stencil = true;
1198 }
1199 }
1200
1201 if (hazard.hazard) {
1202 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulaufbb890452021-12-14 11:30:18 -07001203 const auto &sync_state = exec_context.GetSyncState();
John Zulaufee984022022-04-13 16:39:50 -06001204 if (hazard.tag == kInvalidTag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001205 // Hazard vs. ILT
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001206 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulaufb02c1eb2020-10-06 16:33:36 -06001207 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1208 " aspect %s during load with loadOp %s.",
1209 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1210 } else {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001211 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauf1507ee42020-05-18 11:33:09 -06001212 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001213 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001214 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulaufbb890452021-12-14 11:30:18 -07001215 exec_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001216 }
1217 }
1218 }
1219 }
1220 return skip;
1221}
1222
John Zulaufaff20662020-06-01 14:07:58 -06001223// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1224// because of the ordering guarantees w.r.t. sample access and that the resolve validation hasn't altered the state, because
1225// store is part of the same Next/End operation.
1226 // The latter is handled in layout transition validation directly.
John Zulaufbb890452021-12-14 11:30:18 -07001227bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001228 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001229 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06001230 bool skip = false;
1231 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001232
1233 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1234 if (subpass == rp_state.attachment_last_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001235 const AttachmentViewGen &view_gen = attachment_views[i];
1236 if (!view_gen.IsValid()) continue;
John Zulaufaff20662020-06-01 14:07:58 -06001237 const auto &ci = attachment_ci[i];
1238
1239 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1240 // so we assume that an implementation is *free* to write in that case, meaning that for correctness
1241 // sake, we treat DONT_CARE as writing.
1242 const bool has_depth = FormatHasDepth(ci.format);
1243 const bool has_stencil = FormatHasStencil(ci.format);
1244 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001245 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001246 if (!has_stencil && !store_op_stores) continue;
1247
1248 HazardResult hazard;
1249 const char *aspect = nullptr;
1250 bool checked_stencil = false;
1251 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001252 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
1253 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001254 aspect = "color";
1255 } else {
John Zulauf57261402021-08-13 11:32:06 -06001256 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001257 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001258 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1259 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001260 aspect = "depth";
1261 }
1262 if (!hazard.hazard && has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001263 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1264 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001265 aspect = "stencil";
1266 checked_stencil = true;
1267 }
1268 }
1269
1270 if (hazard.hazard) {
1271 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1272 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulaufbb890452021-12-14 11:30:18 -07001273 skip |=
1274 exec_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1275 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1276 " %s aspect during store with %s %s. Access info %s",
1277 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
1278 op_type_string, store_op_string, exec_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001279 }
1280 }
1281 }
1282 return skip;
1283}
1284
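// Validate the subpass's resolve operations by running the shared ResolveOperation walk with a validating action.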
John Zulaufbb890452021-12-14 11:30:18 -07001285bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001286 const VkRect2D &render_area, const AttachmentViewGenVector &attachment_views,
1287 const char *func_name, uint32_t subpass) const {
John Zulaufbb890452021-12-14 11:30:18 -07001288 ValidateResolveAction validate_action(rp_state.renderPass(), subpass, *this, exec_context, func_name);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001289 ResolveOperation(validate_action, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001290 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001291}
1292
John Zulauf3d84f1b2020-03-09 13:33:25 -06001293class HazardDetector {
1294 SyncStageAccessIndex usage_index_;
1295
1296 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001297 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf14940722021-04-12 15:19:02 -06001298 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001299 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001300 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001301 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001302};
1303
John Zulauf69133422020-05-20 14:55:53 -06001304class HazardDetectorWithOrdering {
1305 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001306 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001307
1308 public:
1309 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001310 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001311 }
John Zulauf14940722021-04-12 15:19:02 -06001312 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001313 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001314 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001315 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001316};
1317
John Zulauf16adfc92020-04-08 10:28:33 -06001318HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001319 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001320 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001321 const auto base_address = ResourceBaseAddress(buffer);
1322 HazardDetector detector(usage_index);
1323 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001324}
1325
John Zulauf69133422020-05-20 14:55:53 -06001326template <typename Detector>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001327HazardResult AccessContext::DetectHazard(Detector &detector, const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1328 DetectOptions options) const {
1329 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1330 if (!attachment_gen) return HazardResult();
1331
1332 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1333 const auto address_type = view_gen.GetAddressType();
1334 for (; range_gen->non_empty(); ++range_gen) {
1335 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1336 if (hazard.hazard) return hazard;
1337 }
1338
1339 return HazardResult();
1340}
1341
1342template <typename Detector>
John Zulauf69133422020-05-20 14:55:53 -06001343HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1344 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1345 const VkExtent3D &extent, DetectOptions options) const {
1346 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001347 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001348 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1349 base_address);
1350 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001351 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001352 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001353 if (hazard.hazard) return hazard;
1354 }
1355 return HazardResult();
1356}
John Zulauf110413c2021-03-20 05:38:38 -06001357template <typename Detector>
1358HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1359 const VkImageSubresourceRange &subresource_range, DetectOptions options) const {
1360 if (!SimpleBinding(image)) return HazardResult();
1361 const auto base_address = ResourceBaseAddress(image);
1362 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1363 const auto address_type = ImageAddressType(image);
1364 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf110413c2021-03-20 05:38:38 -06001365 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1366 if (hazard.hazard) return hazard;
1367 }
1368 return HazardResult();
1369}
John Zulauf69133422020-05-20 14:55:53 -06001370
John Zulauf540266b2020-04-06 18:54:53 -06001371HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1372 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1373 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001374 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1375 subresource.layerCount};
John Zulauf110413c2021-03-20 05:38:38 -06001376 HazardDetector detector(current_usage);
1377 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf1507ee42020-05-18 11:33:09 -06001378}
1379
1380HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf110413c2021-03-20 05:38:38 -06001381 const VkImageSubresourceRange &subresource_range) const {
John Zulauf69133422020-05-20 14:55:53 -06001382 HazardDetector detector(current_usage);
John Zulauf110413c2021-03-20 05:38:38 -06001383 return DetectHazard(detector, image, subresource_range, DetectOptions::kDetectAll);
John Zulauf69133422020-05-20 14:55:53 -06001384}
1385
John Zulaufd0ec59f2021-03-13 14:25:08 -07001386HazardResult AccessContext::DetectHazard(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1387 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) const {
1388 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
1389 return DetectHazard(detector, view_gen, gen_type, DetectOptions::kDetectAll);
1390}
1391
John Zulauf69133422020-05-20 14:55:53 -06001392HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001393 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001394 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001395 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001396 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001397}
1398
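// Detects hazards against a barrier's source execution/access scope (e.g. for image layout transitions): prior
// accesses not synchronized by the source scope are reported as hazards.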
John Zulauf3d84f1b2020-03-09 13:33:25 -06001399class BarrierHazardDetector {
1400 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001401 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001402 SyncStageAccessFlags src_access_scope)
1403 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1404
John Zulauf5f13a792020-03-10 07:31:21 -06001405 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1406 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001407 }
John Zulauf14940722021-04-12 15:19:02 -06001408 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001409 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001410 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001411 }
1412
1413 private:
1414 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001415 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001416 SyncStageAccessFlags src_access_scope_;
1417};
1418
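// Variant of BarrierHazardDetector for vkCmdWaitEvents: only the portions of each range captured in the event's first
// scope (recorded at vkCmdSetEvent time) are checked against the barrier's source scopes.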
John Zulauf4a6105a2020-11-17 15:11:05 -07001419class EventBarrierHazardDetector {
1420 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001421 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001422 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
John Zulauf14940722021-04-12 15:19:02 -06001423 ResourceUsageTag scope_tag)
John Zulauf4a6105a2020-11-17 15:11:05 -07001424 : usage_index_(usage_index),
1425 src_exec_scope_(src_exec_scope),
1426 src_access_scope_(src_access_scope),
1427 event_scope_(event_scope),
1428 scope_pos_(event_scope.cbegin()),
1429 scope_end_(event_scope.cend()),
1430 scope_tag_(scope_tag) {}
1431
1432 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1433 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1434        // Need to find a more efficient sync, since we know pos->first is strictly increasing from call to call
1435 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1436 if (scope_pos_ == scope_end_) return HazardResult();
1437 if (!scope_pos_->first.intersects(pos->first)) {
1438 event_scope_.lower_bound(pos->first);
1439 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1440 }
1441
1442 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1443 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1444 }
John Zulauf14940722021-04-12 15:19:02 -06001445 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001446 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
1447 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1448 }
1449
1450 private:
1451 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001452 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001453 SyncStageAccessFlags src_access_scope_;
1454 const SyncEventState::ScopeMap &event_scope_;
1455 SyncEventState::ScopeMap::const_iterator scope_pos_;
1456 SyncEventState::ScopeMap::const_iterator scope_end_;
John Zulauf14940722021-04-12 15:19:02 -06001457 const ResourceUsageTag scope_tag_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001458};
1459
Jeremy Gebben40a22942020-12-22 14:22:06 -07001460HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001461 const SyncStageAccessFlags &src_access_scope,
1462 const VkImageSubresourceRange &subresource_range,
1463 const SyncEventState &sync_event, DetectOptions options) const {
1464 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1465 // first access scope map to use, and there's no easy way to plumb it in below.
1466 const auto address_type = ImageAddressType(image);
1467 const auto &event_scope = sync_event.FirstScope(address_type);
1468
1469 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1470 event_scope, sync_event.first_scope_tag);
John Zulauf110413c2021-03-20 05:38:38 -06001471 return DetectHazard(detector, image, subresource_range, options);
John Zulauf4a6105a2020-11-17 15:11:05 -07001472}
1473
John Zulaufd0ec59f2021-03-13 14:25:08 -07001474HazardResult AccessContext::DetectImageBarrierHazard(const AttachmentViewGen &view_gen, const SyncBarrier &barrier,
1475 DetectOptions options) const {
1476 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, barrier.src_exec_scope.exec_scope,
1477 barrier.src_access_scope);
1478 return DetectHazard(detector, view_gen, AttachmentViewGen::Gen::kViewSubresource, options);
1479}
1480
Jeremy Gebben40a22942020-12-22 14:22:06 -07001481HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001482 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001483 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001484 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001485 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
John Zulauf110413c2021-03-20 05:38:38 -06001486 return DetectHazard(detector, image, subresource_range, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001487}
1488
Jeremy Gebben40a22942020-12-22 14:22:06 -07001489HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001490 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001491 const VkImageMemoryBarrier &barrier) const {
1492 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1493 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1494 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1495}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001496HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001497 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulauf110413c2021-03-20 05:38:38 -06001498 image_barrier.barrier.src_access_scope, image_barrier.range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001499}
John Zulauf355e49b2020-04-24 15:11:15 -06001500
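// Expand a stage or access mask into the union of stage/access bits it enables, using the supplied ordered lookup map.
// The early break relies on the map keys (single stage/access bits) being in ascending order: once the remaining mask
// compares less than the next key, no later entry can intersect it.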
John Zulauf9cb530d2019-09-30 14:14:10 -06001501template <typename Flags, typename Map>
1502SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1503 SyncStageAccessFlags scope = 0;
1504 for (const auto &bit_scope : map) {
1505 if (flag_mask < bit_scope.first) break;
1506
1507 if (flag_mask & bit_scope.first) {
1508 scope |= bit_scope.second;
1509 }
1510 }
1511 return scope;
1512}
1513
Jeremy Gebben40a22942020-12-22 14:22:06 -07001514SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001515 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1516}
1517
Jeremy Gebben40a22942020-12-22 14:22:06 -07001518SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1519 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001520}
1521
Jeremy Gebben40a22942020-12-22 14:22:06 -07001522// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1523SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001524    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1525    // accesses (after factoring out common terms, the union of the per-stage/per-access intersections is the intersection
1526    // of the union of all stage/access types for all the stages with the same union for the access mask)...
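    // Illustrative example (sketch, assuming the usual synchronization2 tokens): AccessScope with
    // stages == VK_PIPELINE_STAGE_2_COPY_BIT_KHR and accesses == VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR should reduce to
    // just the copy-stage transfer-write stage/access bit.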
John Zulauf9cb530d2019-09-30 14:14:10 -06001527 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1528}
1529
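// Core range-map update walk: infill any gaps within [range.begin, range.end) via action.Infill, split entries that
// straddle the range boundaries, and apply action() to every entry that intersects the range.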
1530template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001531void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001532 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
1533    // that do incremental updates)
John Zulauf4a6105a2020-11-17 15:11:05 -07001534 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001535 auto pos = accesses->lower_bound(range);
1536 if (pos == accesses->end() || !pos->first.intersects(range)) {
1537 // The range is empty, fill it with a default value.
1538 pos = action.Infill(accesses, pos, range);
1539 } else if (range.begin < pos->first.begin) {
1540 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001541 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001542 } else if (pos->first.begin < range.begin) {
1543 // Trim the beginning if needed
1544 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1545 ++pos;
1546 }
1547
1548 const auto the_end = accesses->end();
1549 while ((pos != the_end) && pos->first.intersects(range)) {
1550 if (pos->first.end > range.end) {
1551 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1552 }
1553
1554 pos = action(accesses, pos);
1555 if (pos == the_end) break;
1556
1557 auto next = pos;
1558 ++next;
1559 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1560 // Need to infill if next is disjoint
1561 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001562 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001563 next = action.Infill(accesses, next, new_range);
1564 }
1565 pos = next;
1566 }
1567}
John Zulaufd5115702021-01-18 12:34:33 -07001568
1569// Give a comparable interface for range generators and ranges
1570template <typename Action>
1571inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
1572 assert(range);
1573 UpdateMemoryAccessState(accesses, *range, action);
1574}
1575
John Zulauf4a6105a2020-11-17 15:11:05 -07001576template <typename Action, typename RangeGen>
1577void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1578 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001579 RangeGen &range_gen = *range_gen_arg; // Non-const references must be * by style requirement but deref-ing * iterator is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001580 for (; range_gen->non_empty(); ++range_gen) {
1581 UpdateMemoryAccessState(accesses, *range_gen, action);
1582 }
1583}
John Zulauf9cb530d2019-09-30 14:14:10 -06001584
John Zulaufd0ec59f2021-03-13 14:25:08 -07001585template <typename Action, typename RangeGen>
1586void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, const RangeGen &range_gen_prebuilt) {
1587 RangeGen range_gen(range_gen_prebuilt); // RangeGenerators can be expensive to create from scratch... initialize from built
1588 for (; range_gen->non_empty(); ++range_gen) {
1589 UpdateMemoryAccessState(accesses, *range_gen, action);
1590 }
1591}
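// Update action for UpdateMemoryAccessState: gaps are infilled by resolving the previous (prior context) access state,
// and each entry is then updated with the given usage, ordering rule, and tag.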
John Zulauf9cb530d2019-09-30 14:14:10 -06001592struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001593 using Iterator = ResourceAccessRangeMap::iterator;
1594 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001595 // this is only called on gaps, and never returns a gap.
1596 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001597 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001598 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001599 }
John Zulauf5f13a792020-03-10 07:31:21 -06001600
John Zulauf5c5e88d2019-12-26 11:22:02 -07001601 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001602 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001603 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001604 return pos;
1605 }
1606
John Zulauf43cc7462020-12-03 12:33:12 -07001607 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf14940722021-04-12 15:19:02 -06001608 SyncOrdering ordering_rule_, ResourceUsageTag tag_)
John Zulauf8e3c3e92021-01-06 11:19:36 -07001609 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001610 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001611 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001612 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001613 const SyncOrdering ordering_rule;
John Zulauf14940722021-04-12 15:19:02 -06001614 const ResourceUsageTag tag;
John Zulauf9cb530d2019-09-30 14:14:10 -06001615};
1616
John Zulauf4a6105a2020-11-17 15:11:05 -07001617// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001618struct PipelineBarrierOp {
1619 SyncBarrier barrier;
1620 bool layout_transition;
1621 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1622 : barrier(barrier_), layout_transition(layout_transition_) {}
1623 PipelineBarrierOp() = default;
John Zulaufd5115702021-01-18 12:34:33 -07001624 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf1e331ec2020-12-04 18:29:38 -07001625 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1626};
John Zulauf4a6105a2020-11-17 15:11:05 -07001627// The barrier operation for wait events
1628struct WaitEventBarrierOp {
John Zulauf14940722021-04-12 15:19:02 -06001629 ResourceUsageTag scope_tag;
John Zulauf4a6105a2020-11-17 15:11:05 -07001630 SyncBarrier barrier;
1631 bool layout_transition;
John Zulauf14940722021-04-12 15:19:02 -06001632 WaitEventBarrierOp(const ResourceUsageTag scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1633 : scope_tag(scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
John Zulauf4a6105a2020-11-17 15:11:05 -07001634 WaitEventBarrierOp() = default;
John Zulauf14940722021-04-12 15:19:02 -06001635 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope_tag, barrier, layout_transition); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001636};
John Zulauf1e331ec2020-12-04 18:29:38 -07001637
John Zulauf4a6105a2020-11-17 15:11:05 -07001638// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1639// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1640// of a collection is known/present.
John Zulauf5c628d02021-05-04 15:46:36 -06001641template <typename BarrierOp, typename OpVector = std::vector<BarrierOp>>
John Zulauf89311b42020-09-29 16:28:47 -06001642class ApplyBarrierOpsFunctor {
1643 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001644 using Iterator = ResourceAccessRangeMap::iterator;
John Zulauf5c628d02021-05-04 15:46:36 -06001645 // Only called with a gap, and pos at the lower_bound(range)
1646 inline Iterator Infill(ResourceAccessRangeMap *accesses, const Iterator &pos, const ResourceAccessRange &range) const {
1647 if (!infill_default_) {
1648 return pos;
1649 }
1650 ResourceAccessState default_state;
1651 auto inserted = accesses->insert(pos, std::make_pair(range, default_state));
1652 return inserted;
1653 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001654
John Zulauf5c628d02021-05-04 15:46:36 -06001655 Iterator operator()(ResourceAccessRangeMap *accesses, const Iterator &pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001656 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001657 for (const auto &op : barrier_ops_) {
1658 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001659 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001660
John Zulauf89311b42020-09-29 16:28:47 -06001661 if (resolve_) {
1662 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1663 // another walk
1664 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001665 }
1666 return pos;
1667 }
1668
John Zulauf89311b42020-09-29 16:28:47 -06001669 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf5c628d02021-05-04 15:46:36 -06001670 ApplyBarrierOpsFunctor(bool resolve, typename OpVector::size_type size_hint, ResourceUsageTag tag)
1671 : resolve_(resolve), infill_default_(false), barrier_ops_(), tag_(tag) {
John Zulaufd5115702021-01-18 12:34:33 -07001672 barrier_ops_.reserve(size_hint);
1673 }
John Zulauf5c628d02021-05-04 15:46:36 -06001674 void EmplaceBack(const BarrierOp &op) {
1675 barrier_ops_.emplace_back(op);
1676 infill_default_ |= op.layout_transition;
1677 }
John Zulauf89311b42020-09-29 16:28:47 -06001678
1679 private:
1680 bool resolve_;
John Zulauf5c628d02021-05-04 15:46:36 -06001681 bool infill_default_;
1682 OpVector barrier_ops_;
John Zulauf14940722021-04-12 15:19:02 -06001683 const ResourceUsageTag tag_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001684};
1685
John Zulauf4a6105a2020-11-17 15:11:05 -07001686// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1687 // resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1688template <typename BarrierOp>
John Zulauf5c628d02021-05-04 15:46:36 -06001689class ApplyBarrierFunctor : public ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>> {
1690 using Base = ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>>;
1691
John Zulauf4a6105a2020-11-17 15:11:05 -07001692 public:
John Zulaufee984022022-04-13 16:39:50 -06001693 ApplyBarrierFunctor(const BarrierOp &barrier_op) : Base(false, 1, kInvalidTag) { Base::EmplaceBack(barrier_op); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001694};
1695
John Zulauf1e331ec2020-12-04 18:29:38 -07001696// This functor resolves the pending state.
John Zulauf5c628d02021-05-04 15:46:36 -06001697class ResolvePendingBarrierFunctor : public ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>> {
1698 using Base = ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>>;
1699
John Zulauf1e331ec2020-12-04 18:29:38 -07001700 public:
John Zulauf5c628d02021-05-04 15:46:36 -06001701 ResolvePendingBarrierFunctor(ResourceUsageTag tag) : Base(true, 0, tag) {}
John Zulauf9cb530d2019-09-30 14:14:10 -06001702};
1703
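// UpdateAccessState overloads: record a usage (with its ordering rule and tag) over a buffer range, image subresource
// range, or attachment view, translating each into resource access ranges in the appropriate address space.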
John Zulauf8e3c3e92021-01-06 11:19:36 -07001704void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001705 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001706 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001707 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001708}
1709
John Zulauf8e3c3e92021-01-06 11:19:36 -07001710void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001711 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001712 if (!SimpleBinding(buffer)) return;
1713 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001714 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001715}
John Zulauf355e49b2020-04-24 15:11:15 -06001716
John Zulauf8e3c3e92021-01-06 11:19:36 -07001717void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf110413c2021-03-20 05:38:38 -06001718 const VkImageSubresourceRange &subresource_range, const ResourceUsageTag &tag) {
1719 if (!SimpleBinding(image)) return;
1720 const auto base_address = ResourceBaseAddress(image);
1721 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1722 const auto address_type = ImageAddressType(image);
1723 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1724 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
1725}
1726void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001727 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf14940722021-04-12 15:19:02 -06001728 const VkExtent3D &extent, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001729 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001730 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001731 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1732 base_address);
1733 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001734 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf110413c2021-03-20 05:38:38 -06001735 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001736}
John Zulaufd0ec59f2021-03-13 14:25:08 -07001737
1738void AccessContext::UpdateAccessState(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
John Zulauf14940722021-04-12 15:19:02 -06001739 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001740 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1741 if (!gen) return;
1742 subresource_adapter::ImageRangeGenerator range_gen(*gen);
1743 const auto address_type = view_gen.GetAddressType();
1744 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1745 ApplyUpdateAction(address_type, action, &range_gen);
John Zulauf7635de32020-05-29 17:14:15 -06001746}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001747
John Zulauf8e3c3e92021-01-06 11:19:36 -07001748void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001749 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
John Zulauf14940722021-04-12 15:19:02 -06001750 const VkExtent3D &extent, const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001751 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1752 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001753 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001754}
1755
John Zulaufd0ec59f2021-03-13 14:25:08 -07001756template <typename Action, typename RangeGen>
1757void AccessContext::ApplyUpdateAction(AccessAddressType address_type, const Action &action, RangeGen *range_gen_arg) {
1758    assert(range_gen_arg);  // Old Google C++ styleguide requires non-const objects to be passed by * not &, but this isn't an optional arg.
1759 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, range_gen_arg);
John Zulauf540266b2020-04-06 18:54:53 -06001760}
1761
1762template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001763void AccessContext::ApplyUpdateAction(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, const Action &action) {
1764 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1765 if (!gen) return;
1766 UpdateMemoryAccessState(&GetAccessStateMap(view_gen.GetAddressType()), action, *gen);
John Zulauf540266b2020-04-06 18:54:53 -06001767}
1768
John Zulaufd0ec59f2021-03-13 14:25:08 -07001769void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state,
1770 const AttachmentViewGenVector &attachment_views, uint32_t subpass,
John Zulauf14940722021-04-12 15:19:02 -06001771 const ResourceUsageTag tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001772 UpdateStateResolveAction update(*this, tag);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001773 ResolveOperation(update, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001774}
1775
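// Record the attachment writes implied by store/stencil-store ops at the end of each attachment's last subpass; store
// ops other than VK_ATTACHMENT_STORE_OP_NONE_EXT are treated as writes (mirroring ValidateStoreOperation above).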
John Zulaufd0ec59f2021-03-13 14:25:08 -07001776void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
John Zulauf14940722021-04-12 15:19:02 -06001777 uint32_t subpass, const ResourceUsageTag tag) {
John Zulaufaff20662020-06-01 14:07:58 -06001778 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001779
1780 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1781 if (rp_state.attachment_last_subpass[i] == subpass) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001782 const auto &view_gen = attachment_views[i];
1783 if (!view_gen.IsValid()) continue; // UNUSED
John Zulaufaff20662020-06-01 14:07:58 -06001784
1785 const auto &ci = attachment_ci[i];
1786 const bool has_depth = FormatHasDepth(ci.format);
1787 const bool has_stencil = FormatHasStencil(ci.format);
1788 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001789 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001790
1791 if (is_color && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001792 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
1793 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001794 } else {
John Zulaufaff20662020-06-01 14:07:58 -06001795 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001796 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1797 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001798 }
John Zulauf57261402021-08-13 11:32:06 -06001799 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001800 if (has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001801 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1802 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001803 }
1804 }
1805 }
1806 }
1807}
1808
John Zulauf540266b2020-04-06 18:54:53 -06001809template <typename Action>
John Zulaufd5115702021-01-18 12:34:33 -07001810void AccessContext::ApplyToContext(const Action &barrier_action) {
John Zulauf540266b2020-04-06 18:54:53 -06001811    // Note: Barriers do *not* cross context boundaries, applying to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001812 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001813 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001814 }
1815}
1816
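// Import each child (subpass) context's accesses into this context, applying that subpass's final trackback barriers
// (GetDstExternalTrackBack) as the state is resolved.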
1817void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001818 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1819 auto &context = contexts[subpass_index];
John Zulauf22aefed2021-03-11 18:14:35 -07001820 ApplyTrackbackStackAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001821 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001822 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001823 }
1824 }
1825}
1826
John Zulauf4fa68462021-04-26 21:04:22 -06001827// Caller must ensure that the lifespan of this is less than that of from
1828void AccessContext::ImportAsyncContexts(const AccessContext &from) { async_ = from.async_; }
1829
John Zulauf355e49b2020-04-24 15:11:15 -06001830// Suitable only for *subpass* access contexts
John Zulaufd0ec59f2021-03-13 14:25:08 -07001831HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const AttachmentViewGen &attach_view) const {
1832 if (!attach_view.IsValid()) return HazardResult();
John Zulauf355e49b2020-04-24 15:11:15 -06001833
John Zulauf355e49b2020-04-24 15:11:15 -06001834 // We should never ask for a transition from a context we don't have
John Zulaufbb890452021-12-14 11:30:18 -07001835 assert(track_back.source_subpass);
John Zulauf355e49b2020-04-24 15:11:15 -06001836
1837 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001838 // Hazard detection for the transition can be against the merged barriers (it only uses src_...)
1839 const auto merged_barrier = MergeBarriers(track_back.barriers);
John Zulaufbb890452021-12-14 11:30:18 -07001840 HazardResult hazard = track_back.source_subpass->DetectImageBarrierHazard(attach_view, merged_barrier, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001841 if (!hazard.hazard) {
1842 // The Async hazard check is against the current context's async set.
John Zulaufd0ec59f2021-03-13 14:25:08 -07001843 hazard = DetectImageBarrierHazard(attach_view, merged_barrier, kDetectAsync);
John Zulauf355e49b2020-04-24 15:11:15 -06001844 }
John Zulaufa0a98292020-09-18 09:30:10 -06001845
John Zulauf355e49b2020-04-24 15:11:15 -06001846 return hazard;
1847}
1848
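// Record the renderpass attachment layout transitions for a subpass: each attachment's access state is imported from
// the previous subpass's context with the subpass-dependency barriers applied, and any resulting pending barriers are
// then resolved into this context at the given tag.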
John Zulaufb02c1eb2020-10-06 16:33:36 -06001849void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
John Zulauf14940722021-04-12 15:19:02 -06001850 const AttachmentViewGenVector &attachment_views, const ResourceUsageTag tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001851 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001852 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001853 for (const auto &transition : transitions) {
1854 const auto prev_pass = transition.prev_pass;
John Zulaufd0ec59f2021-03-13 14:25:08 -07001855 const auto &view_gen = attachment_views[transition.attachment];
1856 if (!view_gen.IsValid()) continue;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001857
1858 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1859 assert(trackback);
1860
1861 // Import the attachments into the current context
John Zulaufbb890452021-12-14 11:30:18 -07001862 const auto *prev_context = trackback->source_subpass;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001863 assert(prev_context);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001864 const auto address_type = view_gen.GetAddressType();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001865 auto &target_map = GetAccessStateMap(address_type);
1866 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001867 prev_context->ResolveAccessRange(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action, &target_map,
1868 &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001869 }
1870
John Zulauf86356ca2020-10-19 11:46:41 -06001871 // If there were no transitions, skip this global map walk
1872 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001873 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulaufd5115702021-01-18 12:34:33 -07001874 ApplyToContext(apply_pending_action);
John Zulauf86356ca2020-10-19 11:46:41 -06001875 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001876}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001877
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001878void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulauf669dfd52021-01-27 17:15:28 -07001879 auto *events_context = GetCurrentEventsContext();
1880 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06001881 events_context->ApplyBarrier(src, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001882}
1883
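// Validate the descriptor accesses made by the currently bound pipeline for a dispatch or draw: walk each shader
// stage's descriptor uses and check image, texel buffer, and buffer descriptors for hazards against the current
// access context. Fragment stage accesses are skipped when rasterizer discard is enabled.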
locke-lunarg61870c22020-06-09 14:51:50 -06001884bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1885 const char *func_name) const {
1886 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001887 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001888 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001889 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001890 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001891 return skip;
1892 }
1893
1894 using DescriptorClass = cvdescriptorset::DescriptorClass;
1895 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1896 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
locke-lunarg61870c22020-06-09 14:51:50 -06001897 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1898
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001899 for (const auto &stage_state : pipe->stage_state) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07001900 const auto raster_state = pipe->RasterizationState();
1901 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001902 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001903 }
locke-lunarg61870c22020-06-09 14:51:50 -06001904 for (const auto &set_binding : stage_state.descriptor_uses) {
Jeremy Gebben4d51c552022-01-06 21:27:15 -07001905 const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
locke-lunarg61870c22020-06-09 14:51:50 -06001906 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001907 set_binding.first.binding);
locke-lunarg61870c22020-06-09 14:51:50 -06001908 const auto descriptor_type = binding_it.GetType();
1909 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1910 auto array_idx = 0;
1911
1912 if (binding_it.IsVariableDescriptorCount()) {
1913 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1914 }
1915 SyncStageAccessIndex sync_index =
1916 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1917
1918 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1919 uint32_t index = i - index_range.start;
1920 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1921 switch (descriptor->GetClass()) {
1922 case DescriptorClass::ImageSampler:
1923 case DescriptorClass::Image: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07001924 if (descriptor->Invalid()) {
1925 continue;
locke-lunarg61870c22020-06-09 14:51:50 -06001926 }
Jeremy Gebbena08da232022-02-01 15:14:52 -07001927
1928 // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
1929 const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1930 const auto *img_view_state = image_descriptor->GetImageViewState();
1931 VkImageLayout image_layout = image_descriptor->GetImageLayout();
1932
John Zulauf361fb532020-07-22 10:45:39 -06001933 HazardResult hazard;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06001934 // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
1935 // Descriptors, so we do not have to worry about depth slicing here.
1936 // See: VUID 00343
1937 assert(!img_view_state->IsDepthSliced());
John Zulauf110413c2021-03-20 05:38:38 -06001938 const IMAGE_STATE *img_state = img_view_state->image_state.get();
John Zulauf361fb532020-07-22 10:45:39 -06001939 const auto &subresource_range = img_view_state->normalized_subresource_range;
John Zulauf110413c2021-03-20 05:38:38 -06001940
1941 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1942 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1943 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
John Zulauf361fb532020-07-22 10:45:39 -06001944 // Input attachments are subject to raster ordering rules
1945 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001946 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001947 } else {
John Zulauf110413c2021-03-20 05:38:38 -06001948 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range);
John Zulauf361fb532020-07-22 10:45:39 -06001949 }
John Zulauf110413c2021-03-20 05:38:38 -06001950
John Zulauf33fc1d52020-07-17 11:01:10 -06001951 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001952 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001953 img_view_state->image_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001954 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1955 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001956 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001957 sync_state_->report_data->FormatHandle(img_view_state->image_view()).c_str(),
1958 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1959 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001960 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1961 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001962 set_binding.first.binding, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001963 }
1964 break;
1965 }
1966 case DescriptorClass::TexelBuffer: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07001967 const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
1968 if (texel_descriptor->Invalid()) {
1969 continue;
1970 }
1971 const auto *buf_view_state = texel_descriptor->GetBufferViewState();
1972 const auto *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001973 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001974 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001975 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001976 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001977 buf_view_state->buffer_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001978 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1979 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001980 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view()).c_str(),
1981 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1982 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001983 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001984 string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001985 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001986 }
1987 break;
1988 }
1989 case DescriptorClass::GeneralBuffer: {
1990 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
Jeremy Gebbena08da232022-02-01 15:14:52 -07001991 if (buffer_descriptor->Invalid()) {
1992 continue;
1993 }
1994 const auto *buf_state = buffer_descriptor->GetBufferState();
John Zulauf3e86bf02020-09-12 10:47:57 -06001995 const ResourceAccessRange range =
1996 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001997 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001998 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001999 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002000 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06002001 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
2002 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002003 sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
2004 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
2005 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06002006 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06002007 string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07002008 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002009 }
2010 break;
2011 }
2012 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2013 default:
2014 break;
2015 }
2016 }
2017 }
2018 }
2019 return skip;
2020}
2021
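// Record-side counterpart of ValidateDispatchDrawDescriptorSet: update the access state for every descriptor
// referenced by the bound pipeline, using raster ordering for input attachments and non-attachment ordering otherwise.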
2022void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
John Zulauf14940722021-04-12 15:19:02 -06002023 const ResourceUsageTag tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002024 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06002025 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002026 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002027 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06002028 return;
2029 }
2030
2031 using DescriptorClass = cvdescriptorset::DescriptorClass;
2032 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
2033 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
locke-lunarg61870c22020-06-09 14:51:50 -06002034 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
2035
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002036 for (const auto &stage_state : pipe->stage_state) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002037 const auto raster_state = pipe->RasterizationState();
2038 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06002039 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002040 }
locke-lunarg61870c22020-06-09 14:51:50 -06002041 for (const auto &set_binding : stage_state.descriptor_uses) {
Jeremy Gebben4d51c552022-01-06 21:27:15 -07002042 const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002043 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06002044 set_binding.first.binding);
locke-lunarg61870c22020-06-09 14:51:50 -06002045 const auto descriptor_type = binding_it.GetType();
2046 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
2047 auto array_idx = 0;
2048
2049 if (binding_it.IsVariableDescriptorCount()) {
2050 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
2051 }
2052 SyncStageAccessIndex sync_index =
2053 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
2054
2055 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
2056 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
2057 switch (descriptor->GetClass()) {
2058 case DescriptorClass::ImageSampler:
2059 case DescriptorClass::Image: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07002060 // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
2061 const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
2062 if (image_descriptor->Invalid()) {
2063 continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002064 }
Jeremy Gebbena08da232022-02-01 15:14:52 -07002065 const auto *img_view_state = image_descriptor->GetImageViewState();
Jeremy Gebben11a68a32021-07-29 11:59:22 -06002066 // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
2067 // Descriptors, so we do not have to worry about depth slicing here.
2068 // See: VUID 00343
2069 assert(!img_view_state->IsDepthSliced());
locke-lunarg61870c22020-06-09 14:51:50 -06002070 const IMAGE_STATE *img_state = img_view_state->image_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002071 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
John Zulauf110413c2021-03-20 05:38:38 -06002072 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
2073 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
2074 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kRaster,
2075 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002076 } else {
John Zulauf110413c2021-03-20 05:38:38 -06002077 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kNonAttachment,
2078 img_view_state->normalized_subresource_range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002079 }
locke-lunarg61870c22020-06-09 14:51:50 -06002080 break;
2081 }
2082 case DescriptorClass::TexelBuffer: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07002083 const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
2084 if (texel_descriptor->Invalid()) {
2085 continue;
2086 }
2087 const auto *buf_view_state = texel_descriptor->GetBufferViewState();
2088 const auto *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002089 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002090 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002091 break;
2092 }
2093 case DescriptorClass::GeneralBuffer: {
2094 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
Jeremy Gebbena08da232022-02-01 15:14:52 -07002095 if (buffer_descriptor->Invalid()) {
2096 continue;
2097 }
2098 const auto *buf_state = buffer_descriptor->GetBufferState();
John Zulauf3e86bf02020-09-12 10:47:57 -06002099 const ResourceAccessRange range =
2100 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07002101 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002102 break;
2103 }
2104 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2105 default:
2106 break;
2107 }
2108 }
2109 }
2110 }
2111}
2112
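// Check the bound vertex buffers for hazards over the range of vertices actually read by this draw.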
2113bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
2114 bool skip = false;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002115 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002116 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002117 return skip;
2118 }
2119
2120 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2121 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002122 const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002123
2124 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002125 const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002126 if (binding_description.binding < binding_buffers_size) {
2127 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002128 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002129
locke-lunarg1ae57d62020-11-18 10:49:19 -07002130 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002131 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2132 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002133 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002134 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002135 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002136 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
2137 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
2138 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002139 }
2140 }
2141 }
2142 return skip;
2143}
2144
John Zulauf14940722021-04-12 15:19:02 -06002145void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002146 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002147 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002148 return;
2149 }
2150 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2151 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002152 const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002153
2154 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002155 const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002156 if (binding_description.binding < binding_buffers_size) {
2157 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002158 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002159
locke-lunarg1ae57d62020-11-18 10:49:19 -07002160 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002161 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2162 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002163 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
2164 SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002165 }
2166 }
2167}
2168
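// Check the bound index buffer for hazards over the indices read by this draw; the vertex buffers are checked
// conservatively over their full range, since the index values themselves are not inspected.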
2169bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2170 bool skip = false;
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002171 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002172 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002173 }
locke-lunarg61870c22020-06-09 14:51:50 -06002174
locke-lunarg1ae57d62020-11-18 10:49:19 -07002175 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002176 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002177 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2178 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002179 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002180 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002181 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002182 index_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
2183 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer()).c_str(),
2184 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002185 }
2186
 2187 // TODO: For now, we detect the whole vertex buffer. The index buffer contents could still change before queue submission.
 2188 // We will detect a more accurate range in the future.
2189 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2190 return skip;
2191}
2192
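// Record-side counterpart of ValidateDrawVertexIndex: update index buffer (and, conservatively, vertex buffer) reads.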
John Zulauf14940722021-04-12 15:19:02 -06002193void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag tag) {
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002194 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002195
locke-lunarg1ae57d62020-11-18 10:49:19 -07002196 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002197 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002198 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2199 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002200 current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002201
 2202 // TODO: For now, we detect the whole vertex buffer. The index buffer contents could still change before queue submission.
 2203 // We will detect a more accurate range in the future.
2204 RecordDrawVertex(UINT32_MAX, 0, tag);
2205}
2206
2207bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002208 bool skip = false;
2209 if (!current_renderpass_context_) return skip;
John Zulauf64ffe552021-02-06 10:25:07 -07002210 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
locke-lunarg7077d502020-06-18 21:37:26 -06002211 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002212}
2213
John Zulauf14940722021-04-12 15:19:02 -06002214void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002215 if (current_renderpass_context_) {
John Zulauf64ffe552021-02-06 10:25:07 -07002216 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002217 }
locke-lunarg61870c22020-06-09 14:51:50 -06002218}
2219
John Zulauf41a9c7c2021-12-07 15:59:53 -07002220ResourceUsageTag CommandBufferAccessContext::RecordBeginRenderPass(CMD_TYPE cmd, const RENDER_PASS_STATE &rp_state,
2221 const VkRect2D &render_area,
2222 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
John Zulauf355e49b2020-04-24 15:11:15 -06002223 // Create an access context for the current renderpass.
John Zulauf41a9c7c2021-12-07 15:59:53 -07002224 const auto barrier_tag = NextCommandTag(cmd, ResourceUsageRecord::SubcommandType::kSubpassTransition);
2225 const auto load_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kLoadOp);
John Zulauf64ffe552021-02-06 10:25:07 -07002226 render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
John Zulauf16adfc92020-04-08 10:28:33 -06002227 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf41a9c7c2021-12-07 15:59:53 -07002228 current_renderpass_context_->RecordBeginRenderPass(barrier_tag, load_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002229 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf41a9c7c2021-12-07 15:59:53 -07002230 return barrier_tag;
John Zulauf16adfc92020-04-08 10:28:33 -06002231}
2232
John Zulauf41a9c7c2021-12-07 15:59:53 -07002233ResourceUsageTag CommandBufferAccessContext::RecordNextSubpass(const CMD_TYPE cmd) {
John Zulauf16adfc92020-04-08 10:28:33 -06002234 assert(current_renderpass_context_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002235 if (!current_renderpass_context_) return NextCommandTag(cmd);
2236
2237 auto store_tag = NextCommandTag(cmd, ResourceUsageRecord::SubcommandType::kStoreOp);
2238 auto barrier_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kSubpassTransition);
2239 auto load_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kLoadOp);
2240
2241 current_renderpass_context_->RecordNextSubpass(store_tag, barrier_tag, load_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002242 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf41a9c7c2021-12-07 15:59:53 -07002243 return barrier_tag;
John Zulauf16adfc92020-04-08 10:28:33 -06002244}
2245
John Zulauf41a9c7c2021-12-07 15:59:53 -07002246ResourceUsageTag CommandBufferAccessContext::RecordEndRenderPass(const CMD_TYPE cmd) {
John Zulauf16adfc92020-04-08 10:28:33 -06002247 assert(current_renderpass_context_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002248 if (!current_renderpass_context_) return NextCommandTag(cmd);
John Zulauf16adfc92020-04-08 10:28:33 -06002249
John Zulauf41a9c7c2021-12-07 15:59:53 -07002250 auto store_tag = NextCommandTag(cmd, ResourceUsageRecord::SubcommandType::kStoreOp);
2251 auto barrier_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kSubpassTransition);
2252
2253 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, store_tag, barrier_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002254 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002255 current_renderpass_context_ = nullptr;
John Zulauf41a9c7c2021-12-07 15:59:53 -07002256 return barrier_tag;
John Zulauf16adfc92020-04-08 10:28:33 -06002257}
2258
John Zulauf4a6105a2020-11-17 15:11:05 -07002259void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
 2260 // Erase is okay with the key not being present
Jeremy Gebbenf4449392022-01-28 10:09:10 -07002261 auto event_state = sync_state_->Get<EVENT_STATE>(event);
John Zulauf669dfd52021-01-27 17:15:28 -07002262 if (event_state) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06002263 GetCurrentEventsContext()->Destroy(event_state.get());
John Zulaufd5115702021-01-18 12:34:33 -07002264 }
2265}
2266
John Zulaufae842002021-04-15 18:20:55 -06002267// This is the recorded cb context
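// Replays the recorded sync ops against the proxy (executing) context: for each op, the recorded first-use accesses in
// the preceding tag range are checked for hazards against the proxy's current state, and then the op is recorded into
// the proxy so subsequent ranges are evaluated with its barriers applied.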
John Zulaufbb890452021-12-14 11:30:18 -07002268bool CommandBufferAccessContext::ValidateFirstUse(CommandExecutionContext *proxy_context, const char *func_name,
John Zulauf4fa68462021-04-26 21:04:22 -06002269 uint32_t index) const {
2270 assert(proxy_context);
2271 auto *events_context = proxy_context->GetCurrentEventsContext();
2272 auto *access_context = proxy_context->GetCurrentAccessContext();
2273 const ResourceUsageTag base_tag = proxy_context->GetTagLimit();
John Zulaufae842002021-04-15 18:20:55 -06002274 bool skip = false;
2275 ResourceUsageRange tag_range = {0, 0};
2276 const AccessContext *recorded_context = GetCurrentAccessContext();
2277 assert(recorded_context);
2278 HazardResult hazard;
John Zulaufbb890452021-12-14 11:30:18 -07002279 auto log_msg = [this](const HazardResult &hazard, const CommandExecutionContext &exec_context, const char *func_name,
John Zulaufae842002021-04-15 18:20:55 -06002280 uint32_t index) {
John Zulaufbb890452021-12-14 11:30:18 -07002281 const auto handle = exec_context.Handle();
John Zulaufae842002021-04-15 18:20:55 -06002282 const auto recorded_handle = cb_state_->commandBuffer();
John Zulauf4fa68462021-04-26 21:04:22 -06002283 const auto *report_data = sync_state_->report_data;
John Zulaufbb890452021-12-14 11:30:18 -07002284 return sync_state_->LogError(handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf4fa68462021-04-26 21:04:22 -06002285 "%s: Hazard %s for entry %" PRIu32 ", %s, Recorded access info %s. Access info %s.", func_name,
2286 string_SyncHazard(hazard.hazard), index, report_data->FormatHandle(recorded_handle).c_str(),
John Zulaufbb890452021-12-14 11:30:18 -07002287 FormatUsage(*hazard.recorded_access).c_str(), exec_context.FormatUsage(hazard).c_str());
John Zulaufae842002021-04-15 18:20:55 -06002288 };
John Zulaufbb890452021-12-14 11:30:18 -07002289 const ReplayTrackbackBarriersAction *replay_context = nullptr;
John Zulaufae842002021-04-15 18:20:55 -06002290 for (const auto &sync_op : sync_ops_) {
John Zulauf4fa68462021-04-26 21:04:22 -06002291 // we update the range to include any layout transition first use writes,
2292 // as they are stored along with the source scope (as effective barrier) when recorded
2293 tag_range.end = sync_op.tag + 1;
John Zulauf610e28c2021-08-03 17:46:23 -06002294 skip |= sync_op.sync_op->ReplayValidate(sync_op.tag, *this, base_tag, proxy_context);
John Zulauf4fa68462021-04-26 21:04:22 -06002295
John Zulaufbb890452021-12-14 11:30:18 -07002296 hazard = recorded_context->DetectFirstUseHazard(tag_range, *access_context, replay_context);
John Zulaufae842002021-04-15 18:20:55 -06002297 if (hazard.hazard) {
John Zulauf4fa68462021-04-26 21:04:22 -06002298 skip |= log_msg(hazard, *proxy_context, func_name, index);
John Zulaufae842002021-04-15 18:20:55 -06002299 }
2300 // NOTE: Add call to replay validate here when we add support for syncop with non-trivial replay
John Zulauf4fa68462021-04-26 21:04:22 -06002301 // Record the barrier into the proxy context.
John Zulaufbb890452021-12-14 11:30:18 -07002302 sync_op.sync_op->ReplayRecord(base_tag + sync_op.tag, access_context, events_context);
2303 replay_context = sync_op.sync_op->GetReplayTrackback();
John Zulauf4fa68462021-04-26 21:04:22 -06002304 tag_range.begin = tag_range.end;
John Zulaufae842002021-04-15 18:20:55 -06002305 }
2306
John Zulaufbb890452021-12-14 11:30:18 -07002307 // Renderpasses may not cross command buffer boundaries
2308 assert(replay_context == nullptr);
2309
John Zulaufae842002021-04-15 18:20:55 -06002310 // and anything after the last syncop
John Zulaufae842002021-04-15 18:20:55 -06002311 tag_range.end = ResourceUsageRecord::kMaxIndex;
John Zulaufbb890452021-12-14 11:30:18 -07002312 hazard = recorded_context->DetectFirstUseHazard(tag_range, *access_context, replay_context);
John Zulaufae842002021-04-15 18:20:55 -06002313 if (hazard.hazard) {
John Zulauf4fa68462021-04-26 21:04:22 -06002314 skip |= log_msg(hazard, *proxy_context, func_name, index);
John Zulaufae842002021-04-15 18:20:55 -06002315 }
2316
2317 return skip;
2318}
2319
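// Import an executed (recorded) command buffer into this context: replay its sync ops, import its access log, and
// resolve its recorded accesses into the current access state with their tags offset into this context's log.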
John Zulauf4fa68462021-04-26 21:04:22 -06002320void CommandBufferAccessContext::RecordExecutedCommandBuffer(const CommandBufferAccessContext &recorded_cb_context, CMD_TYPE cmd) {
2321 auto *events_context = GetCurrentEventsContext();
2322 auto *access_context = GetCurrentAccessContext();
2323 const AccessContext *recorded_context = recorded_cb_context.GetCurrentAccessContext();
2324 assert(recorded_context);
2325
2326 // Just run through the barriers ignoring the usage from the recorded context, as Resolve will overwrite outdated state
2327 const ResourceUsageTag base_tag = GetTagLimit();
2328 for (const auto &sync_op : recorded_cb_context.sync_ops_) {
 2329 // we update the range to include any layout transition first use writes,
2330 // as they are stored along with the source scope (as effective barrier) when recorded
John Zulaufbb890452021-12-14 11:30:18 -07002331 sync_op.sync_op->ReplayRecord(base_tag + sync_op.tag, access_context, events_context);
John Zulauf4fa68462021-04-26 21:04:22 -06002332 }
2333
2334 ResourceUsageRange tag_range = ImportRecordedAccessLog(recorded_cb_context);
 2335 assert(base_tag == tag_range.begin); // to ensure the two offset calculations agree
2336 ResolveRecordedContext(*recorded_context, tag_range.begin);
2337}
2338
John Zulauf3c788ef2022-02-22 12:12:30 -07002339void CommandExecutionContext::ResolveRecordedContext(const AccessContext &recorded_context, ResourceUsageTag offset) {
John Zulauf4fa68462021-04-26 21:04:22 -06002340 auto tag_offset = [offset](ResourceAccessState *access) { access->OffsetTag(offset); };
2341
2342 auto *access_context = GetCurrentAccessContext();
2343 for (auto address_type : kAddressTypes) {
2344 recorded_context.ResolveAccessRange(address_type, kFullRange, tag_offset, &access_context->GetAccessStateMap(address_type),
2345 nullptr, false);
2346 }
2347}
2348
John Zulauf3c788ef2022-02-22 12:12:30 -07002349ResourceUsageRange CommandExecutionContext::ImportRecordedAccessLog(const CommandBufferAccessContext &recorded_context) {
John Zulauf4fa68462021-04-26 21:04:22 -06002350 // The execution references ensure lifespan for the referenced child CBs...
2351 ResourceUsageRange tag_range(GetTagLimit(), 0);
John Zulauf3c788ef2022-02-22 12:12:30 -07002352 InsertRecordedAccessLogEntries(recorded_context);
2353 tag_range.end = GetTagLimit();
John Zulauf4fa68462021-04-26 21:04:22 -06002354 return tag_range;
2355}
2356
John Zulauf3c788ef2022-02-22 12:12:30 -07002357void CommandBufferAccessContext::InsertRecordedAccessLogEntries(const CommandBufferAccessContext &recorded_context) {
2358 cbs_referenced_.emplace(recorded_context.GetCBStateShared());
2359 access_log_.insert(access_log_.end(), recorded_context.access_log_.cbegin(), recorded_context.access_log_.end());
2360}
2361
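// Usage tags are simply indices into access_log_. Each command gets a primary tag (NextCommandTag); operations that
// generate additional accesses (load/store ops, subpass transitions, indexed sub-operations) get subcommand tags.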
John Zulauf41a9c7c2021-12-07 15:59:53 -07002362ResourceUsageTag CommandBufferAccessContext::NextSubcommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
2363 ResourceUsageTag next = access_log_.size();
2364 access_log_.emplace_back(command, command_number_, subcommand, ++subcommand_number_, cb_state_.get(), reset_count_);
2365 return next;
2366}
2367
2368ResourceUsageTag CommandBufferAccessContext::NextCommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
2369 command_number_++;
2370 subcommand_number_ = 0;
2371 ResourceUsageTag next = access_log_.size();
2372 access_log_.emplace_back(command, command_number_, subcommand, subcommand_number_, cb_state_.get(), reset_count_);
2373 return next;
2374}
2375
2376ResourceUsageTag CommandBufferAccessContext::NextIndexedCommandTag(CMD_TYPE command, uint32_t index) {
2377 if (index == 0) {
2378 return NextCommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
2379 }
2380 return NextSubcommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
2381}
2382
John Zulaufbb890452021-12-14 11:30:18 -07002383void CommandBufferAccessContext::RecordSyncOp(SyncOpPointer &&sync_op) {
2384 auto tag = sync_op->Record(this);
2385 // As renderpass operations can have side effects on the command buffer access context,
2386 // update the sync operation to record these if any.
2387 if (current_renderpass_context_) {
2388 const auto &rpc = *current_renderpass_context_;
2389 sync_op->SetReplayContext(rpc.GetCurrentSubpass(), rpc.GetReplayContext());
2390 }
2391 sync_ops_.emplace_back(tag, std::move(sync_op));
2392}
2393
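// Detector used when replaying a recorded command buffer: if a replay barrier is present, it is applied to a copy of
// the executing access state before that state is checked against the recorded first-use access.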
John Zulaufae842002021-04-15 18:20:55 -06002394class HazardDetectFirstUse {
2395 public:
John Zulaufbb890452021-12-14 11:30:18 -07002396 HazardDetectFirstUse(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
2397 const ReplayTrackbackBarriersAction *replay_barrier)
2398 : recorded_use_(recorded_use), tag_range_(tag_range), replay_barrier_(replay_barrier) {}
John Zulaufae842002021-04-15 18:20:55 -06002399 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulaufbb890452021-12-14 11:30:18 -07002400 if (replay_barrier_) {
2401 // Intentional copy to apply the replay barrier
2402 auto access = pos->second;
2403 (*replay_barrier_)(&access);
2404 return access.DetectHazard(recorded_use_, tag_range_);
2405 }
John Zulaufae842002021-04-15 18:20:55 -06002406 return pos->second.DetectHazard(recorded_use_, tag_range_);
2407 }
2408 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
2409 return pos->second.DetectAsyncHazard(recorded_use_, tag_range_, start_tag);
2410 }
2411
2412 private:
2413 const ResourceAccessState &recorded_use_;
2414 const ResourceUsageRange &tag_range_;
John Zulaufbb890452021-12-14 11:30:18 -07002415 const ReplayTrackbackBarriersAction *replay_barrier_;
John Zulaufae842002021-04-15 18:20:55 -06002416};
2417
 2418 // This is called with the *recorded* command buffer's access context, with the *active* access context passed in, against which
2419// hazards will be detected
John Zulaufbb890452021-12-14 11:30:18 -07002420HazardResult AccessContext::DetectFirstUseHazard(const ResourceUsageRange &tag_range, const AccessContext &access_context,
2421 const ReplayTrackbackBarriersAction *replay_barrier) const {
John Zulaufae842002021-04-15 18:20:55 -06002422 HazardResult hazard;
2423 for (const auto address_type : kAddressTypes) {
2424 const auto &recorded_access_map = GetAccessStateMap(address_type);
2425 for (const auto &recorded_access : recorded_access_map) {
2426 // Cull any entries not in the current tag range
2427 if (!recorded_access.second.FirstAccessInTagRange(tag_range)) continue;
John Zulaufbb890452021-12-14 11:30:18 -07002428 HazardDetectFirstUse detector(recorded_access.second, tag_range, replay_barrier);
John Zulaufae842002021-04-15 18:20:55 -06002429 hazard = access_context.DetectHazard(address_type, detector, recorded_access.first, DetectOptions::kDetectAll);
2430 if (hazard.hazard) break;
2431 }
2432 }
2433
2434 return hazard;
2435}
2436
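// Validate the attachment accesses of a draw within the current subpass: color attachment writes for each fragment
// shader output location, plus depth/stencil attachment writes when the pipeline state makes them writable.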
John Zulaufbb890452021-12-14 11:30:18 -07002437bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &exec_context,
2438 const CMD_BUFFER_STATE &cmd, const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002439 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07002440 const auto &sync_state = exec_context.GetSyncState();
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002441 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002442 if (!pipe) {
2443 return skip;
2444 }
2445
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002446 const auto raster_state = pipe->RasterizationState();
2447 if (raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002448 return skip;
2449 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002450 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002451 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg37047832020-06-12 13:44:45 -06002452
John Zulauf1a224292020-06-30 14:52:13 -06002453 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002454 // Subpass's inputAttachment has been done in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002455 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2456 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002457 if (location >= subpass.colorAttachmentCount ||
2458 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002459 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002460 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002461 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2462 if (!view_gen.IsValid()) continue;
2463 HazardResult hazard =
2464 current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
2465 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment);
locke-lunarg96dc9632020-06-10 17:22:18 -06002466 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002467 const VkImageView view_handle = view_gen.GetViewState()->image_view();
John Zulaufd0ec59f2021-03-13 14:25:08 -07002468 skip |= sync_state.LogError(view_handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002469 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002470 func_name, string_SyncHazard(hazard.hazard),
John Zulaufd0ec59f2021-03-13 14:25:08 -07002471 sync_state.report_data->FormatHandle(view_handle).c_str(),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002472 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulaufbb890452021-12-14 11:30:18 -07002473 location, exec_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002474 }
2475 }
2476 }
locke-lunarg37047832020-06-12 13:44:45 -06002477
 2478 // PHASE1 TODO: Add layout-based read vs. write selection.
2479 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002480 const auto ds_state = pipe->DepthStencilState();
2481 const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002482
2483 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2484 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2485 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002486 bool depth_write = false, stencil_write = false;
2487
 2488 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002489 if (!FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable && ds_state->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002490 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2491 depth_write = true;
2492 }
2493 // PHASE1 TODO: It needs to check if stencil is writable.
2494 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
 2495 // If the depth test is disabled, it's considered to pass, and then depthFailOp doesn't run.
 2496 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002497 if (!FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002498 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2499 stencil_write = true;
2500 }
2501
2502 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2503 if (depth_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002504 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
2505 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2506 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002507 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002508 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002509 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002510 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002511 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002512 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2513 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulaufbb890452021-12-14 11:30:18 -07002514 exec_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002515 }
2516 }
2517 if (stencil_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002518 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
2519 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2520 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002521 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002522 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002523 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002524 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002525 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002526 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2527 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulaufbb890452021-12-14 11:30:18 -07002528 exec_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002529 }
locke-lunarg61870c22020-06-09 14:51:50 -06002530 }
2531 }
2532 return skip;
2533}
2534
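// Record-side counterpart of ValidateDrawSubpassAttachment: update color and (writable) depth/stencil attachment
// access state for a draw in the current subpass.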
John Zulauf14940722021-04-12 15:19:02 -06002535void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002536 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002537 if (!pipe) {
2538 return;
2539 }
2540
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002541 const auto *raster_state = pipe->RasterizationState();
2542 if (raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002543 return;
2544 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002545 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002546 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg61870c22020-06-09 14:51:50 -06002547
John Zulauf1a224292020-06-30 14:52:13 -06002548 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002549 // Subpass's inputAttachment has been done in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002550 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2551 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002552 if (location >= subpass.colorAttachmentCount ||
2553 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002554 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002555 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002556 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2557 current_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
2558 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment,
2559 tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002560 }
2561 }
locke-lunarg37047832020-06-12 13:44:45 -06002562
 2563 // PHASE1 TODO: Add layout-based read vs. write selection.
2564 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002565 const auto *ds_state = pipe->DepthStencilState();
2566 const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002567 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2568 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2569 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002570 bool depth_write = false, stencil_write = false;
John Zulaufd0ec59f2021-03-13 14:25:08 -07002571 const bool has_depth = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT);
2572 const bool has_stencil = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002573
 2574 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002575 if (has_depth && !FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable &&
2576 ds_state->depthWriteEnable && IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
locke-lunarg37047832020-06-12 13:44:45 -06002577 depth_write = true;
2578 }
2579 // PHASE1 TODO: It needs to check if stencil is writable.
2580 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
 2581 // If the depth test is disabled, it's considered to pass, and then depthFailOp doesn't run.
 2582 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002583 if (has_stencil && !FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002584 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2585 stencil_write = true;
2586 }
2587
John Zulaufd0ec59f2021-03-13 14:25:08 -07002588 if (depth_write || stencil_write) {
2589 const auto ds_gentype = view_gen.GetDepthStencilRenderAreaGenType(depth_write, stencil_write);
2590 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2591 current_context.UpdateAccessState(view_gen, ds_gentype, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2592 SyncOrdering::kDepthStencilAttachment, tag);
locke-lunarg37047832020-06-12 13:44:45 -06002593 }
locke-lunarg61870c22020-06-09 14:51:50 -06002594 }
2595}
2596
John Zulaufbb890452021-12-14 11:30:18 -07002597bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &exec_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002598 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002599 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07002600 skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002601 current_subpass_);
John Zulaufbb890452021-12-14 11:30:18 -07002602 skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002603 func_name);
2604
John Zulauf355e49b2020-04-24 15:11:15 -06002605 const auto next_subpass = current_subpass_ + 1;
ziga-lunarg31a3e772022-03-22 11:48:46 +01002606 if (next_subpass >= subpass_contexts_.size()) {
2607 return skip;
2608 }
John Zulauf1507ee42020-05-18 11:33:09 -06002609 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002610 skip |=
John Zulaufbb890452021-12-14 11:30:18 -07002611 next_context.ValidateLayoutTransitions(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002612 if (!skip) {
2613 // To avoid complex (and buggy) duplication of the affect of layout transitions on load operations, we'll record them
2614 // on a copy of the (empty) next context.
2615 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2616 AccessContext temp_context(next_context);
John Zulaufee984022022-04-13 16:39:50 -06002617 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kInvalidTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002618 skip |=
John Zulaufbb890452021-12-14 11:30:18 -07002619 temp_context.ValidateLoadOperation(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002620 }
John Zulauf7635de32020-05-29 17:14:15 -06002621 return skip;
2622}
John Zulaufbb890452021-12-14 11:30:18 -07002623bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &exec_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002624 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002625 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07002626 skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002627 current_subpass_);
John Zulaufbb890452021-12-14 11:30:18 -07002628    skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_,
2630                                                    attachment_views_, func_name);
John Zulaufbb890452021-12-14 11:30:18 -07002631 skip |= ValidateFinalSubpassLayoutTransitions(exec_context, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002632 return skip;
2633}
2634
John Zulauf64ffe552021-02-06 10:25:07 -07002635AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002636 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002637}
2638
John Zulaufbb890452021-12-14 11:30:18 -07002639bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &exec_context,
John Zulauf64ffe552021-02-06 10:25:07 -07002640 const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002641 bool skip = false;
2642
John Zulauf7635de32020-05-29 17:14:15 -06002643    // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2644 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2645 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2646 // to apply and only copy then, if this proves a hot spot.
2647 std::unique_ptr<AccessContext> proxy_for_current;
2648
John Zulauf355e49b2020-04-24 15:11:15 -06002649 // Validate the "finalLayout" transitions to external
2650    // Get them from where they're hiding in the extra entry.
2651 const auto &final_transitions = rp_state_->subpass_transitions.back();
2652 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002653 const auto &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002654 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufbb890452021-12-14 11:30:18 -07002655 assert(trackback.source_subpass); // Transitions are given implicit transitions if the StateTracker is working correctly
2656 auto *context = trackback.source_subpass;
John Zulauf7635de32020-05-29 17:14:15 -06002657
2658 if (transition.prev_pass == current_subpass_) {
2659 if (!proxy_for_current) {
2660                // We haven't recorded the resolve for the current_subpass, so we need to copy current and update it *as if*
John Zulauf64ffe552021-02-06 10:25:07 -07002661 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002662 }
2663 context = proxy_for_current.get();
2664 }
2665
John Zulaufa0a98292020-09-18 09:30:10 -06002666        // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2667 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002668 auto hazard = context->DetectImageBarrierHazard(view_gen, merged_barrier, AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002669 if (hazard.hazard) {
John Zulaufee984022022-04-13 16:39:50 -06002670 if (hazard.tag == kInvalidTag) {
2671 // Hazard vs. ILT
John Zulaufbb890452021-12-14 11:30:18 -07002672 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06002673 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
2674 "%s: Hazard %s vs. store/resolve operations in subpass %" PRIu32 " for attachment %" PRIu32
2675 " final image layout transition (old_layout: %s, new_layout: %s).",
2676 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2677 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout));
2678 } else {
John Zulaufbb890452021-12-14 11:30:18 -07002679 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06002680 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
2681 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2682 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2683 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2684 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulaufbb890452021-12-14 11:30:18 -07002685 exec_context.FormatUsage(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06002686 }
John Zulauf355e49b2020-04-24 15:11:15 -06002687 }
2688 }
2689 return skip;
2690}
2691
John Zulauf14940722021-04-12 15:19:02 -06002692void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002693 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002694 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002695}
2696
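// Record the load-operation accesses for every attachment first used in the current subpass. Color attachments
// update the render-area generator with the color-attachment ordering rule; depth and stencil aspects are
// recorded separately (loadOp vs. stencilLoadOp) with the depth/stencil ordering rule.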
John Zulauf14940722021-04-12 15:19:02 -06002697void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002698 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2699 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf1507ee42020-05-18 11:33:09 -06002700
2701 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2702 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002703 const AttachmentViewGen &view_gen = attachment_views_[i];
2704 if (!view_gen.IsValid()) continue; // UNUSED
John Zulauf1507ee42020-05-18 11:33:09 -06002705
2706 const auto &ci = attachment_ci[i];
2707 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002708 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002709 const bool is_color = !(has_depth || has_stencil);
2710
2711 if (is_color) {
John Zulauf57261402021-08-13 11:32:06 -06002712 const SyncStageAccessIndex load_op = ColorLoadUsage(ci.loadOp);
2713 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2714 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea, load_op,
2715 SyncOrdering::kColorAttachment, tag);
2716 }
John Zulauf1507ee42020-05-18 11:33:09 -06002717 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06002718 if (has_depth) {
John Zulauf57261402021-08-13 11:32:06 -06002719 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.loadOp);
2720 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2721 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_op,
2722 SyncOrdering::kDepthStencilAttachment, tag);
2723 }
John Zulauf1507ee42020-05-18 11:33:09 -06002724 }
2725 if (has_stencil) {
John Zulauf57261402021-08-13 11:32:06 -06002726 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.stencilLoadOp);
2727 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2728 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, load_op,
2729 SyncOrdering::kDepthStencilAttachment, tag);
2730 }
John Zulauf1507ee42020-05-18 11:33:09 -06002731 }
2732 }
2733 }
2734 }
2735}
John Zulaufd0ec59f2021-03-13 14:25:08 -07002736AttachmentViewGenVector RenderPassAccessContext::CreateAttachmentViewGen(
2737 const VkRect2D &render_area, const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
2738 AttachmentViewGenVector view_gens;
2739 VkExtent3D extent = CastTo3D(render_area.extent);
2740 VkOffset3D offset = CastTo3D(render_area.offset);
2741 view_gens.reserve(attachment_views.size());
2742 for (const auto *view : attachment_views) {
2743 view_gens.emplace_back(view, offset, extent);
2744 }
2745 return view_gens;
2746}
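// The render pass context owns one AccessContext per subpass, a parallel ReplayRenderpassContext (presumably
// used when replaying recorded command buffers), and the attachment view generators for the render area.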
John Zulauf64ffe552021-02-06 10:25:07 -07002747RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2748 VkQueueFlags queue_flags,
2749 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2750 const AccessContext *external_context)
John Zulaufd0ec59f2021-03-13 14:25:08 -07002751 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_() {
John Zulauf355e49b2020-04-24 15:11:15 -06002752    // Add this for all subpasses here so that they exist during next subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002753 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulaufbb890452021-12-14 11:30:18 -07002754 replay_context_ = std::make_shared<ReplayRenderpassContext>();
2755 auto &replay_subpass_contexts = replay_context_->subpass_contexts;
2756 replay_subpass_contexts.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002757 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002758 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulaufbb890452021-12-14 11:30:18 -07002759 replay_subpass_contexts.emplace_back(queue_flags, rp_state_->subpass_dependencies[pass], replay_subpass_contexts);
John Zulauf355e49b2020-04-24 15:11:15 -06002760 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002761 attachment_views_ = CreateAttachmentViewGen(render_area, attachment_views);
John Zulauf64ffe552021-02-06 10:25:07 -07002762}
John Zulauf41a9c7c2021-12-07 15:59:53 -07002763void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag barrier_tag, const ResourceUsageTag load_tag) {
John Zulauf64ffe552021-02-06 10:25:07 -07002764 assert(0 == current_subpass_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002765 subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
2766 RecordLayoutTransitions(barrier_tag);
2767 RecordLoadOperations(load_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002768}
John Zulauf1507ee42020-05-18 11:33:09 -06002769
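// Three distinct tags are used when advancing subpasses: store_tag for the resolve/store accesses that logically
// belong to the previous subpass, barrier_tag for the layout transitions into the new subpass, and load_tag for
// the new subpass's load operations.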
John Zulauf41a9c7c2021-12-07 15:59:53 -07002770void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag store_tag, const ResourceUsageTag barrier_tag,
2771 const ResourceUsageTag load_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002772 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauf41a9c7c2021-12-07 15:59:53 -07002773 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
2774 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002775
ziga-lunarg31a3e772022-03-22 11:48:46 +01002776 if (current_subpass_ + 1 >= subpass_contexts_.size()) {
2777 return;
2778 }
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002779 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2780 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002781 current_subpass_++;
John Zulauf41a9c7c2021-12-07 15:59:53 -07002782 subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
2783 RecordLayoutTransitions(barrier_tag);
2784 RecordLoadOperations(load_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002785}
2786
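// On end of render pass: record the final resolve/store accesses, fold every subpass context back into the
// external (command buffer) context, and apply the "finalLayout" transitions as barriered updates against it.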
John Zulauf41a9c7c2021-12-07 15:59:53 -07002787void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag store_tag,
2788 const ResourceUsageTag barrier_tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002789 // Add the resolve and store accesses
John Zulauf41a9c7c2021-12-07 15:59:53 -07002790 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
2791 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002792
John Zulauf355e49b2020-04-24 15:11:15 -06002793 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002794 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002795
2796 // Add the "finalLayout" transitions to external
2797    // Get them from where they're hiding in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002798    // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2799    // TODO Aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2800    // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002801 const auto &final_transitions = rp_state_->subpass_transitions.back();
2802 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002803 const AttachmentViewGen &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002804 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufbb890452021-12-14 11:30:18 -07002805 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.source_subpass);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002806 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), barrier_tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002807 for (const auto &barrier : last_trackback.barriers) {
John Zulaufd5115702021-01-18 12:34:33 -07002808 barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002809 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002810 external_context->ApplyUpdateAction(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002811 }
2812}
2813
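// MakeSrc/MakeDst build execution scopes from an API stage mask: the mask is expanded to individual stages for
// the given queue capabilities, then widened to all logically earlier (src) or later (dst) stages so execution
// dependency chaining reduces to a bitwise test. Illustrative sketch (not from the original source): given
// WithEarlierPipelineStages(), a src mask of COLOR_ATTACHMENT_OUTPUT would also place fragment and vertex work
// in exec_scope.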
Jeremy Gebben40a22942020-12-22 14:22:06 -07002814SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002815 SyncExecScope result;
2816 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002817 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2818 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002819 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2820 return result;
2821}
2822
Jeremy Gebben40a22942020-12-22 14:22:06 -07002823SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002824 SyncExecScope result;
2825 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002826 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2827 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002828 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2829 return result;
2830}
2831
2832SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002833 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002834 src_access_scope = 0;
John Zulaufc523bf62021-02-16 08:20:34 -07002835 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002836 dst_access_scope = 0;
2837}
2838
2839template <typename Barrier>
2840SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002841 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002842 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002843 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002844 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2845}
2846
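// For VkSubpassDependency2, a sync2 VkMemoryBarrier2KHR chained on pNext (if present) supplies the stage/access
// masks; otherwise the legacy masks on the dependency itself are used.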
2847SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002848 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2849 if (barrier) {
2850 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002851 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002852 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002853
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002854 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002855 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002856 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2857
2858 } else {
2859 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002860 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002861 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2862
2863 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002864 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002865 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2866 }
2867}
2868
2869template <typename Barrier>
2870SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2871 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2872 src_exec_scope = src.exec_scope;
2873 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2874
2875 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002876 dst_exec_scope = dst.exec_scope;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002877 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002878}
2879
John Zulaufb02c1eb2020-10-06 16:33:36 -06002880// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2881void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2882 for (const auto &barrier : barriers) {
2883 ApplyBarrier(barrier, layout_transition);
2884 }
2885}
2886
John Zulauf89311b42020-09-29 16:28:47 -06002887// ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. Designed use was for
2888// inter-subpass barriers for lazy-evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2889// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufbb890452021-12-14 11:30:18 -07002890void ResourceAccessState::ApplyBarriersImmediate(const std::vector<SyncBarrier> &barriers) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06002891    assert(!pending_layout_transition);  // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002892 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002893 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002894 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002895 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002896 }
John Zulaufbb890452021-12-14 11:30:18 -07002897 ApplyPendingBarriers(kInvalidTag); // There can't be any need for this tag
John Zulauf3d84f1b2020-03-09 13:33:25 -06002898}
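// Hazard detection with no ordering rules: a read is a READ_AFTER_WRITE hazard if the last write is neither
// visible nor chained to it; a write is checked WRITE_AFTER_READ against any outstanding reads, and only falls
// back to a WRITE_AFTER_WRITE check against last_write when no reads have happened since that write.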
John Zulauf9cb530d2019-09-30 14:14:10 -06002899HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2900 HazardResult hazard;
2901 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002902 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002903 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002904 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002905 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002906 }
2907 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002908 // Write operation:
2909        // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads)
2910        // If reads exist -- test only against them because either:
2911        //     * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
2912        //     * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
2913        //       the current write happens after the reads, so just test the write against the reads
2914 // Otherwise test against last_write
2915 //
2916 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07002917 if (last_reads.size()) {
2918 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06002919 if (IsReadHazard(usage_stage, read_access)) {
2920 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2921 break;
2922 }
2923 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002924 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06002925 // Write-After-Write check -- if we have a previous write to test against
2926 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002927 }
2928 }
2929 return hazard;
2930}
2931
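// The OrderingBarrier variants model implicit ordering guarantees (e.g. the raster/attachment ordering rules)
// as if they were barriers on the most recent accesses, so accesses already covered by the ordering rule do not
// report hazards that the ordering prevents.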
John Zulauf4fa68462021-04-26 21:04:22 -06002932HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering ordering_rule) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002933 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf4fa68462021-04-26 21:04:22 -06002934 return DetectHazard(usage_index, ordering);
2935}
2936
2937HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const OrderingBarrier &ordering) const {
John Zulauf69133422020-05-20 14:55:53 -06002938 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
2939 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06002940 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002941 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002942 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
2943 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06002944 if (IsRead(usage_bit)) {
2945        // Exclude RAW if there is no write, or if the write is not the "most recent" operation w.r.t. usage;
2946 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
2947 if (is_raw_hazard) {
2948 // NOTE: we know last_write is non-zero
2949 // See if the ordering rules save us from the simple RAW check above
2950 // First check to see if the current usage is covered by the ordering rules
2951 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
2952 const bool usage_is_ordered =
2953 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
2954 if (usage_is_ordered) {
2955                // Now see if the most recent write (or a subsequent read) is ordered
2956 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
2957 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06002958 }
2959 }
John Zulauf4285ee92020-09-23 10:20:52 -06002960 if (is_raw_hazard) {
2961 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
2962 }
John Zulauf5c628d02021-05-04 15:46:36 -06002963 } else if (usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
2964 // For Image layout transitions, the barrier represents the first synchronization/access scope of the layout transition
2965 return DetectBarrierHazard(usage_index, ordering.exec_scope, ordering.access_scope);
John Zulauf361fb532020-07-22 10:45:39 -06002966 } else {
2967 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002968 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07002969 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06002970 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07002971 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06002972 if (usage_write_is_ordered) {
2973                // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
2974 ordered_stages = GetOrderedStages(ordering);
2975 }
2976 // If we're tracking any reads that aren't ordered against the current write, got to check 'em all.
2977 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002978 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06002979 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
2980 if (IsReadHazard(usage_stage, read_access)) {
2981 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2982 break;
2983 }
John Zulaufd14743a2020-07-03 09:42:39 -06002984 }
2985 }
John Zulauf2a344ca2021-09-09 17:07:19 -06002986 } else if (last_write.any() && !(last_write_is_ordered && usage_write_is_ordered)) {
2987 bool ilt_ilt_hazard = false;
2988 if ((usage_index == SYNC_IMAGE_LAYOUT_TRANSITION) && (usage_bit == last_write)) {
2989 // ILT after ILT is a special case where we check the 2nd access scope of the first ILT against the first access
2990 // scope of the second ILT, which has been passed (smuggled?) in the ordering barrier
2991 ilt_ilt_hazard = !(write_barriers & ordering.access_scope).any();
2992 }
2993 if (ilt_ilt_hazard || IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002994 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06002995 }
John Zulauf69133422020-05-20 14:55:53 -06002996 }
2997 }
2998 return hazard;
2999}
3000
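// Replay check: walk the recorded context's first-access list (limited to tag_range) against this access state.
// The trailing write, if any, is handled specially so that layout-transition ordering and the recorded
// first-read stages are folded into the ordering barrier before the check.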
John Zulaufae842002021-04-15 18:20:55 -06003001HazardResult ResourceAccessState::DetectHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range) const {
3002 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06003003 using Size = FirstAccesses::size_type;
3004 const auto &recorded_accesses = recorded_use.first_accesses_;
3005 Size count = recorded_accesses.size();
3006 if (count) {
3007 const auto &last_access = recorded_accesses.back();
3008 bool do_write_last = IsWrite(last_access.usage_index);
3009 if (do_write_last) --count;
John Zulaufae842002021-04-15 18:20:55 -06003010
John Zulauf4fa68462021-04-26 21:04:22 -06003011        for (Size i = 0; i < count; ++i) {
3012 const auto &first = recorded_accesses[i];
3013 // Skip and quit logic
3014 if (first.tag < tag_range.begin) continue;
3015 if (first.tag >= tag_range.end) {
3016 do_write_last = false; // ignore last since we know it can't be in tag_range
3017 break;
3018 }
3019
3020 hazard = DetectHazard(first.usage_index, first.ordering_rule);
3021 if (hazard.hazard) {
3022 hazard.AddRecordedAccess(first);
3023 break;
3024 }
3025 }
3026
3027 if (do_write_last && tag_range.includes(last_access.tag)) {
3028 // Writes are a bit special... both for the "most recent" access logic, and layout transition specific logic
3029 OrderingBarrier barrier = GetOrderingRules(last_access.ordering_rule);
3030 if (last_access.usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
3031 // Or in the layout first access scope as a barrier... IFF the usage is an ILT
3032 // this was saved off in the "apply barriers" logic to simplify ILT access checks as they straddle
3033 // the barrier that applies them
3034 barrier |= recorded_use.first_write_layout_ordering_;
3035 }
3036 // Any read stages present in the recorded context (this) are most recent to the write, and thus mask those stages in
3037 // the active context
3038 if (recorded_use.first_read_stages_) {
3039                // we need to ignore the first use read stages in the active context (so we add them to the ordering rule),
3040                // reads in the active context are not "most recent" as all recorded context operations are *after* them
3041                // This suppresses only RAW checks for stages present in the recorded context, but not those only present in the
3042 // active context.
3043 barrier.exec_scope |= recorded_use.first_read_stages_;
3044 // if there are any first use reads, we suppress WAW by injecting the active context write in the ordering rule
3045 barrier.access_scope |= FlagBit(last_access.usage_index);
3046 }
3047 hazard = DetectHazard(last_access.usage_index, barrier);
3048 if (hazard.hazard) {
3049 hazard.AddRecordedAccess(last_access);
3050 }
3051 }
John Zulaufae842002021-04-15 18:20:55 -06003052 }
3053 return hazard;
3054}
3055
John Zulauf2f952d22020-02-10 11:34:51 -07003056// Asynchronous Hazards occur between subpasses with no connection through the DAG
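// Only accesses tagged at or after start_tag (i.e. made within the async subpass) are considered racing;
// anything older was already validated against the ordered portion of the DAG at subpass start.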
John Zulauf14940722021-04-12 15:19:02 -06003057HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07003058 HazardResult hazard;
3059 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003060 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
3061 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
3062 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07003063 if (IsRead(usage)) {
John Zulauf14940722021-04-12 15:19:02 -06003064 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06003065 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07003066 }
3067 } else {
John Zulauf14940722021-04-12 15:19:02 -06003068 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06003069 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07003070 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003071 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07003072 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06003073 if (read_access.tag >= start_tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003074 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003075 break;
3076 }
3077 }
John Zulauf2f952d22020-02-10 11:34:51 -07003078 }
3079 }
3080 return hazard;
3081}
3082
John Zulaufae842002021-04-15 18:20:55 -06003083HazardResult ResourceAccessState::DetectAsyncHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
3084 ResourceUsageTag start_tag) const {
3085 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06003086 for (const auto &first : recorded_use.first_accesses_) {
John Zulaufae842002021-04-15 18:20:55 -06003087 // Skip and quit logic
3088 if (first.tag < tag_range.begin) continue;
3089 if (first.tag >= tag_range.end) break;
John Zulaufae842002021-04-15 18:20:55 -06003090
3091 hazard = DetectAsyncHazard(first.usage_index, start_tag);
John Zulauf4fa68462021-04-26 21:04:22 -06003092 if (hazard.hazard) {
3093 hazard.AddRecordedAccess(first);
3094 break;
3095 }
John Zulaufae842002021-04-15 18:20:55 -06003096 }
3097 return hazard;
3098}
3099
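// A layout transition behaves as a write within the barrier's first scope: reads not covered by src_exec_scope
// (directly or via an existing execution chain) are WRITE_AFTER_READ hazards, and an unsynchronized prior write
// is WRITE_AFTER_WRITE.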
Jeremy Gebben40a22942020-12-22 14:22:06 -07003100HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003101 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07003102 // Only supporting image layout transitions for now
3103 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3104 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06003105    // only test for WAW if there are no intervening read operations.
3106    // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07003107 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06003108 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07003109 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003110 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06003111 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07003112 break;
3113 }
3114 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003115 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3116 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3117 }
3118
3119 return hazard;
3120}
3121
Jeremy Gebben40a22942020-12-22 14:22:06 -07003122HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07003123 const SyncStageAccessFlags &src_access_scope,
John Zulauf14940722021-04-12 15:19:02 -06003124 const ResourceUsageTag event_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07003125 // Only supporting image layout transitions for now
3126 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3127 HazardResult hazard;
3128    // only test for WAW if there are no intervening read operations.
3129    // See DetectHazard(SyncStageAccessIndex) above for more details.
3130
John Zulaufab7756b2020-12-29 16:10:16 -07003131 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003132        // Look at the reads if any... if reads exist, they are either the reason the access is in the event
3133 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07003134 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06003135 if (read_access.tag < event_tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003136                // The read is in the event's first synchronization scope, so we use a barrier hazard check
3137 // If the read stage is not in the src sync scope
3138 // *AND* not execution chained with an existing sync barrier (that's the or)
3139 // then the barrier access is unsafe (R/W after R)
3140 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
3141 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3142 break;
3143 }
3144 } else {
3145 // The read not in the event first sync scope and so is a hazard vs. the layout transition
3146                // The read is not in the event's first sync scope and so is a hazard vs. the layout transition
3147 }
3148 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003149 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003150        // if there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
John Zulauf14940722021-04-12 15:19:02 -06003151        if (write_tag < event_tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003152            // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
3153 // So do a normal barrier hazard check
3154 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3155 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3156 }
3157 } else {
3158            // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06003159 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3160 }
John Zulaufd14743a2020-07-03 09:42:39 -06003161 }
John Zulauf361fb532020-07-22 10:45:39 -06003162
John Zulauf0cb5be22020-01-23 12:18:22 -07003163 return hazard;
3164}
3165
John Zulauf5f13a792020-03-10 07:31:21 -06003166// The logic behind resolves is the same as for update: we assume that earlier hazards have been reported, and that no
3167// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
3168// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
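// If the other state's write is newer it simply replaces this one; when the write tags match, write barriers and
// the per-stage read states are merged, and the first-access lists are re-shuffled in tag order.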
3169void ResourceAccessState::Resolve(const ResourceAccessState &other) {
John Zulauf14940722021-04-12 15:19:02 -06003170 if (write_tag < other.write_tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003171        // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
3172 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06003173 *this = other;
John Zulauf14940722021-04-12 15:19:02 -06003174 } else if (other.write_tag == write_tag) {
3175        // In the *equals* case for write operations, we merge the write barriers and the read state (but without the
John Zulauf5f13a792020-03-10 07:31:21 -06003176 // dependency chaining logic or any stage expansion)
3177 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003178 pending_write_barriers |= other.pending_write_barriers;
3179 pending_layout_transition |= other.pending_layout_transition;
3180 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf4fa68462021-04-26 21:04:22 -06003181 pending_layout_ordering_ |= other.pending_layout_ordering_;
John Zulauf5f13a792020-03-10 07:31:21 -06003182
John Zulaufd14743a2020-07-03 09:42:39 -06003183 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07003184 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06003185 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07003186 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003187 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06003188 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06003189 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06003190 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
3191 // but we should wait on profiling data for that.
3192 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003193 auto &my_read = last_reads[my_read_index];
3194 if (other_read.stage == my_read.stage) {
John Zulauf14940722021-04-12 15:19:02 -06003195 if (my_read.tag < other_read.tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003196 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06003197 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06003198 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003199 my_read.pending_dep_chain = other_read.pending_dep_chain;
3200 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
3201 // May require tracking more than one access per stage.
3202 my_read.barriers = other_read.barriers;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003203 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06003204                        // Since I'm overwriting the fragment stage read, also update the input attachment info
3205 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06003206 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003207 }
John Zulauf14940722021-04-12 15:19:02 -06003208 } else if (other_read.tag == my_read.tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06003209 // The read tags match so merge the barriers
3210 my_read.barriers |= other_read.barriers;
3211 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003212 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003213
John Zulauf5f13a792020-03-10 07:31:21 -06003214 break;
3215 }
3216 }
3217 } else {
3218 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07003219 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06003220 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003221 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003222 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003223 }
John Zulauf5f13a792020-03-10 07:31:21 -06003224 }
3225 }
John Zulauf361fb532020-07-22 10:45:39 -06003226 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003227    }  // the else clause would be that other write is before this write... in which case we supersede the other state and
3228 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07003229
3230 // Merge first access information by making a copy of this first_access and reconstructing with a shuffle
3231 // of the copy and other into this using the update first logic.
3232    // NOTE: All sorts of additional cleverness could be put into short circuits. (for example back is write and is before front
3233 // of the other first_accesses... )
3234 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
3235 FirstAccesses firsts(std::move(first_accesses_));
3236 first_accesses_.clear();
3237 first_read_stages_ = 0U;
3238 auto a = firsts.begin();
3239 auto a_end = firsts.end();
3240 for (auto &b : other.first_accesses_) {
John Zulauf14940722021-04-12 15:19:02 -06003241 // TODO: Determine whether some tag offset will be needed for PHASE II
3242 while ((a != a_end) && (a->tag < b.tag)) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003243 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3244 ++a;
3245 }
3246 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
3247 }
3248 for (; a != a_end; ++a) {
3249 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3250 }
3251 }
John Zulauf5f13a792020-03-10 07:31:21 -06003252}
3253
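// Update records a new access: reads keep at most one entry per pipeline stage (refreshing barriers and tag),
// writes clobber prior state via SetWrite, and either kind may be appended to the first-access list.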
John Zulauf14940722021-04-12 15:19:02 -06003254void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003255 // Move this logic in the ResourceStateTracker as methods, thereof (or we'll repeat it for every flavor of resource...
3256 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003257 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003258        // Multiple outstanding reads may be of interest and do dependency chains independently
3259 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3260 const auto usage_stage = PipelineStageBit(usage_index);
3261 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003262 for (auto &read_access : last_reads) {
3263 if (read_access.stage == usage_stage) {
3264 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003265 break;
3266 }
3267 }
3268 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07003269 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003270 last_read_stages |= usage_stage;
3271 }
John Zulauf4285ee92020-09-23 10:20:52 -06003272
3273 // Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07003274 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003275 // TODO Revisit re: multiple reads for a given stage
3276 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003277 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003278 } else {
3279 // Assume write
3280 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003281 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003282 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003283 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003284}
John Zulauf5f13a792020-03-10 07:31:21 -06003285
John Zulauf89311b42020-09-29 16:28:47 -06003286// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3287// if the last_reads/last_write were unsafe, we've reported them, in either case the prior access is irrelevant.
3288// We can overwrite them as *this* write is now after them.
3289//
3290// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
John Zulauf14940722021-04-12 15:19:02 -06003291void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003292 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06003293 last_read_stages = 0;
3294 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06003295 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06003296
3297 write_barriers = 0;
3298 write_dependency_chain = 0;
3299 write_tag = tag;
3300 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003301}
3302
John Zulauf89311b42020-09-29 16:28:47 -06003303// Apply the memory barrier without updating the existing barriers. The execution barrier
3304// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3305// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3306// replace the current write barriers or add to them, so accumulate to pending as well.
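// A barrier only affects this access if the write (or a read stage) is already in its source scope or is
// execution-chained into it; layout transitions are treated as always in scope, since an unsafe transition is
// reported separately by DetectBarrierHazard.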
3307void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
3308 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3309 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003310 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
3311    // transition, under the theory of "most recent access". If the read/write *isn't* safe
3312    // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
3313    // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufc523bf62021-02-16 08:20:34 -07003314 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope.exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06003315 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003316 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003317 if (layout_transition) {
3318 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3319 }
John Zulaufa0a98292020-09-18 09:30:10 -06003320 }
John Zulauf89311b42020-09-29 16:28:47 -06003321    // Track layout transition as pending, as we can't modify last_write until all barriers are processed
3322 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003323
John Zulauf89311b42020-09-29 16:28:47 -06003324 if (!pending_layout_transition) {
3325 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3326 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003327 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003328 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufc523bf62021-02-16 08:20:34 -07003329 if (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers)) {
3330 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003331 }
3332 }
John Zulaufa0a98292020-09-18 09:30:10 -06003333 }
John Zulaufa0a98292020-09-18 09:30:10 -06003334}
3335
John Zulauf4a6105a2020-11-17 15:11:05 -07003336// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
3337// changes the "chaining" state, but to keep barriers independent. See discussion above.
John Zulauf14940722021-04-12 15:19:02 -06003338void ResourceAccessState::ApplyBarrier(const ResourceUsageTag scope_tag, const SyncBarrier &barrier, bool layout_transition) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003339 // The scope logic for events is, if we're here, the resource usage was flagged as "in the first execution scope" at
3340    // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag
3341    // in order to know if it's in the execution scope)
3342 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
3343 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
3344 // errors w.r.t. "most recent" accesses.
John Zulauf14940722021-04-12 15:19:02 -06003345 if (layout_transition || ((write_tag < scope_tag) && (barrier.src_access_scope & last_write).any())) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003346 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003347 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003348 if (layout_transition) {
3349 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3350 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003351 }
3352    // Track layout transition as pending, as we can't modify last_write until all barriers are processed
3353 pending_layout_transition |= layout_transition;
3354
3355 if (!pending_layout_transition) {
3356 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3357 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003358 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003359 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
3360            // NOTE: That's not really correct... this read stage might *not* have been included in the SetEvent, and the barriers
3361            // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
3362            // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
3363            // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
3364            // capture (the specific write and read stages that *were* in scope at the moment of SetEvent).
3365 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulauf14940722021-04-12 15:19:02 -06003366 if ((read_access.tag < scope_tag) && (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers))) {
John Zulaufc523bf62021-02-16 08:20:34 -07003367 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07003368 }
3369 }
3370 }
3371}
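// Second phase of barrier application: perform any pending layout transition (recorded as a
// SYNC_IMAGE_LAYOUT_TRANSITION write), then fold the accumulated pending dependency chains and barriers into
// the read and write barrier state.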
John Zulauf14940722021-04-12 15:19:02 -06003372void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag tag) {
John Zulauf89311b42020-09-29 16:28:47 -06003373 if (pending_layout_transition) {
John Zulauf4fa68462021-04-26 21:04:22 -06003374 // SetWrite clobbers the last_reads array, and thus we don't have to clear the read_state out.
John Zulauf89311b42020-09-29 16:28:47 -06003375 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003376 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf4fa68462021-04-26 21:04:22 -06003377 TouchupFirstForLayoutTransition(tag, pending_layout_ordering_);
3378 pending_layout_ordering_ = OrderingBarrier();
John Zulauf89311b42020-09-29 16:28:47 -06003379 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003380 }
John Zulauf89311b42020-09-29 16:28:47 -06003381
3382 // Apply the accumulate execution barriers (and thus update chaining information)
John Zulauf4fa68462021-04-26 21:04:22 -06003383 // for layout transition, last_reads is reset by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003384 for (auto &read_access : last_reads) {
3385 read_access.barriers |= read_access.pending_dep_chain;
3386 read_execution_barriers |= read_access.barriers;
3387 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003388 }
3389
3390 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3391 write_dependency_chain |= pending_write_dep_chain;
3392 write_barriers |= pending_write_barriers;
3393 pending_write_dep_chain = 0;
3394 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003395}

bool ResourceAccessState::FirstAccessInTagRange(const ResourceUsageRange &tag_range) const {
    if (!first_accesses_.size()) return false;
    const ResourceUsageRange first_access_range = {first_accesses_.front().tag, first_accesses_.back().tag + 1};
    return tag_range.intersects(first_access_range);
}

// This should be just Bits or Index, but we don't have an invalid state for Index
VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
    VkPipelineStageFlags2KHR barriers = 0U;

    for (const auto &read_access : last_reads) {
        if ((read_access.access & usage_bit).any()) {
            barriers = read_access.barriers;
            break;
        }
    }

    return barriers;
}

inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
    assert(IsRead(usage));
    // Only RAW vs. last_write if it doesn't happen-after any other read because either:
    //    * the previous reads are not hazards, and thus last_write must be visible and available to
    //      any reads that happen after.
    //    * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
    //      the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
    return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
}
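
// Illustrative RAW case for IsRAWHazard (sketch only): a transfer write to a buffer followed by a shader read with no
// intervening barrier leaves read_execution_barriers without the shader stage bit, so IsWriteHazard(usage) reports the
// read-after-write hazard for that usage.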
3426
Jeremy Gebben40a22942020-12-22 14:22:06 -07003427VkPipelineStageFlags2KHR ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003428 // Whether the stage are in the ordering scope only matters if the current write is ordered
Jeremy Gebben40a22942020-12-22 14:22:06 -07003429 VkPipelineStageFlags2KHR ordered_stages = last_read_stages & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06003430 // Special input attachment handling as always (not encoded in exec_scop)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003431 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003432 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003433 // If we have an input attachment in last_reads and input attachments are ordered we all that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07003434 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06003435 }
3436
3437 return ordered_stages;
3438}

void ResourceAccessState::UpdateFirst(const ResourceUsageTag tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
    // Only record until we record a write.
    if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
        const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
        if (0 == (usage_stage & first_read_stages_)) {
            // If this is a read we haven't seen or a write, record.
            // We always need to know what stages were found prior to write
            first_read_stages_ |= usage_stage;
            if (0 == (read_execution_barriers & usage_stage)) {
                // If this stage isn't masked then we add it (since writes map to usage_stage 0, this also records writes)
                first_accesses_.emplace_back(tag, usage_index, ordering_rule);
            }
        }
    }
}
3455
John Zulauf4fa68462021-04-26 21:04:22 -06003456void ResourceAccessState::TouchupFirstForLayoutTransition(ResourceUsageTag tag, const OrderingBarrier &layout_ordering) {
3457 // Only call this after recording an image layout transition
3458 assert(first_accesses_.size());
3459 if (first_accesses_.back().tag == tag) {
3460 // If this layout transition is the the first write, add the additional ordering rules that guard the ILT
Samuel Iglesias Gonsálvez9b4660b2021-10-21 08:50:39 +02003461 assert(first_accesses_.back().usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
John Zulauf4fa68462021-04-26 21:04:22 -06003462 first_write_layout_ordering_ = layout_ordering;
3463 }
3464}
3465
John Zulaufee984022022-04-13 16:39:50 -06003466void ResourceAccessState::ReadState::Set(VkPipelineStageFlags2KHR stage_, const SyncStageAccessFlags &access_,
3467 VkPipelineStageFlags2KHR barriers_, ResourceUsageTag tag_) {
3468 stage = stage_;
3469 access = access_;
3470 barriers = barriers_;
3471 tag = tag_;
3472 pending_dep_chain = 0; // If this is a new read, we aren't applying a barrier set.
3473}

std::shared_ptr<CommandBufferAccessContext> SyncValidator::AccessContextFactory(VkCommandBuffer command_buffer) {
    // If we don't have one, make it.
    auto cb_state = Get<CMD_BUFFER_STATE>(command_buffer);
    assert(cb_state.get());
    auto queue_flags = cb_state->GetQueueFlags();
    return std::make_shared<CommandBufferAccessContext>(*this, cb_state, queue_flags);
}

inline std::shared_ptr<CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) {
    return GetMappedInsert(cb_access_state, command_buffer,
                           [this, command_buffer]() { return AccessContextFactory(command_buffer); });
}

std::shared_ptr<const CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) const {
    return GetMapped(cb_access_state, command_buffer, []() { return std::shared_ptr<CommandBufferAccessContext>(); });
}

const CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) const {
    return GetMappedPlainFromShared(cb_access_state, command_buffer);
}

CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) {
    return GetAccessContextShared(command_buffer).get();
}

CommandBufferAccessContext *SyncValidator::GetAccessContextNoInsert(VkCommandBuffer command_buffer) {
    return GetMappedPlainFromShared(cb_access_state, command_buffer);
}

void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
    auto *access_context = GetAccessContextNoInsert(command_buffer);
    if (access_context) {
        access_context->Reset();
    }
}

void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
    auto access_found = cb_access_state.find(command_buffer);
    if (access_found != cb_access_state.end()) {
        access_found->second->Reset();
        access_found->second->MarkDestroyed();
        cb_access_state.erase(access_found);
    }
}

bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                 uint32_t regionCount, const VkBufferCopy *pRegions) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    const auto *context = cb_context->GetCurrentAccessContext();

    // If we have no previous accesses, we have no hazards
    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_buffer) {
            const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
            if (hazard.hazard) {
                skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
                                 cb_context->FormatUsage(hazard).c_str());
            }
        }
        if (dst_buffer && !skip) {
            const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
            if (hazard.hazard) {
                skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
                                 cb_context->FormatUsage(hazard).c_str());
            }
        }
        if (skip) break;
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                               uint32_t regionCount, const VkBufferCopy *pRegions) {
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
    auto *context = cb_context->GetCurrentAccessContext();

    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_buffer) {
            const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
        }
        if (dst_buffer) {
            const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
        }
    }
}
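
// The copy-buffer entry points above illustrate the general validate/record split used throughout this file:
// PreCallValidate* detects hazards (transfer read on src, transfer write on dst) against existing access state and
// logs them, while PreCallRecord* updates the access state under a fresh command tag.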

void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    // Clear out events from the command buffer contexts
    for (auto &cb_context : cb_access_state) {
        cb_context.second->RecordDestroyEvent(event);
    }
}

bool SyncValidator::ValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos,
                                           CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    const auto *context = cb_context->GetCurrentAccessContext();
    const char *func_name = CommandTypeString(cmd_type);

    // If we have no previous accesses, we have no hazards
    auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
    auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);

    for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
        const auto &copy_region = pCopyBufferInfos->pRegions[region];
        if (src_buffer) {
            const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
            if (hazard.hazard) {
                // TODO -- add tag information to log msg when useful.
                skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "%s(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
                                 region, cb_context->FormatUsage(hazard).c_str());
            }
        }
        if (dst_buffer && !skip) {
            const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
            if (hazard.hazard) {
                skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "%s(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
                                 region, cb_context->FormatUsage(hazard).c_str());
            }
        }
        if (skip) break;
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
                                                     const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
    return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) const {
    return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
}

void SyncValidator::RecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos, CMD_TYPE cmd_type) {
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    const auto tag = cb_context->NextCommandTag(cmd_type);
    auto *context = cb_context->GetCurrentAccessContext();

    auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
    auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);

    for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
        const auto &copy_region = pCopyBufferInfos->pRegions[region];
        if (src_buffer) {
            const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
        }
        if (dst_buffer) {
            const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
    RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
}

void SyncValidator::PreCallRecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) {
    RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
}

bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageCopy *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);
    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
                                                copy_region.srcOffset, copy_region.extent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
        }

        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
                                                copy_region.dstOffset, copy_region.extent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(srcImage);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
        }
    }
}

bool SyncValidator::ValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo,
                                          CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const char *func_name = CommandTypeString(cmd_type);
    auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
    auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);

    for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
        const auto &copy_region = pCopyImageInfo->pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
                                                copy_region.srcOffset, copy_region.extent);
            if (hazard.hazard) {
                skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
                                 region, cb_access_context->FormatUsage(hazard).c_str());
            }
        }

        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
                                                copy_region.dstOffset, copy_region.extent);
            if (hazard.hazard) {
                skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
                                 region, cb_access_context->FormatUsage(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
                                                    const VkCopyImageInfo2KHR *pCopyImageInfo) const {
    return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) const {
    return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
}

void SyncValidator::RecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo, CMD_TYPE cmd_type) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(cmd_type);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
    auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);

    for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
        const auto &copy_region = pCopyImageInfo->pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
        }
    }
}

void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
    RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
}

void SyncValidator::PreCallRecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) {
    RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
                                           dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
                                           bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                           pImageMemoryBarriers);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                    uint32_t bufferMemoryBarrierCount,
                                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                    uint32_t imageMemoryBarrierCount,
                                                    const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(),
                                                           srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                           pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                           imageMemoryBarrierCount, pImageMemoryBarriers);
}
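
// Barrier handling is delegated to SyncOpPipelineBarrier: Validate() checks the barrier against the current access
// context, and RecordSyncOp<> records it for the access context, so the 2/2KHR variants below differ only in CMD_TYPE
// and in taking the VkDependencyInfo form of the parameters.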

bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
                                                          const VkDependencyInfoKHR *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier2(VkCommandBuffer commandBuffer,
                                                       const VkDependencyInfo *pDependencyInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
    skip = pipeline_barrier.Validate(*cb_access_context);
    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(),
                                                           *pDependencyInfo);
}

void SyncValidator::PreCallRecordCmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;

    cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(),
                                                           *pDependencyInfo);
}

void SyncValidator::CreateDevice(const VkDeviceCreateInfo *pCreateInfo) {
    // The state tracker sets up the device state
    StateTracker::CreateDevice(pCreateInfo);

    // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
    // refactor would be messier without.
    // TODO: Find a good way to do this hooklessly.
    SetCommandBufferResetCallback([this](VkCommandBuffer command_buffer) -> void { ResetCommandBufferCallback(command_buffer); });
    SetCommandBufferFreeCallback([this](VkCommandBuffer command_buffer) -> void { FreeCommandBufferCallback(command_buffer); });
}

bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                            const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd) const {
    bool skip = false;
    auto cb_context = GetAccessContext(commandBuffer);
    if (cb_context) {
        SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo);
        skip = sync_op.Validate(*cb_context);
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      VkSubpassContents contents) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                       const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
    return skip;
}

bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                          const VkRenderPassBeginInfo *pRenderPassBegin,
                                                          const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
    return skip;
}

void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
                                                     VkResult result) {
    // The state tracker sets up the command buffer state
    StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);

    // Create/initialize the structure that tracks accesses at the command buffer scope.
    auto cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    cb_access_context->Reset();
}

void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd) {
    auto cb_context = GetAccessContext(commandBuffer);
    if (cb_context) {
        cb_context->RecordSyncOp<SyncOpBeginRenderPass>(cmd, *this, pRenderPassBegin, pSubpassBeginInfo);
    }
}

void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                         const VkRenderPassBeginInfo *pRenderPassBegin,
                                                         const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
}

bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                           const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) const {
    bool skip = false;

    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo);
    return sync_op.Validate(*cb_context);
}

bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
    // Convert to a NextSubpass2
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
    skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                      const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
    return skip;
}

bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                   const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
    return skip;
}

void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                         const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) {
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpNextSubpass>(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
    auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
}

void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                  const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
}

void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                     const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
}

bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
                                             CMD_TYPE cmd) const {
    bool skip = false;

    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo);
    skip |= sync_op.Validate(*cb_context);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
    skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
    return skip;
}

bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                        const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
    return skip;
}
void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) {
    // Resolve all the subpass contexts to the command buffer context
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    cb_context->RecordSyncOp<SyncOpEndRenderPass>(cmd, *this, pSubpassEndInfo);
}

// Simple heuristic rule to detect WAW operations representing algorithmically safe or incremental
// updates to a resource which do not conflict at the byte level.
// TODO: Revisit this rule to see if it needs to be tighter or looser
// TODO: Add programmatic control over suppression heuristics
bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
    return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
}
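
// Illustrative suppressed case (sketch only): back-to-back dispatches storing through the same bound descriptor with
// the same access produce WRITE_AFTER_WRITE with prior_access == FlagBit(usage_index), which this heuristic treats as
// an algorithmically safe WAW and suppresses.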

void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
    StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
    StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
    StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}

template <typename RegionType>
bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                 VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
                                                 CMD_TYPE cmd_type) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const char *func_name = CommandTypeString(cmd_type);

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
    auto dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        HazardResult hazard;
        if (dst_image) {
            if (src_buffer) {
                ResourceAccessRange src_range =
                    MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
                hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
                if (hazard.hazard) {
                    // PHASE1 TODO -- add tag information to log msg when useful.
                    skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                     "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
                                     string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
                                     cb_access_context->FormatUsage(hazard).c_str());
                }
            }

            hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
                                           copy_region.imageOffset, copy_region.imageExtent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
            if (skip) break;
        }
        if (skip) break;
    }
    return skip;
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                        VkImageLayout dstImageLayout, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
                                        CMD_COPYBUFFERTOIMAGE);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
                                                            const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                                        pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                                        pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
                                                         const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) const {
    return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
                                        pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
                                        pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
}
4183
sfricke-samsung71f04e32022-03-16 01:21:21 -05004184template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004185void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004186 VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
4187 CMD_TYPE cmd_type) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004188 auto *cb_access_context = GetAccessContext(commandBuffer);
4189 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004190
Jeff Leger178b1e52020-10-05 12:22:23 -04004191 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004192 auto *context = cb_access_context->GetCurrentAccessContext();
4193 assert(context);
4194
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004195 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
4196 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004197
4198 for (uint32_t region = 0; region < regionCount; region++) {
4199 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07004200 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004201 if (src_buffer) {
4202 ResourceAccessRange src_range =
4203 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004204 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004205 }
Jeremy Gebben40a22942020-12-22 14:22:06 -07004206 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004207 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004208 }
4209 }
4210}
4211
Jeff Leger178b1e52020-10-05 12:22:23 -04004212void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4213 VkImageLayout dstImageLayout, uint32_t regionCount,
4214 const VkBufferImageCopy *pRegions) {
4215 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
Tony Barbour845d29b2021-11-09 11:43:14 -07004216 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, CMD_COPYBUFFERTOIMAGE);
Jeff Leger178b1e52020-10-05 12:22:23 -04004217}
4218
4219void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4220 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
4221 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
4222 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4223 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
Tony Barbour845d29b2021-11-09 11:43:14 -07004224 pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
4225}
4226
4227void SyncValidator::PreCallRecordCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
4228 const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) {
4229 StateTracker::PreCallRecordCmdCopyBufferToImage2(commandBuffer, pCopyBufferToImageInfo);
4230 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4231 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4232 pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004233}
4234
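// Shared validation for vkCmdCopyImageToBuffer and its 2/2KHR variants: for each region, detects
// COPY_TRANSFER_READ hazards on the source image subresource and COPY_TRANSFER_WRITE hazards on the
// destination buffer range, stopping at the first region that reports a hazard.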
sfricke-samsung71f04e32022-03-16 01:21:21 -05004235template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004236bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004237 VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
4238 CMD_TYPE cmd_type) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004239 bool skip = false;
4240 const auto *cb_access_context = GetAccessContext(commandBuffer);
4241 assert(cb_access_context);
4242 if (!cb_access_context) return skip;
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004243 const char *func_name = CommandTypeString(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04004244
locke-lunarga19c71d2020-03-02 18:17:04 -07004245 const auto *context = cb_access_context->GetCurrentAccessContext();
4246 assert(context);
4247 if (!context) return skip;
4248
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004249 auto src_image = Get<IMAGE_STATE>(srcImage);
4250 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebben6fbf8242021-06-21 09:14:46 -06004251 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
locke-lunarga19c71d2020-03-02 18:17:04 -07004252 for (uint32_t region = 0; region < regionCount; region++) {
4253 const auto &copy_region = pRegions[region];
4254 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004255 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07004256 copy_region.imageOffset, copy_region.imageExtent);
4257 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004258 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004259 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004260 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004261 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004262 }
John Zulauf477700e2021-01-06 11:41:49 -07004263 if (dst_mem) {
4264 ResourceAccessRange dst_range =
4265 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004266 hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf477700e2021-01-06 11:41:49 -07004267 if (hazard.hazard) {
4268 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4269 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4270 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004271 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004272 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004273 }
4274 }
4275 if (skip) break;
4276 }
4277 return skip;
4278}
4279
Jeff Leger178b1e52020-10-05 12:22:23 -04004280bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
4281 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
4282 const VkBufferImageCopy *pRegions) const {
4283 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004284 CMD_COPYIMAGETOBUFFER);
Jeff Leger178b1e52020-10-05 12:22:23 -04004285}
4286
4287bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4288 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
4289 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4290 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004291 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
4292}
4293
4294bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
4295 const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) const {
4296 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4297 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4298 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004299}
4300
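// Shared record path for vkCmdCopyImageToBuffer and its 2/2KHR variants: for each region records a
// COPY_TRANSFER_READ on the source image subresource and a COPY_TRANSFER_WRITE on the destination
// buffer range under the command's tag.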
sfricke-samsung71f04e32022-03-16 01:21:21 -05004301template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004302void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004303 VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004304 CMD_TYPE cmd_type) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004305 auto *cb_access_context = GetAccessContext(commandBuffer);
4306 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004307
Jeff Leger178b1e52020-10-05 12:22:23 -04004308 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004309 auto *context = cb_access_context->GetCurrentAccessContext();
4310 assert(context);
4311
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004312 auto src_image = Get<IMAGE_STATE>(srcImage);
Jeremy Gebben9f537102021-10-05 16:37:12 -06004313 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07004316
4317 for (uint32_t region = 0; region < regionCount; region++) {
4318 const auto &copy_region = pRegions[region];
4319 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004320 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004321 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004322 if (dst_buffer) {
4323 ResourceAccessRange dst_range =
4324 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004325 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004326 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004327 }
4328 }
4329}
4330
Jeff Leger178b1e52020-10-05 12:22:23 -04004331void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4332 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
4333 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004334 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, CMD_COPYIMAGETOBUFFER);
Jeff Leger178b1e52020-10-05 12:22:23 -04004335}
4336
4337void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4338 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
4339 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
4340 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4341 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004342 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
4343}
4344
4345void SyncValidator::PreCallRecordCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
4346 const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) {
4347 StateTracker::PreCallRecordCmdCopyImageToBuffer2(commandBuffer, pCopyImageToBufferInfo);
4348 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4349 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4350 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004351}
4352
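// Shared validation for vkCmdBlitImage and its 2/2KHR variants. Blit offsets may be given in either
// order, so each region is normalized to a min-corner offset plus absolute extent before checking the
// source for BLIT_TRANSFER_READ hazards and the destination for BLIT_TRANSFER_WRITE hazards.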
4353template <typename RegionType>
4354bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4355 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4356 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004357 bool skip = false;
4358 const auto *cb_access_context = GetAccessContext(commandBuffer);
4359 assert(cb_access_context);
4360 if (!cb_access_context) return skip;
4361
4362 const auto *context = cb_access_context->GetCurrentAccessContext();
4363 assert(context);
4364 if (!context) return skip;
4365
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004366 auto src_image = Get<IMAGE_STATE>(srcImage);
4367 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004368
4369 for (uint32_t region = 0; region < regionCount; region++) {
4370 const auto &blit_region = pRegions[region];
4371 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004372 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4373 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4374 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4375 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4376 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4377 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004378 auto hazard = context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004379 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004380 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004381 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004382 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004383 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004384 }
4385 }
4386
4387 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004388 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4389 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4390 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4391 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4392 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4393 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004394 auto hazard = context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004395 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004396 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004397 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004398 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004399 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004400 }
4401 if (skip) break;
4402 }
4403 }
4404
4405 return skip;
4406}
4407
Jeff Leger178b1e52020-10-05 12:22:23 -04004408bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4409 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4410 const VkImageBlit *pRegions, VkFilter filter) const {
4411 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
4412 "vkCmdBlitImage");
4413}
4414
4415bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
4416 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
4417 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4418 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4419 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
4420}
4421
Tony-LunarG542ae912021-11-04 16:06:44 -06004422bool SyncValidator::PreCallValidateCmdBlitImage2(VkCommandBuffer commandBuffer,
4423 const VkBlitImageInfo2 *pBlitImageInfo) const {
4424 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4425 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4426 pBlitImageInfo->filter, "vkCmdBlitImage2");
4427}
4428
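// Shared record path for vkCmdBlitImage and its 2/2KHR variants: normalizes each blit region exactly as
// the validation path does, then records the source read and destination write accesses under the tag.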
Jeff Leger178b1e52020-10-05 12:22:23 -04004429template <typename RegionType>
4430void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4431 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4432 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004433 auto *cb_access_context = GetAccessContext(commandBuffer);
4434 assert(cb_access_context);
4435 auto *context = cb_access_context->GetCurrentAccessContext();
4436 assert(context);
4437
Jeremy Gebben9f537102021-10-05 16:37:12 -06004438 auto src_image = Get<IMAGE_STATE>(srcImage);
4439 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004440
4441 for (uint32_t region = 0; region < regionCount; region++) {
4442 const auto &blit_region = pRegions[region];
4443 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004444 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4445 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4446 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4447 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4448 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4449 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004450 context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004451 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004452 }
4453 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004454 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4455 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4456 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4457 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4458 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4459 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004460 context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004461 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004462 }
4463 }
4464}
locke-lunarg36ba2592020-04-03 09:42:04 -06004465
Jeff Leger178b1e52020-10-05 12:22:23 -04004466void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4467 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4468 const VkImageBlit *pRegions, VkFilter filter) {
4469 auto *cb_access_context = GetAccessContext(commandBuffer);
4470 assert(cb_access_context);
4471 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
4472 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4473 pRegions, filter);
4474 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
4475}
4476
4477void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
4478 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
4479 auto *cb_access_context = GetAccessContext(commandBuffer);
4480 assert(cb_access_context);
4481 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
4482 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4483 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4484 pBlitImageInfo->filter, tag);
4485}
4486
Tony-LunarG542ae912021-11-04 16:06:44 -06004487void SyncValidator::PreCallRecordCmdBlitImage2(VkCommandBuffer commandBuffer, const VkBlitImageInfo2 *pBlitImageInfo) {
4488    StateTracker::PreCallRecordCmdBlitImage2(commandBuffer, pBlitImageInfo);
4489 auto *cb_access_context = GetAccessContext(commandBuffer);
4490 assert(cb_access_context);
4491 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2);
4492 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4493 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4494 pBlitImageInfo->filter, tag);
4495}
4496
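// Checks the indirect parameter buffer for INDIRECT_COMMAND_READ hazards. When the entries are tightly
// packed (a single draw, or stride == struct_size) one merged range is checked; otherwise each of the
// drawCount entries is checked as its own struct_size range at offset + i * stride.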
John Zulauffaea0ee2021-01-14 14:01:32 -07004497bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4498 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
4499 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
4500 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004501 bool skip = false;
4502 if (drawCount == 0) return skip;
4503
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004504 auto buf_state = Get<BUFFER_STATE>(buffer);
locke-lunargff255f92020-05-13 18:53:52 -06004505 VkDeviceSize size = struct_size;
4506 if (drawCount == 1 || stride == size) {
4507 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004508 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06004509 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4510 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004511 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004512 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004513 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004514 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004515 }
4516 } else {
4517 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004518 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06004519 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4520 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004521 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004522 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
4523 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004524 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004525 break;
4526 }
4527 }
4528 }
4529 return skip;
4530}
4531
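// Records INDIRECT_COMMAND_READ accesses on the indirect parameter buffer, mirroring the range logic of
// ValidateIndirectBuffer (one merged range when packed, otherwise one range per draw).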
John Zulauf14940722021-04-12 15:19:02 -06004532void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag tag, const VkDeviceSize struct_size,
locke-lunarg61870c22020-06-09 14:51:50 -06004533 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
4534 uint32_t stride) {
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004535 auto buf_state = Get<BUFFER_STATE>(buffer);
locke-lunargff255f92020-05-13 18:53:52 -06004536 VkDeviceSize size = struct_size;
4537 if (drawCount == 1 || stride == size) {
4538 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004539 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004540 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004541 } else {
4542 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004543 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004544 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
4545 tag);
locke-lunargff255f92020-05-13 18:53:52 -06004546 }
4547 }
4548}
4549
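// Checks the 4-byte draw count read from the count buffer for INDIRECT_COMMAND_READ hazards.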
John Zulauffaea0ee2021-01-14 14:01:32 -07004550bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4551 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4552 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004553 bool skip = false;
4554
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004555 auto count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004556 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06004557 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4558 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004559 skip |= LogError(count_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004560 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004561 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004562 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004563 }
4564 return skip;
4565}
4566
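// Records the 4-byte INDIRECT_COMMAND_READ access on the count buffer.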
John Zulauf14940722021-04-12 15:19:02 -06004567void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag tag, VkBuffer buffer, VkDeviceSize offset) {
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004568 auto count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004569 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004570 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004571}
4572
locke-lunarg36ba2592020-04-03 09:42:04 -06004573bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004574 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004575 const auto *cb_access_context = GetAccessContext(commandBuffer);
4576 assert(cb_access_context);
4577 if (!cb_access_context) return skip;
4578
locke-lunarg61870c22020-06-09 14:51:50 -06004579 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004580 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004581}
4582
4583void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004584 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004585 auto *cb_access_context = GetAccessContext(commandBuffer);
4586 assert(cb_access_context);
4587 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004588
locke-lunarg61870c22020-06-09 14:51:50 -06004589 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004590}
locke-lunarge1a67022020-04-29 00:15:36 -06004591
4592bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004593 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004594 const auto *cb_access_context = GetAccessContext(commandBuffer);
4595 assert(cb_access_context);
4596 if (!cb_access_context) return skip;
4597
4598 const auto *context = cb_access_context->GetCurrentAccessContext();
4599 assert(context);
4600 if (!context) return skip;
4601
locke-lunarg61870c22020-06-09 14:51:50 -06004602 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004603 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4604 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004605 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004606}
4607
4608void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004609 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004610 auto *cb_access_context = GetAccessContext(commandBuffer);
4611 assert(cb_access_context);
4612 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4613 auto *context = cb_access_context->GetCurrentAccessContext();
4614 assert(context);
4615
locke-lunarg61870c22020-06-09 14:51:50 -06004616 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4617 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004618}
4619
4620bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4621 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004622 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004623 const auto *cb_access_context = GetAccessContext(commandBuffer);
4624 assert(cb_access_context);
4625 if (!cb_access_context) return skip;
4626
locke-lunarg61870c22020-06-09 14:51:50 -06004627 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4628 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4629 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004630 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004631}
4632
4633void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4634 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004635 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004636 auto *cb_access_context = GetAccessContext(commandBuffer);
4637 assert(cb_access_context);
4638 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004639
locke-lunarg61870c22020-06-09 14:51:50 -06004640 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4641 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4642 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004643}
4644
4645bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4646 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004647 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004648 const auto *cb_access_context = GetAccessContext(commandBuffer);
4649 assert(cb_access_context);
4650 if (!cb_access_context) return skip;
4651
locke-lunarg61870c22020-06-09 14:51:50 -06004652 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4653 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4654 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004655 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004656}
4657
4658void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4659 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004660 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004661 auto *cb_access_context = GetAccessContext(commandBuffer);
4662 assert(cb_access_context);
4663 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004664
locke-lunarg61870c22020-06-09 14:51:50 -06004665 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4666 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4667 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004668}
4669
4670bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4671 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004672 bool skip = false;
4673 if (drawCount == 0) return skip;
4674
locke-lunargff255f92020-05-13 18:53:52 -06004675 const auto *cb_access_context = GetAccessContext(commandBuffer);
4676 assert(cb_access_context);
4677 if (!cb_access_context) return skip;
4678
4679 const auto *context = cb_access_context->GetCurrentAccessContext();
4680 assert(context);
4681 if (!context) return skip;
4682
locke-lunarg61870c22020-06-09 14:51:50 -06004683 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4684 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004685 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4686 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004687
4688 // TODO: For now, we validate the whole vertex buffer. It might cause some false positive.
4689 // VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4690 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004691 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004692 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004693}
4694
4695void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4696 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004697 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004698 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004699 auto *cb_access_context = GetAccessContext(commandBuffer);
4700 assert(cb_access_context);
4701 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4702 auto *context = cb_access_context->GetCurrentAccessContext();
4703 assert(context);
4704
locke-lunarg61870c22020-06-09 14:51:50 -06004705 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4706 cb_access_context->RecordDrawSubpassAttachment(tag);
4707 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004708
4709 // TODO: For now, we record the whole vertex buffer. It might cause some false positive.
4710 // VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4711 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004712 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004713}
4714
4715bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4716 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004717 bool skip = false;
4718 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004719 const auto *cb_access_context = GetAccessContext(commandBuffer);
4720 assert(cb_access_context);
4721 if (!cb_access_context) return skip;
4722
4723 const auto *context = cb_access_context->GetCurrentAccessContext();
4724 assert(context);
4725 if (!context) return skip;
4726
locke-lunarg61870c22020-06-09 14:51:50 -06004727 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4728 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004729 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4730 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004731
4732 // TODO: For now, we validate the whole index and vertex buffer. It might cause some false positive.
4733 // VkDrawIndexedIndirectCommand buffer could be changed until SubmitQueue.
4734 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004735 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004736 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004737}
4738
4739void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4740 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004741 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004742 auto *cb_access_context = GetAccessContext(commandBuffer);
4743 assert(cb_access_context);
4744 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4745 auto *context = cb_access_context->GetCurrentAccessContext();
4746 assert(context);
4747
locke-lunarg61870c22020-06-09 14:51:50 -06004748 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4749 cb_access_context->RecordDrawSubpassAttachment(tag);
4750 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004751
4752 // TODO: For now, we record the whole index and vertex buffer. It might cause some false positive.
4753 // VkDrawIndexedIndirectCommand buffer could be changed until SubmitQueue.
4754 // We will record the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004755 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004756}
4757
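// Shared validation for the vkCmdDrawIndirectCount family (core/KHR/AMD): descriptor, subpass
// attachment, indirect buffer, and count buffer checks, plus a conservative whole-vertex-buffer check
// because the actual draw parameters are not known until queue submission.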
4758bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4759 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4760 uint32_t stride, const char *function) const {
4761 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004762 const auto *cb_access_context = GetAccessContext(commandBuffer);
4763 assert(cb_access_context);
4764 if (!cb_access_context) return skip;
4765
4766 const auto *context = cb_access_context->GetCurrentAccessContext();
4767 assert(context);
4768 if (!context) return skip;
4769
locke-lunarg61870c22020-06-09 14:51:50 -06004770 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4771 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004772 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4773 maxDrawCount, stride, function);
4774 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004775
4776 // TODO: For now, we validate the whole vertex buffer. It might cause some false positive.
4777 // VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4778 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004779 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004780 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004781}
4782
4783bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4784 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4785 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004786 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4787 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004788}
4789
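// Shared record path for the vkCmdDrawIndirectCount family: records descriptor and attachment accesses,
// a single VkDrawIndirectCommand read (the real draw count lives on the GPU), the count buffer read,
// and a conservative whole-vertex-buffer read.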
sfricke-samsung85584a72021-09-30 21:43:38 -07004790void SyncValidator::RecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4791 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4792 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06004793 auto *cb_access_context = GetAccessContext(commandBuffer);
4794 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07004795 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06004796 auto *context = cb_access_context->GetCurrentAccessContext();
4797 assert(context);
4798
locke-lunarg61870c22020-06-09 14:51:50 -06004799 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4800 cb_access_context->RecordDrawSubpassAttachment(tag);
4801 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4802 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004803
4804 // TODO: For now, we record the whole vertex buffer. It might cause some false positive.
4805 // VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4806 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004807 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004808}
4809
sfricke-samsung85584a72021-09-30 21:43:38 -07004810void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4811 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4812 uint32_t stride) {
4813 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4814 stride);
4815 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4816 CMD_DRAWINDIRECTCOUNT);
4817}
locke-lunarge1a67022020-04-29 00:15:36 -06004818bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4819 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4820 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004821 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4822 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004823}
4824
4825void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4826 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4827 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004828 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4829 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004830 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4831 CMD_DRAWINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06004832}
4833
4834bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4835 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4836 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004837 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4838 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004839}
4840
4841void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4842 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4843 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004844 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4845 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004846 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4847 CMD_DRAWINDIRECTCOUNTAMD);
locke-lunargff255f92020-05-13 18:53:52 -06004848}
4849
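// Shared validation for the vkCmdDrawIndexedIndirectCount family (core/KHR/AMD): same checks as the
// non-indexed variant, using VkDrawIndexedIndirectCommand and the conservative index/vertex buffer check.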
4850bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4851 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4852 uint32_t stride, const char *function) const {
4853 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004854 const auto *cb_access_context = GetAccessContext(commandBuffer);
4855 assert(cb_access_context);
4856 if (!cb_access_context) return skip;
4857
4858 const auto *context = cb_access_context->GetCurrentAccessContext();
4859 assert(context);
4860 if (!context) return skip;
4861
locke-lunarg61870c22020-06-09 14:51:50 -06004862 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4863 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004864 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4865 offset, maxDrawCount, stride, function);
4866 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004867
4868 // TODO: For now, we validate the whole index and vertex buffer. It might cause some false positive.
4869 // VkDrawIndexedIndirectCommand buffer could be changed until SubmitQueue.
4870 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004871 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004872 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004873}
4874
4875bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4876 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4877 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004878 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4879 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004880}
4881
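// Shared record path for the vkCmdDrawIndexedIndirectCount family: mirrors the non-indexed variant but
// records a VkDrawIndexedIndirectCommand read and the conservative index/vertex buffer accesses.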
sfricke-samsung85584a72021-09-30 21:43:38 -07004882void SyncValidator::RecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4883 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4884 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06004885 auto *cb_access_context = GetAccessContext(commandBuffer);
4886 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07004887 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06004888 auto *context = cb_access_context->GetCurrentAccessContext();
4889 assert(context);
4890
locke-lunarg61870c22020-06-09 14:51:50 -06004891 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4892 cb_access_context->RecordDrawSubpassAttachment(tag);
4893 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4894 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004895
4896 // TODO: For now, we record the whole index and vertex buffer. It might cause some false positive.
4897 // VkDrawIndexedIndirectCommand buffer could be changed until SubmitQueue.
locke-lunarg61870c22020-06-09 14:51:50 -06004898    // We will record the index and vertex buffer in SubmitQueue in the future.
4899 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004900}
4901
sfricke-samsung85584a72021-09-30 21:43:38 -07004902void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4903 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4904 uint32_t maxDrawCount, uint32_t stride) {
4905 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4906 maxDrawCount, stride);
4907 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4908 CMD_DRAWINDEXEDINDIRECTCOUNT);
4909}
4910
locke-lunarge1a67022020-04-29 00:15:36 -06004911bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4912 VkDeviceSize offset, VkBuffer countBuffer,
4913 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4914 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004915 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4916 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004917}
4918
4919void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4920 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4921 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004922 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4923 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004924 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4925 CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06004926}
4927
4928bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4929 VkDeviceSize offset, VkBuffer countBuffer,
4930 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4931 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004932 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4933 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004934}
4935
4936void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4937 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4938 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004939 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4940 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004941 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4942 CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
locke-lunarge1a67022020-04-29 00:15:36 -06004943}
4944
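// Clear commands (vkCmdClearColorImage / vkCmdClearDepthStencilImage) are modeled as CLEAR_TRANSFER_WRITE
// accesses: each VkImageSubresourceRange is hazard-checked here and recorded in the matching
// PreCallRecord entry point.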
4945bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4946 const VkClearColorValue *pColor, uint32_t rangeCount,
4947 const VkImageSubresourceRange *pRanges) const {
4948 bool skip = false;
4949 const auto *cb_access_context = GetAccessContext(commandBuffer);
4950 assert(cb_access_context);
4951 if (!cb_access_context) return skip;
4952
4953 const auto *context = cb_access_context->GetCurrentAccessContext();
4954 assert(context);
4955 if (!context) return skip;
4956
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004957 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06004958
4959 for (uint32_t index = 0; index < rangeCount; index++) {
4960 const auto &range = pRanges[index];
4961 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004962 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004963 if (hazard.hazard) {
4964 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004965 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004966 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004967 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004968 }
4969 }
4970 }
4971 return skip;
4972}
4973
4974void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4975 const VkClearColorValue *pColor, uint32_t rangeCount,
4976 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004977 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004978 auto *cb_access_context = GetAccessContext(commandBuffer);
4979 assert(cb_access_context);
4980 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4981 auto *context = cb_access_context->GetCurrentAccessContext();
4982 assert(context);
4983
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004984 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06004985
4986 for (uint32_t index = 0; index < rangeCount; index++) {
4987 const auto &range = pRanges[index];
4988 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004989 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004990 }
4991 }
4992}
4993
4994bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4995 VkImageLayout imageLayout,
4996 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4997 const VkImageSubresourceRange *pRanges) const {
4998 bool skip = false;
4999 const auto *cb_access_context = GetAccessContext(commandBuffer);
5000 assert(cb_access_context);
5001 if (!cb_access_context) return skip;
5002
5003 const auto *context = cb_access_context->GetCurrentAccessContext();
5004 assert(context);
5005 if (!context) return skip;
5006
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005007 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005008
5009 for (uint32_t index = 0; index < rangeCount; index++) {
5010 const auto &range = pRanges[index];
5011 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06005012 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005013 if (hazard.hazard) {
5014 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005015 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005016 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07005017 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005018 }
5019 }
5020 }
5021 return skip;
5022}
5023
5024void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
5025 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
5026 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005027 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06005028 auto *cb_access_context = GetAccessContext(commandBuffer);
5029 assert(cb_access_context);
5030 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
5031 auto *context = cb_access_context->GetCurrentAccessContext();
5032 assert(context);
5033
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005034 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005035
5036 for (uint32_t index = 0; index < rangeCount; index++) {
5037 const auto &range = pRanges[index];
5038 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06005039 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005040 }
5041 }
5042}
5043
5044bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
5045 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
5046 VkDeviceSize dstOffset, VkDeviceSize stride,
5047 VkQueryResultFlags flags) const {
5048 bool skip = false;
5049 const auto *cb_access_context = GetAccessContext(commandBuffer);
5050 assert(cb_access_context);
5051 if (!cb_access_context) return skip;
5052
5053 const auto *context = cb_access_context->GetCurrentAccessContext();
5054 assert(context);
5055 if (!context) return skip;
5056
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005057 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005058
5059 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005060 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005061 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005062 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005063 skip |=
5064 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5065 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005066 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005067 }
5068 }
locke-lunargff255f92020-05-13 18:53:52 -06005069
5070 // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06005071 return skip;
5072}
5073
5074void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
5075 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5076 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005077 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
5078 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06005079 auto *cb_access_context = GetAccessContext(commandBuffer);
5080 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06005081 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06005082 auto *context = cb_access_context->GetCurrentAccessContext();
5083 assert(context);
5084
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005085 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005086
5087 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005088 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005089 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005090 }
locke-lunargff255f92020-05-13 18:53:52 -06005091
5092 // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06005093}
5094
5095bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5096 VkDeviceSize size, uint32_t data) const {
5097 bool skip = false;
5098 const auto *cb_access_context = GetAccessContext(commandBuffer);
5099 assert(cb_access_context);
5100 if (!cb_access_context) return skip;
5101
5102 const auto *context = cb_access_context->GetCurrentAccessContext();
5103 assert(context);
5104 if (!context) return skip;
5105
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005106 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005107
5108 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005109 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005110 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005111 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005112 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005113 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005114 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005115 }
5116 }
5117 return skip;
5118}
5119
5120void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5121 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005122 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06005123 auto *cb_access_context = GetAccessContext(commandBuffer);
5124 assert(cb_access_context);
5125 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
5126 auto *context = cb_access_context->GetCurrentAccessContext();
5127 assert(context);
5128
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005129 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005130
5131 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005132 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005133 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005134 }
5135}
5136
5137bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5138 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5139 const VkImageResolve *pRegions) const {
5140 bool skip = false;
5141 const auto *cb_access_context = GetAccessContext(commandBuffer);
5142 assert(cb_access_context);
5143 if (!cb_access_context) return skip;
5144
5145 const auto *context = cb_access_context->GetCurrentAccessContext();
5146 assert(context);
5147 if (!context) return skip;
5148
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005149 auto src_image = Get<IMAGE_STATE>(srcImage);
5150 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarge1a67022020-04-29 00:15:36 -06005151
5152 for (uint32_t region = 0; region < regionCount; region++) {
5153 const auto &resolve_region = pRegions[region];
5154 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005155 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06005156 resolve_region.srcOffset, resolve_region.extent);
5157 if (hazard.hazard) {
5158 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005159 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005160 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005161 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005162 }
5163 }
5164
5165 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005166 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06005167 resolve_region.dstOffset, resolve_region.extent);
5168 if (hazard.hazard) {
5169 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005170 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005171 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005172 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005173 }
5174 if (skip) break;
5175 }
5176 }
5177
5178 return skip;
5179}
5180
5181void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5182 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5183 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005184 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5185 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06005186 auto *cb_access_context = GetAccessContext(commandBuffer);
5187 assert(cb_access_context);
5188 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
5189 auto *context = cb_access_context->GetCurrentAccessContext();
5190 assert(context);
5191
Jeremy Gebben9f537102021-10-05 16:37:12 -06005192 auto src_image = Get<IMAGE_STATE>(srcImage);
5193 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarge1a67022020-04-29 00:15:36 -06005194
5195 for (uint32_t region = 0; region < regionCount; region++) {
5196 const auto &resolve_region = pRegions[region];
5197 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005198 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005199 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005200 }
5201 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005202 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005203 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005204 }
5205 }
5206}
5207
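// Shared validation for vkCmdResolveImage2 and vkCmdResolveImage2KHR; cmd_type selects the command name used in the
// error messages.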
Tony-LunarG562fc102021-11-12 13:58:35 -07005208bool SyncValidator::ValidateCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
5209 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04005210 bool skip = false;
5211 const auto *cb_access_context = GetAccessContext(commandBuffer);
5212 assert(cb_access_context);
5213 if (!cb_access_context) return skip;
5214
5215 const auto *context = cb_access_context->GetCurrentAccessContext();
5216 assert(context);
5217 if (!context) return skip;
5218
Tony-LunarG562fc102021-11-12 13:58:35 -07005219 const char *func_name = CommandTypeString(cmd_type);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005220 auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5221 auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04005222
5223 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5224 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5225 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005226 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04005227 resolve_region.srcOffset, resolve_region.extent);
5228 if (hazard.hazard) {
5229 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
Tony-LunarG562fc102021-11-12 13:58:35 -07005230 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04005231 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005232 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005233 }
5234 }
5235
5236 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005237 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04005238 resolve_region.dstOffset, resolve_region.extent);
5239 if (hazard.hazard) {
5240 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
Tony-LunarG562fc102021-11-12 13:58:35 -07005241 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04005242 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005243 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005244 }
5245 if (skip) break;
5246 }
5247 }
5248
5249 return skip;
5250}
5251
Tony-LunarG562fc102021-11-12 13:58:35 -07005252bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5253 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
5254 return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
5255}
5256
5257bool SyncValidator::PreCallValidateCmdResolveImage2(VkCommandBuffer commandBuffer,
5258 const VkResolveImageInfo2 *pResolveImageInfo) const {
5259 return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
5260}
5261
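// Shared record path for vkCmdResolveImage2 and vkCmdResolveImage2KHR: tags the command and updates access state for
// each resolve region's source and destination subresources.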
5262void SyncValidator::RecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
5263 CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04005264 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
5265 auto *cb_access_context = GetAccessContext(commandBuffer);
5266 assert(cb_access_context);
Tony-LunarG562fc102021-11-12 13:58:35 -07005267 const auto tag = cb_access_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04005268 auto *context = cb_access_context->GetCurrentAccessContext();
5269 assert(context);
5270
Jeremy Gebben9f537102021-10-05 16:37:12 -06005271 auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5272 auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04005273
5274 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5275 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5276 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005277 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005278 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005279 }
5280 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005281 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005282 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005283 }
5284 }
5285}
5286
Tony-LunarG562fc102021-11-12 13:58:35 -07005287void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5288 const VkResolveImageInfo2KHR *pResolveImageInfo) {
5289 RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
5290}
5291
5292void SyncValidator::PreCallRecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2 *pResolveImageInfo) {
5293 RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
5294}
5295
locke-lunarge1a67022020-04-29 00:15:36 -06005296bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5297 VkDeviceSize dataSize, const void *pData) const {
5298 bool skip = false;
5299 const auto *cb_access_context = GetAccessContext(commandBuffer);
5300 assert(cb_access_context);
5301 if (!cb_access_context) return skip;
5302
5303 const auto *context = cb_access_context->GetCurrentAccessContext();
5304 assert(context);
5305 if (!context) return skip;
5306
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005307 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005308
5309 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005310 // VK_WHOLE_SIZE not allowed
5311 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005312 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005313 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005314 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005315 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005316 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005317 }
5318 }
5319 return skip;
5320}
5321
5322void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5323 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005324 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06005325 auto *cb_access_context = GetAccessContext(commandBuffer);
5326 assert(cb_access_context);
5327 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
5328 auto *context = cb_access_context->GetCurrentAccessContext();
5329 assert(context);
5330
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005331 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005332
5333 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005334 // VK_WHOLE_SIZE not allowed
5335 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005336 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005337 }
5338}
locke-lunargff255f92020-05-13 18:53:52 -06005339
5340bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5341 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5342 bool skip = false;
5343 const auto *cb_access_context = GetAccessContext(commandBuffer);
5344 assert(cb_access_context);
5345 if (!cb_access_context) return skip;
5346
5347 const auto *context = cb_access_context->GetCurrentAccessContext();
5348 assert(context);
5349 if (!context) return skip;
5350
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005351 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunargff255f92020-05-13 18:53:52 -06005352
5353 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005354 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005355 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunargff255f92020-05-13 18:53:52 -06005356 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005357 skip |=
5358 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5359 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005360 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06005361 }
5362 }
5363 return skip;
5364}
5365
5366void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5367 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005368 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06005369 auto *cb_access_context = GetAccessContext(commandBuffer);
5370 assert(cb_access_context);
5371 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5372 auto *context = cb_access_context->GetCurrentAccessContext();
5373 assert(context);
5374
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005375 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunargff255f92020-05-13 18:53:52 -06005376
5377 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005378 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005379 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005380 }
5381}
John Zulauf49beb112020-11-04 16:06:31 -07005382
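// The event and barrier commands below are expressed as SyncOp objects so the same validation and recording logic
// can run at record time and again when the command buffer is replayed.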
5383bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
5384 bool skip = false;
5385 const auto *cb_context = GetAccessContext(commandBuffer);
5386 assert(cb_context);
5387 if (!cb_context) return skip;
5388
John Zulauf36ef9282021-02-02 11:47:24 -07005389 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005390 return set_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005391}
5392
5393void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5394 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
5395 auto *cb_context = GetAccessContext(commandBuffer);
5396 assert(cb_context);
5397 if (!cb_context) return;
John Zulauf1bf30522021-09-03 15:39:06 -06005398 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf49beb112020-11-04 16:06:31 -07005399}
5400
John Zulauf4edde622021-02-15 08:54:50 -07005401bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5402 const VkDependencyInfoKHR *pDependencyInfo) const {
5403 bool skip = false;
5404 const auto *cb_context = GetAccessContext(commandBuffer);
5405 assert(cb_context);
5406 if (!cb_context || !pDependencyInfo) return skip;
5407
5408 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5409 return set_event_op.Validate(*cb_context);
5410}
5411
Tony-LunarGc43525f2021-11-15 16:12:38 -07005412bool SyncValidator::PreCallValidateCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5413 const VkDependencyInfo *pDependencyInfo) const {
5414 bool skip = false;
5415 const auto *cb_context = GetAccessContext(commandBuffer);
5416 assert(cb_context);
5417 if (!cb_context || !pDependencyInfo) return skip;
5418
5419 SyncOpSetEvent set_event_op(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5420 return set_event_op.Validate(*cb_context);
5421}
5422
John Zulauf4edde622021-02-15 08:54:50 -07005423void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5424 const VkDependencyInfoKHR *pDependencyInfo) {
5425 StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
5426 auto *cb_context = GetAccessContext(commandBuffer);
5427 assert(cb_context);
5428 if (!cb_context || !pDependencyInfo) return;
5429
John Zulauf1bf30522021-09-03 15:39:06 -06005430 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
John Zulauf4edde622021-02-15 08:54:50 -07005431}
5432
Tony-LunarGc43525f2021-11-15 16:12:38 -07005433void SyncValidator::PostCallRecordCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5434 const VkDependencyInfo *pDependencyInfo) {
5435 StateTracker::PostCallRecordCmdSetEvent2(commandBuffer, event, pDependencyInfo);
5436 auto *cb_context = GetAccessContext(commandBuffer);
5437 assert(cb_context);
5438 if (!cb_context || !pDependencyInfo) return;
5439
5440 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5441}
5442
John Zulauf49beb112020-11-04 16:06:31 -07005443bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
5444 VkPipelineStageFlags stageMask) const {
5445 bool skip = false;
5446 const auto *cb_context = GetAccessContext(commandBuffer);
5447 assert(cb_context);
5448 if (!cb_context) return skip;
5449
John Zulauf36ef9282021-02-02 11:47:24 -07005450 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005451 return reset_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005452}
5453
5454void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5455 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
5456 auto *cb_context = GetAccessContext(commandBuffer);
5457 assert(cb_context);
5458 if (!cb_context) return;
5459
John Zulauf1bf30522021-09-03 15:39:06 -06005460 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf49beb112020-11-04 16:06:31 -07005461}
5462
John Zulauf4edde622021-02-15 08:54:50 -07005463bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5464 VkPipelineStageFlags2KHR stageMask) const {
5465 bool skip = false;
5466 const auto *cb_context = GetAccessContext(commandBuffer);
5467 assert(cb_context);
5468 if (!cb_context) return skip;
5469
5470 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
5471 return reset_event_op.Validate(*cb_context);
5472}
5473
Tony-LunarGa2662db2021-11-16 07:26:24 -07005474bool SyncValidator::PreCallValidateCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5475 VkPipelineStageFlags2 stageMask) const {
5476 bool skip = false;
5477 const auto *cb_context = GetAccessContext(commandBuffer);
5478 assert(cb_context);
5479 if (!cb_context) return skip;
5480
5481 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
5482 return reset_event_op.Validate(*cb_context);
5483}
5484
John Zulauf4edde622021-02-15 08:54:50 -07005485void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5486 VkPipelineStageFlags2KHR stageMask) {
5487 StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
5488 auto *cb_context = GetAccessContext(commandBuffer);
5489 assert(cb_context);
5490 if (!cb_context) return;
5491
John Zulauf1bf30522021-09-03 15:39:06 -06005492 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf4edde622021-02-15 08:54:50 -07005493}
5494
Tony-LunarGa2662db2021-11-16 07:26:24 -07005495void SyncValidator::PostCallRecordCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask) {
5496 StateTracker::PostCallRecordCmdResetEvent2(commandBuffer, event, stageMask);
5497 auto *cb_context = GetAccessContext(commandBuffer);
5498 assert(cb_context);
5499 if (!cb_context) return;
5500
5501 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
5502}
5503
John Zulauf49beb112020-11-04 16:06:31 -07005504bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5505 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5506 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5507 uint32_t bufferMemoryBarrierCount,
5508 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5509 uint32_t imageMemoryBarrierCount,
5510 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
5511 bool skip = false;
5512 const auto *cb_context = GetAccessContext(commandBuffer);
5513 assert(cb_context);
5514 if (!cb_context) return skip;
5515
John Zulauf36ef9282021-02-02 11:47:24 -07005516 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
5517 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
5518 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufd5115702021-01-18 12:34:33 -07005519 return wait_events_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005520}
5521
5522void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5523 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5524 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5525 uint32_t bufferMemoryBarrierCount,
5526 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5527 uint32_t imageMemoryBarrierCount,
5528 const VkImageMemoryBarrier *pImageMemoryBarriers) {
5529 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5530 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
5531 imageMemoryBarrierCount, pImageMemoryBarriers);
5532
5533 auto *cb_context = GetAccessContext(commandBuffer);
5534 assert(cb_context);
5535 if (!cb_context) return;
5536
John Zulauf1bf30522021-09-03 15:39:06 -06005537 cb_context->RecordSyncOp<SyncOpWaitEvents>(
John Zulauf610e28c2021-08-03 17:46:23 -06005538 CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
John Zulauf1bf30522021-09-03 15:39:06 -06005539 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf4a6105a2020-11-17 15:11:05 -07005540}
5541
John Zulauf4edde622021-02-15 08:54:50 -07005542bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5543 const VkDependencyInfoKHR *pDependencyInfos) const {
5544 bool skip = false;
5545 const auto *cb_context = GetAccessContext(commandBuffer);
5546 assert(cb_context);
5547 if (!cb_context) return skip;
5548
5549 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
5550 skip |= wait_events_op.Validate(*cb_context);
5551 return skip;
5552}
5553
5554void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5555 const VkDependencyInfoKHR *pDependencyInfos) {
5556 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
5557
5558 auto *cb_context = GetAccessContext(commandBuffer);
5559 assert(cb_context);
5560 if (!cb_context) return;
5561
John Zulauf1bf30522021-09-03 15:39:06 -06005562 cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
5563 pDependencyInfos);
John Zulauf4edde622021-02-15 08:54:50 -07005564}
5565
Tony-LunarG1364cf52021-11-17 16:10:11 -07005566bool SyncValidator::PreCallValidateCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5567 const VkDependencyInfo *pDependencyInfos) const {
5568 bool skip = false;
5569 const auto *cb_context = GetAccessContext(commandBuffer);
5570 assert(cb_context);
5571 if (!cb_context) return skip;
5572
5573 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
5574 skip |= wait_events_op.Validate(*cb_context);
5575 return skip;
5576}
5577
5578void SyncValidator::PostCallRecordCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5579 const VkDependencyInfo *pDependencyInfos) {
5580 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
5581
5582 auto *cb_context = GetAccessContext(commandBuffer);
5583 assert(cb_context);
5584 if (!cb_context) return;
5585
5586 cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
5587 pDependencyInfos);
5588}
5589
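// Clear the per-address-type first-scope access maps and reset the scope state so the event reads as unsignaled.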
John Zulauf4a6105a2020-11-17 15:11:05 -07005590void SyncEventState::ResetFirstScope() {
5591 for (const auto address_type : kAddressTypes) {
5592 first_scope[static_cast<size_t>(address_type)].clear();
5593 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005594 scope = SyncExecScope();
John Zulauf78b1f892021-09-20 15:02:09 -06005595 first_scope_set = false;
5596 first_scope_tag = 0;
John Zulauf4a6105a2020-11-17 15:11:05 -07005597}
5598
5599 // Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
John Zulauf4edde622021-02-15 08:54:50 -07005600SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd, VkPipelineStageFlags2KHR srcStageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005601 IgnoreReason reason = NotIgnored;
5602
Tony-LunarG1364cf52021-11-17 16:10:11 -07005603 if ((CMD_WAITEVENTS2KHR == cmd || CMD_WAITEVENTS2 == cmd) && (CMD_SETEVENT == last_command)) {
John Zulauf4edde622021-02-15 08:54:50 -07005604 reason = SetVsWait2;
5605 } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR) && !HasBarrier(0U, 0U)) {
5606 reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
John Zulauf4a6105a2020-11-17 15:11:05 -07005607 } else if (unsynchronized_set) {
5608 reason = SetRace;
John Zulauf78b1f892021-09-20 15:02:09 -06005609 } else if (first_scope_set) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005610 const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07005611 if (missing_bits) reason = MissingStageBits;
5612 }
5613
5614 return reason;
5615}
5616
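// True when no command has touched the event yet, when the queried stage mask uses ALL_COMMANDS, or when a
// subsequent execution barrier covers the queried scope.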
Jeremy Gebben40a22942020-12-22 14:22:06 -07005617bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005618 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
5619 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
5620 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07005621}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005622
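// Replay support: remember which subpass this op was recorded in and the replay context holding the per-subpass
// trackback barrier actions to use when the op is replayed.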
John Zulaufbb890452021-12-14 11:30:18 -07005623void SyncOpBase::SetReplayContext(uint32_t subpass, ReplayContextPtr &&replay) {
5624 subpass_ = subpass;
5625 replay_context_ = std::move(replay);
5626}
5627
5628const ReplayTrackbackBarriersAction *SyncOpBase::GetReplayTrackback() const {
5629 if (replay_context_) {
5630 assert(subpass_ < replay_context_->subpass_contexts.size());
5631 return &replay_context_->subpass_contexts[subpass_];
5632 }
5633 return nullptr;
5634}
5635
John Zulauf36ef9282021-02-02 11:47:24 -07005636SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5637 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5638 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005639 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5640 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5641 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf4edde622021-02-15 08:54:50 -07005642 : SyncOpBase(cmd), barriers_(1) {
5643 auto &barrier_set = barriers_[0];
5644 barrier_set.dependency_flags = dependencyFlags;
5645 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
5646 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005647 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
John Zulauf4edde622021-02-15 08:54:50 -07005648 barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
5649 pMemoryBarriers);
5650 barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5651 bufferMemoryBarrierCount, pBufferMemoryBarriers);
5652 barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5653 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005654}
5655
John Zulauf4edde622021-02-15 08:54:50 -07005656SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
5657 const VkDependencyInfoKHR *dep_infos)
5658 : SyncOpBase(cmd), barriers_(event_count) {
5659 for (uint32_t i = 0; i < event_count; i++) {
5660 const auto &dep_info = dep_infos[i];
5661 auto &barrier_set = barriers_[i];
5662 barrier_set.dependency_flags = dep_info.dependencyFlags;
5663 auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
5664 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
5665 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
5666 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
5667 barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
5668 dep_info.pMemoryBarriers);
5669 barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
5670 dep_info.pBufferMemoryBarriers);
5671 barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
5672 dep_info.pImageMemoryBarriers);
5673 }
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005674}
5675
John Zulauf36ef9282021-02-02 11:47:24 -07005676SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulaufd5115702021-01-18 12:34:33 -07005677 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5678 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
5679 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5680 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5681 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005682 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
John Zulaufd5115702021-01-18 12:34:33 -07005683 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}
5684
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005685SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5686 const VkDependencyInfoKHR &dep_info)
John Zulauf4edde622021-02-15 08:54:50 -07005687 : SyncOpBarriers(cmd, sync_state, queue_flags, 1, &dep_info) {}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005688
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005689bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
5690 bool skip = false;
5691 const auto *context = cb_context.GetCurrentAccessContext();
5692 assert(context);
5693 if (!context) return skip;
John Zulauf6fdf3d02021-03-05 16:50:47 -07005694 assert(barriers_.size() == 1); // PipelineBarriers only support a single barrier set.
5695
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005696 // Validate Image Layout transitions
John Zulauf6fdf3d02021-03-05 16:50:47 -07005697 const auto &barrier_set = barriers_[0];
5698 for (const auto &image_barrier : barrier_set.image_memory_barriers) {
5699 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
5700 const auto *image_state = image_barrier.image.get();
5701 if (!image_state) continue;
5702 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
5703 if (hazard.hazard) {
5704 // PHASE1 TODO -- add tag information to log msg when useful.
5705 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005706 const auto image_handle = image_state->image();
John Zulauf6fdf3d02021-03-05 16:50:47 -07005707 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
5708 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5709 string_SyncHazard(hazard.hazard), image_barrier.index,
5710 sync_state.report_data->FormatHandle(image_handle).c_str(),
5711 cb_context.FormatUsage(hazard).c_str());
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005712 }
5713 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005714 return skip;
5715}
5716
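// Factory that maps pipeline barrier parameters onto the functors and range generators consumed by ApplyBarriers and
// ApplyGlobalBarriers below.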
John Zulaufd5115702021-01-18 12:34:33 -07005717struct SyncOpPipelineBarrierFunctorFactory {
5718 using BarrierOpFunctor = PipelineBarrierOp;
5719 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5720 using GlobalBarrierOpFunctor = PipelineBarrierOp;
5721 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5722 using BufferRange = ResourceAccessRange;
5723 using ImageRange = subresource_adapter::ImageRangeGenerator;
5724 using GlobalRange = ResourceAccessRange;
5725
5726 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
5727 return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
5728 }
John Zulauf14940722021-04-12 15:19:02 -06005729 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07005730 return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
5731 }
5732 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
5733 return GlobalBarrierOpFunctor(barrier, false);
5734 }
5735
5736 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
5737 if (!SimpleBinding(buffer)) return ResourceAccessRange();
5738 const auto base_address = ResourceBaseAddress(buffer);
5739 return (range + base_address);
5740 }
John Zulauf110413c2021-03-20 05:38:38 -06005741 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulauf264cce02021-02-05 14:40:47 -07005742 if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
John Zulaufd5115702021-01-18 12:34:33 -07005743
5744 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06005745 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07005746 return range_gen;
5747 }
5748 GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
5749};
5750
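// Apply the per-resource (buffer and image) barriers to the access context, one range generator per barrier.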
5751template <typename Barriers, typename FunctorFactory>
John Zulauf14940722021-04-12 15:19:02 -06005752void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag tag,
John Zulaufd5115702021-01-18 12:34:33 -07005753 AccessContext *context) {
5754 for (const auto &barrier : barriers) {
5755 const auto *state = barrier.GetState();
5756 if (state) {
5757 auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
5758 auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
5759 auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
5760 UpdateMemoryAccessState(accesses, update_action, &range_gen);
5761 }
5762 }
5763}
5764
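// Apply the global memory barriers across every address type in a single resolving pass.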
5765template <typename Barriers, typename FunctorFactory>
John Zulauf14940722021-04-12 15:19:02 -06005766void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag tag,
John Zulaufd5115702021-01-18 12:34:33 -07005767 AccessContext *access_context) {
5768 auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
5769 for (const auto &barrier : barriers) {
5770 barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
5771 }
5772 for (const auto address_type : kAddressTypes) {
5773 auto range_gen = factory.MakeGlobalRangeGen(address_type);
5774 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
5775 }
5776}
5777
John Zulauf8eda1562021-04-13 17:06:41 -06005778ResourceUsageTag SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005779 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf8eda1562021-04-13 17:06:41 -06005780 auto *events_context = cb_context->GetCurrentEventsContext();
John Zulauf36ef9282021-02-02 11:47:24 -07005781 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufbb890452021-12-14 11:30:18 -07005782 ReplayRecord(tag, access_context, events_context);
John Zulauf4fa68462021-04-26 21:04:22 -06005783 return tag;
5784}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005785
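// Applies the barrier sets to the access and events contexts; called from Record() and again during replay.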
John Zulaufbb890452021-12-14 11:30:18 -07005786void SyncOpPipelineBarrier::ReplayRecord(const ResourceUsageTag tag, AccessContext *access_context,
5787 SyncEventsContext *events_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06005788 SyncOpPipelineBarrierFunctorFactory factory;
John Zulauf4edde622021-02-15 08:54:50 -07005789 // Pipeline barriers only have a single barrier set, unlike WaitEvents2
5790 assert(barriers_.size() == 1);
5791 const auto &barrier_set = barriers_[0];
5792 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5793 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5794 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulauf4edde622021-02-15 08:54:50 -07005795 if (barrier_set.single_exec_scope) {
John Zulauf8eda1562021-04-13 17:06:41 -06005796 events_context->ApplyBarrier(barrier_set.src_exec_scope, barrier_set.dst_exec_scope);
John Zulauf4edde622021-02-15 08:54:50 -07005797 } else {
5798 for (const auto &barrier : barrier_set.memory_barriers) {
John Zulauf8eda1562021-04-13 17:06:41 -06005799 events_context->ApplyBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
John Zulauf4edde622021-02-15 08:54:50 -07005800 }
5801 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005802}
5803
John Zulauf8eda1562021-04-13 17:06:41 -06005804bool SyncOpPipelineBarrier::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07005805 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf4fa68462021-04-26 21:04:22 -06005806 // No Validation for replay, as the layout transition accesses are checked directly, and the src*Mask ordering is captured
5807 // with first access information.
John Zulauf8eda1562021-04-13 17:06:41 -06005808 return false;
5809}
5810
John Zulauf4edde622021-02-15 08:54:50 -07005811void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
5812 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
5813 const VkMemoryBarrier *barriers) {
5814 memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005815 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005816 const auto &barrier = barriers[barrier_index];
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005817 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005818 memory_barriers.emplace_back(sync_barrier);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005819 }
5820 if (0 == memory_barrier_count) {
5821 // If there are no global memory barriers, force an exec barrier
John Zulauf4edde622021-02-15 08:54:50 -07005822 memory_barriers.emplace_back(SyncBarrier(src, dst));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005823 }
John Zulauf4edde622021-02-15 08:54:50 -07005824 single_exec_scope = true;
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005825}
5826
John Zulauf4edde622021-02-15 08:54:50 -07005827void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5828 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5829 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
5830 buffer_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005831 for (uint32_t index = 0; index < barrier_count; index++) {
5832 const auto &barrier = barriers[index];
Jeremy Gebben9f537102021-10-05 16:37:12 -06005833 auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005834 if (buffer) {
5835 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5836 const auto range = MakeRange(barrier.offset, barrier_size);
5837 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005838 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005839 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005840 buffer_memory_barriers.emplace_back();
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005841 }
5842 }
5843}
5844
John Zulauf4edde622021-02-15 08:54:50 -07005845void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07005846 uint32_t memory_barrier_count, const VkMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07005847 memory_barriers.reserve(memory_barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005848 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005849 const auto &barrier = barriers[barrier_index];
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005850 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5851 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5852 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005853 memory_barriers.emplace_back(sync_barrier);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005854 }
John Zulauf4edde622021-02-15 08:54:50 -07005855 single_exec_scope = false;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005856}
5857
John Zulauf4edde622021-02-15 08:54:50 -07005858void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5859 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07005860 const VkBufferMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07005861 buffer_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005862 for (uint32_t index = 0; index < barrier_count; index++) {
5863 const auto &barrier = barriers[index];
5864 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5865 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9f537102021-10-05 16:37:12 -06005866 auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005867 if (buffer) {
5868 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5869 const auto range = MakeRange(barrier.offset, barrier_size);
5870 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005871 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005872 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005873 buffer_memory_barriers.emplace_back();
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005874 }
5875 }
5876}
5877
John Zulauf4edde622021-02-15 08:54:50 -07005878void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5879 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5880 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
5881 image_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005882 for (uint32_t index = 0; index < barrier_count; index++) {
5883 const auto &barrier = barriers[index];
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005884 auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005885 if (image) {
5886 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5887 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005888 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005889 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005890 image_memory_barriers.emplace_back();
5891 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005892 }
5893 }
5894}
John Zulaufd5115702021-01-18 12:34:33 -07005895
John Zulauf4edde622021-02-15 08:54:50 -07005896void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5897 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07005898 const VkImageMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07005899 image_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005900 for (uint32_t index = 0; index < barrier_count; index++) {
5901 const auto &barrier = barriers[index];
5902 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5903 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005904 auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005905 if (image) {
5906 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5907 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005908 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005909 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005910 image_memory_barriers.emplace_back();
5911 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005912 }
5913 }
5914}
5915
John Zulauf36ef9282021-02-02 11:47:24 -07005916SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
John Zulaufd5115702021-01-18 12:34:33 -07005917 const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5918 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5919 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5920 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005921 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005922 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
5923 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07005924 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07005925}
5926
John Zulauf4edde622021-02-15 08:54:50 -07005927SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
5928 const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
5929 : SyncOpBarriers(cmd, sync_state, queue_flags, eventCount, pDependencyInfo) {
5930 MakeEventsList(sync_state, eventCount, pEvents);
5931 assert(events_.size() == barriers_.size()); // Just so nobody gets clever and decides to cull the event or barrier arrays
5932}
5933
John Zulauf610e28c2021-08-03 17:46:23 -06005934const char *const SyncOpWaitEvents::kIgnored = "Wait operation is ignored for this event.";
5935
John Zulaufd5115702021-01-18 12:34:33 -07005936bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005937 bool skip = false;
5938 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005939 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer();
John Zulaufd5115702021-01-18 12:34:33 -07005940
John Zulauf610e28c2021-08-03 17:46:23 -06005941 // This is only interesting at record and not replay (Execute/Submit) time.
John Zulauf4edde622021-02-15 08:54:50 -07005942 for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
5943 const auto &barrier_set = barriers_[barrier_set_index];
5944 if (barrier_set.single_exec_scope) {
5945 if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5946 const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5947 skip = sync_state.LogInfo(command_buffer_handle, vuid,
5948 "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
5949 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
5950 } else {
5951 const auto &barriers = barrier_set.memory_barriers;
5952 for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
5953 const auto &barrier = barriers[barrier_index];
5954 if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5955 const std::string vuid =
5956 std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5957 skip =
5958 sync_state.LogInfo(command_buffer_handle, vuid,
5959 "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
5960 CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
5961 "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
5962 }
5963 }
5964 }
5965 }
John Zulaufd5115702021-01-18 12:34:33 -07005966 }
5967
John Zulauf610e28c2021-08-03 17:46:23 -06005968 // The rest is common to record time and replay time.
5969 skip |= DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
5970 return skip;
5971}
5972
John Zulaufbb890452021-12-14 11:30:18 -07005973bool SyncOpWaitEvents::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
John Zulauf610e28c2021-08-03 17:46:23 -06005974 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07005975 const auto &sync_state = exec_context.GetSyncState();
John Zulauf610e28c2021-08-03 17:46:23 -06005976
Jeremy Gebben40a22942020-12-22 14:22:06 -07005977 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulauf4edde622021-02-15 08:54:50 -07005978 VkPipelineStageFlags2KHR barrier_mask_params = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07005979 bool events_not_found = false;
John Zulaufbb890452021-12-14 11:30:18 -07005980 const auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf669dfd52021-01-27 17:15:28 -07005981 assert(events_context);
John Zulauf4edde622021-02-15 08:54:50 -07005982 size_t barrier_set_index = 0;
5983 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
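    // Descriptive note: the legacy vkCmdWaitEvents form records a single shared barrier set, so the increment is 0 and
    // every event is validated against barriers_[0]; the *2 forms record one VkDependencyInfo per event, so the index
    // advances per event (the constructor asserts events_.size() == barriers_.size() in that case).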
John Zulauf78394fc2021-07-12 15:41:40 -06005984 for (const auto &event : events_) {
5985 const auto *sync_event = events_context->Get(event.get());
5986 const auto &barrier_set = barriers_[barrier_set_index];
5987 if (!sync_event) {
5988 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
5989 // or solve this with replay creating the SyncEventState in the queue context... also this will be a
5990 // new validation error... wait without previously submitted set event...
5991 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
John Zulauf4edde622021-02-15 08:54:50 -07005992 barrier_set_index += barrier_set_incr;
John Zulauf78394fc2021-07-12 15:41:40 -06005993 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulaufd5115702021-01-18 12:34:33 -07005994 }
John Zulauf610e28c2021-08-03 17:46:23 -06005995
5996 // For replay calls, don't revalidate "same command buffer" events
5997 if (sync_event->last_command_tag > base_tag) continue;
5998
John Zulauf78394fc2021-07-12 15:41:40 -06005999 const auto event_handle = sync_event->event->event();
6000 // TODO add "destroyed" checks
6001
John Zulauf78b1f892021-09-20 15:02:09 -06006002 if (sync_event->first_scope_set) {
6003 // Only accumulate barrier and event stages if there is a pending set in the current context
6004 barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
6005 event_stage_masks |= sync_event->scope.mask_param;
6006 }
6007
John Zulauf78394fc2021-07-12 15:41:40 -06006008 const auto &src_exec_scope = barrier_set.src_exec_scope;
John Zulauf78b1f892021-09-20 15:02:09 -06006009
John Zulauf78394fc2021-07-12 15:41:40 -06006010 const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_, src_exec_scope.mask_param);
6011 if (ignore_reason) {
6012 switch (ignore_reason) {
6013 case SyncEventState::ResetWaitRace:
6014 case SyncEventState::Reset2WaitRace: {
6015 // Four permuations of Reset and Wait calls...
6016 const char *vuid =
6017 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
6018 if (ignore_reason == SyncEventState::Reset2WaitRace) {
Tony-LunarG279601c2021-11-16 10:50:51 -07006019 vuid = (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2-event-03831"
6020 : "VUID-vkCmdResetEvent2-event-03832";
John Zulauf78394fc2021-07-12 15:41:40 -06006021 }
6022 const char *const message =
6023 "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
6024 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6025 sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
John Zulauf610e28c2021-08-03 17:46:23 -06006026 CommandTypeString(sync_event->last_command), kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006027 break;
6028 }
6029 case SyncEventState::SetRace: {
6030                    // Issue error message that Wait is waiting on a signal subject to race condition, and is thus ignored for
6031 // this event
6032 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
6033 const char *const message =
6034                        "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
6035 const char *const reason = "First synchronization scope is undefined.";
6036 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6037 sync_state.report_data->FormatHandle(event_handle).c_str(),
John Zulauf610e28c2021-08-03 17:46:23 -06006038 CommandTypeString(sync_event->last_command), reason, kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006039 break;
6040 }
6041 case SyncEventState::MissingStageBits: {
6042 const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
6043 // Issue error message that event waited for is not in wait events scope
6044 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
6045                    const char *const message = "%s: %s stageMask 0x%" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
6046 ". Bits missing from srcStageMask %s. %s";
6047 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6048 sync_state.report_data->FormatHandle(event_handle).c_str(),
6049 sync_event->scope.mask_param, src_exec_scope.mask_param,
John Zulauf610e28c2021-08-03 17:46:23 -06006050 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006051 break;
6052 }
6053 case SyncEventState::SetVsWait2: {
Tony-LunarG279601c2021-11-16 10:50:51 -07006054 skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2-pEvents-03837",
John Zulauf78394fc2021-07-12 15:41:40 -06006055 "%s: Follows set of %s by %s. Disallowed.", CmdName(),
6056 sync_state.report_data->FormatHandle(event_handle).c_str(),
6057 CommandTypeString(sync_event->last_command));
6058 break;
6059 }
6060 default:
6061 assert(ignore_reason == SyncEventState::NotIgnored);
6062 }
6063 } else if (barrier_set.image_memory_barriers.size()) {
6064 const auto &image_memory_barriers = barrier_set.image_memory_barriers;
John Zulaufbb890452021-12-14 11:30:18 -07006065 const auto *context = exec_context.GetCurrentAccessContext();
John Zulauf78394fc2021-07-12 15:41:40 -06006066 assert(context);
6067 for (const auto &image_memory_barrier : image_memory_barriers) {
6068 if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
6069 const auto *image_state = image_memory_barrier.image.get();
6070 if (!image_state) continue;
6071 const auto &subresource_range = image_memory_barrier.range;
6072 const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
6073 const auto hazard =
6074 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
6075 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
6076 if (hazard.hazard) {
6077 skip |= sync_state.LogError(image_state->image(), string_SyncHazardVUID(hazard.hazard),
6078 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
6079 string_SyncHazard(hazard.hazard), image_memory_barrier.index,
6080 sync_state.report_data->FormatHandle(image_state->image()).c_str(),
John Zulaufbb890452021-12-14 11:30:18 -07006081 exec_context.FormatUsage(hazard).c_str());
John Zulauf78394fc2021-07-12 15:41:40 -06006082 break;
6083 }
6084 }
6085 }
6086        // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2
6087        // VUID-vkCmdWaitEvents2KHR-pEvents-03839
6088 barrier_set_index += barrier_set_incr;
6089 }
John Zulaufd5115702021-01-18 12:34:33 -07006090
6091 // Note that we can't check for HOST in pEvents as we don't track that set event type
John Zulauf4edde622021-02-15 08:54:50 -07006092 const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
John Zulaufd5115702021-01-18 12:34:33 -07006093 if (extra_stage_bits) {
6094 // Issue error message that event waited for is not in wait events scope
John Zulauf4edde622021-02-15 08:54:50 -07006095 // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
6096 const char *const vuid =
Tony-LunarG279601c2021-11-16 10:50:51 -07006097 (CMD_WAITEVENTS == cmd_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2-pEvents-03838";
John Zulaufd5115702021-01-18 12:34:33 -07006098 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07006099 "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
John Zulaufbb890452021-12-14 11:30:18 -07006100 const auto handle = exec_context.Handle();
John Zulaufd5115702021-01-18 12:34:33 -07006101 if (events_not_found) {
John Zulaufbb890452021-12-14 11:30:18 -07006102 skip |= sync_state.LogInfo(handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07006103 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
John Zulaufd5115702021-01-18 12:34:33 -07006104 " vkCmdSetEvent may be in previously submitted command buffer.");
6105 } else {
John Zulaufbb890452021-12-14 11:30:18 -07006106 skip |= sync_state.LogError(handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07006107 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
John Zulaufd5115702021-01-18 12:34:33 -07006108 }
6109 }
6110 return skip;
6111}
6112
6113struct SyncOpWaitEventsFunctorFactory {
6114 using BarrierOpFunctor = WaitEventBarrierOp;
6115 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
6116 using GlobalBarrierOpFunctor = WaitEventBarrierOp;
6117 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
6118 using BufferRange = EventSimpleRangeGenerator;
6119 using ImageRange = EventImageRangeGenerator;
6120 using GlobalRange = EventSimpleRangeGenerator;
6121
6122 // Need to restrict to only valid exec and access scope for this event
6123 // Pass by value is intentional to get a copy we can change without modifying the passed barrier
6124 SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07006125 barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
John Zulaufd5115702021-01-18 12:34:33 -07006126 barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
6127 return barrier;
6128 }
6129 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
6130 auto barrier = RestrictToEvent(barrier_arg);
6131 return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
6132 }
John Zulauf14940722021-04-12 15:19:02 -06006133 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07006134 return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
6135 }
6136 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
6137 auto barrier = RestrictToEvent(barrier_arg);
6138 return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
6139 }
6140
6141 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
6142 const AccessAddressType address_type = GetAccessAddressType(buffer);
6143 const auto base_address = ResourceBaseAddress(buffer);
6144 ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
6145 EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
6146 return filtered_range_gen;
6147 }
John Zulauf110413c2021-03-20 05:38:38 -06006148 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulaufd5115702021-01-18 12:34:33 -07006149 if (!SimpleBinding(image)) return ImageRange();
6150 const auto address_type = GetAccessAddressType(image);
6151 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06006152 subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07006153 EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
6154
6155 return filtered_range_gen;
6156 }
6157 GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
6158 return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
6159 }
6160 SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
6161 SyncEventState *sync_event;
6162};
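// Illustrative sketch (editorial, not part of the validator sources): the factory above narrows every barrier to the
// event's first synchronization scope before it is applied during ReplayRecord, roughly:
//
//   SyncOpWaitEventsFunctorFactory factory(sync_event);
//   SyncBarrier narrowed = factory.RestrictToEvent(barrier);           // exec/access scopes ANDed with the event scope
//   auto apply = factory.MakeApplyFunctor(barrier, layout_transition); // wraps a WaitEventBarrierOp tagged with
//                                                                      // sync_event->first_scope_tag
//
// The MakeRangeGen overloads likewise intersect the resource ranges with sync_event->FirstScope(address_type), so
// accesses outside the event's first scope are unaffected by the wait.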
6163
John Zulauf8eda1562021-04-13 17:06:41 -06006164ResourceUsageTag SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf36ef9282021-02-02 11:47:24 -07006165 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufd5115702021-01-18 12:34:33 -07006166 auto *access_context = cb_context->GetCurrentAccessContext();
6167 assert(access_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006168 if (!access_context) return tag;
John Zulauf669dfd52021-01-27 17:15:28 -07006169 auto *events_context = cb_context->GetCurrentEventsContext();
6170 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006171 if (!events_context) return tag;
John Zulaufd5115702021-01-18 12:34:33 -07006172
John Zulaufbb890452021-12-14 11:30:18 -07006173 ReplayRecord(tag, access_context, events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006174 return tag;
6175}
6176
John Zulaufbb890452021-12-14 11:30:18 -07006177void SyncOpWaitEvents::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07006178 // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
6179 // all accesses. Can instead import for all first_scopes, or a union of them, if this becomes a performance/memory issue,
6180    // but with no idea of the performance of the union, nor of whether it even matters... take the simplest approach here.
6181 access_context->ResolvePreviousAccesses();
6182
John Zulauf4edde622021-02-15 08:54:50 -07006183 size_t barrier_set_index = 0;
6184 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
6185 assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
John Zulauf669dfd52021-01-27 17:15:28 -07006186 for (auto &event_shared : events_) {
6187 if (!event_shared.get()) continue;
6188 auto *sync_event = events_context->GetFromShared(event_shared);
John Zulaufd5115702021-01-18 12:34:33 -07006189
John Zulauf4edde622021-02-15 08:54:50 -07006190 sync_event->last_command = cmd_;
John Zulauf610e28c2021-08-03 17:46:23 -06006191 sync_event->last_command_tag = tag;
John Zulaufd5115702021-01-18 12:34:33 -07006192
John Zulauf4edde622021-02-15 08:54:50 -07006193 const auto &barrier_set = barriers_[barrier_set_index];
6194 const auto &dst = barrier_set.dst_exec_scope;
6195 if (!sync_event->IsIgnoredByWait(cmd_, barrier_set.src_exec_scope.mask_param)) {
John Zulaufd5115702021-01-18 12:34:33 -07006196            // These apply barriers one at a time as they are restricted to the resource ranges specified per barrier,
6197            // but do not update the dependency chain information (they only set the "pending" state), s.t. the order
6198            // independence of the barriers is maintained.
6199 SyncOpWaitEventsFunctorFactory factory(sync_event);
John Zulauf4edde622021-02-15 08:54:50 -07006200 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
6201 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
6202 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulaufd5115702021-01-18 12:34:33 -07006203
6204 // Apply the global barrier to the event itself (for race condition tracking)
6205 // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
6206 sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
6207 sync_event->barriers |= dst.exec_scope;
6208 } else {
6209 // We ignored this wait, so we don't have any effective synchronization barriers for it.
6210 sync_event->barriers = 0U;
6211 }
John Zulauf4edde622021-02-15 08:54:50 -07006212 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07006213 }
6214
6215 // Apply the pending barriers
6216 ResolvePendingBarrierFunctor apply_pending_action(tag);
6217 access_context->ApplyToContext(apply_pending_action);
6218}
6219
John Zulauf8eda1562021-04-13 17:06:41 -06006220bool SyncOpWaitEvents::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006221 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6222 return DoValidate(*exec_context, base_tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006223}
6224
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006225bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
6226 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
6227 bool skip = false;
6228 const auto *cb_access_context = GetAccessContext(commandBuffer);
6229 assert(cb_access_context);
6230 if (!cb_access_context) return skip;
6231
6232 const auto *context = cb_access_context->GetCurrentAccessContext();
6233 assert(context);
6234 if (!context) return skip;
6235
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006236 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006237
6238 if (dst_buffer) {
6239 const ResourceAccessRange range = MakeRange(dstOffset, 4);
6240 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
6241 if (hazard.hazard) {
6242 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
6243 "vkCmdWriteBufferMarkerAMD2: Hazard %s for dstBuffer %s. Access info %s.",
6244 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
John Zulauf14940722021-04-12 15:19:02 -06006245 cb_access_context->FormatUsage(hazard).c_str());
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006246 }
6247 }
6248 return skip;
6249}
6250
John Zulauf669dfd52021-01-27 17:15:28 -07006251void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
John Zulaufd5115702021-01-18 12:34:33 -07006252 events_.reserve(event_count);
6253 for (uint32_t event_index = 0; event_index < event_count; event_index++) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006254 events_.emplace_back(sync_state.Get<EVENT_STATE>(events[event_index]));
John Zulaufd5115702021-01-18 12:34:33 -07006255 }
6256}
John Zulauf6ce24372021-01-30 05:56:25 -07006257
John Zulauf36ef9282021-02-02 11:47:24 -07006258SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07006259 VkPipelineStageFlags2KHR stageMask)
Jeremy Gebben9f537102021-10-05 16:37:12 -06006260 : SyncOpBase(cmd), event_(sync_state.Get<EVENT_STATE>(event)), exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07006261
John Zulauf1bf30522021-09-03 15:39:06 -06006262bool SyncOpResetEvent::Validate(const CommandBufferAccessContext& cb_context) const {
6263 return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6264}
6265
John Zulaufbb890452021-12-14 11:30:18 -07006266bool SyncOpResetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
6267 auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf6ce24372021-01-30 05:56:25 -07006268 assert(events_context);
6269 bool skip = false;
6270 if (!events_context) return skip;
6271
John Zulaufbb890452021-12-14 11:30:18 -07006272 const auto &sync_state = exec_context.GetSyncState();
John Zulauf6ce24372021-01-30 05:56:25 -07006273 const auto *sync_event = events_context->Get(event_);
6274 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6275
John Zulauf1bf30522021-09-03 15:39:06 -06006276 if (sync_event->last_command_tag > base_tag) return skip; // if we validated this in recording of the secondary, don't repeat
6277
John Zulauf6ce24372021-01-30 05:56:25 -07006278 const char *const set_wait =
6279 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
6280 "hazards.";
6281 const char *message = set_wait; // Only one message this call.
6282 if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
6283 const char *vuid = nullptr;
6284 switch (sync_event->last_command) {
6285 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006286 case CMD_SETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006287 case CMD_SETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006288 // Needs a barrier between set and reset
6289 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
6290 break;
John Zulauf4edde622021-02-15 08:54:50 -07006291 case CMD_WAITEVENTS:
Tony-LunarG1364cf52021-11-17 16:10:11 -07006292 case CMD_WAITEVENTS2:
John Zulauf4edde622021-02-15 08:54:50 -07006293 case CMD_WAITEVENTS2KHR: {
John Zulauf6ce24372021-01-30 05:56:25 -07006294                // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask)
6295 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
6296 break;
6297 }
6298 default:
6299                // The only other valid last commands are CMD_NONE or a prior reset (asserted below).
John Zulauf4edde622021-02-15 08:54:50 -07006300 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
6301 (sync_event->last_command == CMD_RESETEVENT2KHR));
John Zulauf6ce24372021-01-30 05:56:25 -07006302 break;
6303 }
6304 if (vuid) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006305 skip |= sync_state.LogError(event_->event(), vuid, message, CmdName(),
6306 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006307 CommandTypeString(sync_event->last_command));
6308 }
6309 }
6310 return skip;
6311}
6312
John Zulauf8eda1562021-04-13 17:06:41 -06006313ResourceUsageTag SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
6314 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07006315 auto *events_context = cb_context->GetCurrentEventsContext();
6316 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006317 if (!events_context) return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006318
6319 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf8eda1562021-04-13 17:06:41 -06006320 if (!sync_event) return tag; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07006321
6322 // Update the event state
John Zulauf36ef9282021-02-02 11:47:24 -07006323 sync_event->last_command = cmd_;
John Zulauf610e28c2021-08-03 17:46:23 -06006324 sync_event->last_command_tag = tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006325 sync_event->unsynchronized_set = CMD_NONE;
6326 sync_event->ResetFirstScope();
6327 sync_event->barriers = 0U;
John Zulauf8eda1562021-04-13 17:06:41 -06006328
6329 return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006330}
6331
John Zulauf8eda1562021-04-13 17:06:41 -06006332bool SyncOpResetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006333 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6334 return DoValidate(*exec_context, base_tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006335}
6336
John Zulaufbb890452021-12-14 11:30:18 -07006337void SyncOpResetEvent::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006338
John Zulauf36ef9282021-02-02 11:47:24 -07006339SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07006340 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07006341 : SyncOpBase(cmd),
Jeremy Gebben9f537102021-10-05 16:37:12 -06006342 event_(sync_state.Get<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07006343 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
6344 dep_info_() {}
6345
6346SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
6347 const VkDependencyInfoKHR &dep_info)
6348 : SyncOpBase(cmd),
Jeremy Gebben9f537102021-10-05 16:37:12 -06006349 event_(sync_state.Get<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07006350 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
Tony-LunarG273f32f2021-09-28 08:56:30 -06006351 dep_info_(new safe_VkDependencyInfo(&dep_info)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07006352
6353bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulauf610e28c2021-08-03 17:46:23 -06006354 return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6355}
6356bool SyncOpSetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006357 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6358 assert(exec_context);
6359 return DoValidate(*exec_context, base_tag);
John Zulauf610e28c2021-08-03 17:46:23 -06006360}
6361
John Zulaufbb890452021-12-14 11:30:18 -07006362bool SyncOpSetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
John Zulauf6ce24372021-01-30 05:56:25 -07006363 bool skip = false;
6364
John Zulaufbb890452021-12-14 11:30:18 -07006365 const auto &sync_state = exec_context.GetSyncState();
6366 auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf6ce24372021-01-30 05:56:25 -07006367 assert(events_context);
6368 if (!events_context) return skip;
6369
6370 const auto *sync_event = events_context->Get(event_);
6371 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6372
John Zulauf610e28c2021-08-03 17:46:23 -06006373    if (sync_event->last_command_tag >= base_tag) return skip;  // for replay we don't want to revalidate internal "last command"
6374
John Zulauf6ce24372021-01-30 05:56:25 -07006375 const char *const reset_set =
6376 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
6377 "hazards.";
6378 const char *const wait =
6379 "%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";
6380
6381 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
John Zulauf4edde622021-02-15 08:54:50 -07006382 const char *vuid_stem = nullptr;
John Zulauf6ce24372021-01-30 05:56:25 -07006383 const char *message = nullptr;
6384 switch (sync_event->last_command) {
6385 case CMD_RESETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006386 case CMD_RESETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006387 case CMD_RESETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006388 // Needs a barrier between reset and set
John Zulauf4edde622021-02-15 08:54:50 -07006389 vuid_stem = "-missingbarrier-reset";
John Zulauf6ce24372021-01-30 05:56:25 -07006390 message = reset_set;
6391 break;
6392 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006393 case CMD_SETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006394 case CMD_SETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006395 // Needs a barrier between set and set
John Zulauf4edde622021-02-15 08:54:50 -07006396 vuid_stem = "-missingbarrier-set";
John Zulauf6ce24372021-01-30 05:56:25 -07006397 message = reset_set;
6398 break;
6399 case CMD_WAITEVENTS:
Tony-LunarG1364cf52021-11-17 16:10:11 -07006400 case CMD_WAITEVENTS2:
John Zulauf4edde622021-02-15 08:54:50 -07006401 case CMD_WAITEVENTS2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07006402 // Needs a barrier or is in second execution scope
John Zulauf4edde622021-02-15 08:54:50 -07006403 vuid_stem = "-missingbarrier-wait";
John Zulauf6ce24372021-01-30 05:56:25 -07006404 message = wait;
6405 break;
6406 default:
6407 // The only other valid last command that wasn't one.
6408                // The only other valid last command is CMD_NONE (asserted below).
6409 break;
6410 }
John Zulauf4edde622021-02-15 08:54:50 -07006411 if (vuid_stem) {
John Zulauf6ce24372021-01-30 05:56:25 -07006412 assert(nullptr != message);
John Zulauf4edde622021-02-15 08:54:50 -07006413 std::string vuid("SYNC-");
6414 vuid.append(CmdName()).append(vuid_stem);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006415 skip |= sync_state.LogError(event_->event(), vuid.c_str(), message, CmdName(),
6416 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006417 CommandTypeString(sync_event->last_command));
6418 }
6419 }
6420
6421 return skip;
6422}
6423
John Zulauf8eda1562021-04-13 17:06:41 -06006424ResourceUsageTag SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf36ef9282021-02-02 11:47:24 -07006425 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07006426 auto *events_context = cb_context->GetCurrentEventsContext();
6427 auto *access_context = cb_context->GetCurrentAccessContext();
6428 assert(events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006429 if (access_context && events_context) {
John Zulaufbb890452021-12-14 11:30:18 -07006430 ReplayRecord(tag, access_context, events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006431 }
6432 return tag;
6433}
John Zulauf6ce24372021-01-30 05:56:25 -07006434
John Zulaufbb890452021-12-14 11:30:18 -07006435void SyncOpSetEvent::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07006436 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf610e28c2021-08-03 17:46:23 -06006437 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07006438
6439 // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined
6440 // and we're issuing errors re: missing barriers between event commands, which if the user fixes would fix
6441 // any issues caused by naive scope setting here.
6442
6443 // What happens with two SetEvent is that one cannot know what group of operations will be waited for.
6444 // Given:
6445 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
6446 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
6447
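    // Descriptive note on the branches below: an unsynchronized re-set records the racing command in unsynchronized_set
    // and clears the first scope; otherwise the first SetEvent snapshots the accesses currently within the source scope
    // as the event's first scope (first_scope_set / first_scope_tag).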
6448 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
6449 sync_event->unsynchronized_set = sync_event->last_command;
6450 sync_event->ResetFirstScope();
John Zulauf78b1f892021-09-20 15:02:09 -06006451 } else if (!sync_event->first_scope_set) {
John Zulauf6ce24372021-01-30 05:56:25 -07006452 // We only set the scope if there isn't one
6453 sync_event->scope = src_exec_scope_;
6454
6455 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
6456 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
6457 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
6458 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
6459 }
6460 };
6461 access_context->ForAll(set_scope);
6462 sync_event->unsynchronized_set = CMD_NONE;
John Zulauf78b1f892021-09-20 15:02:09 -06006463 sync_event->first_scope_set = true;
John Zulauf6ce24372021-01-30 05:56:25 -07006464 sync_event->first_scope_tag = tag;
6465 }
John Zulauf4edde622021-02-15 08:54:50 -07006466 // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
6467 sync_event->last_command = cmd_;
John Zulauf610e28c2021-08-03 17:46:23 -06006468 sync_event->last_command_tag = tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006469 sync_event->barriers = 0U;
6470}
John Zulauf64ffe552021-02-06 10:25:07 -07006471
6472SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
6473 const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07006474 const VkSubpassBeginInfo *pSubpassBeginInfo)
6475 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006476 if (pRenderPassBegin) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006477 rp_state_ = sync_state.Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
John Zulauf64ffe552021-02-06 10:25:07 -07006478 renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006479 auto fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07006480 if (fb_state) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006481 shared_attachments_ = sync_state.GetAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
John Zulauf64ffe552021-02-06 10:25:07 -07006482 // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
6483            // Note that this is safe to persist as long as shared_attachments is not cleared
6484 attachments_.reserve(shared_attachments_.size());
sfricke-samsung01c9ae92021-02-09 22:30:52 -08006485 for (const auto &attachment : shared_attachments_) {
John Zulauf64ffe552021-02-06 10:25:07 -07006486 attachments_.emplace_back(attachment.get());
6487 }
6488 }
6489 if (pSubpassBeginInfo) {
6490 subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
6491 }
6492 }
6493}
6494
6495bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
6496 // Check if any of the layout transitions are hazardous.... but we don't have the renderpass context to work with, so we
6497 bool skip = false;
6498
6499 assert(rp_state_.get());
6500 if (nullptr == rp_state_.get()) return skip;
6501 auto &rp_state = *rp_state_.get();
6502
6503 const uint32_t subpass = 0;
6504
6505 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
6506 // hasn't happened yet)
6507 const std::vector<AccessContext> empty_context_vector;
6508 AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
6509 cb_context.GetCurrentAccessContext());
6510
6511 // Validate attachment operations
6512 if (attachments_.size() == 0) return skip;
6513 const auto &render_area = renderpass_begin_info_.renderArea;
John Zulaufd0ec59f2021-03-13 14:25:08 -07006514
6515 // Since the isn't a valid RenderPassAccessContext until Record, needs to create the view/generator list... we could limit this
6516 // by predicating on whether subpass 0 uses the attachment if it is too expensive to create the full list redundantly here.
6517 // More broadly we could look at thread specific state shared between Validate and Record as is done for other heavyweight
6518 // operations (though it's currently a messy approach)
6519 AttachmentViewGenVector view_gens = RenderPassAccessContext::CreateAttachmentViewGen(render_area, attachments_);
6520 skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07006521
6522 // Validate load operations if there were no layout transition hazards
6523 if (!skip) {
John Zulaufee984022022-04-13 16:39:50 -06006524 temp_context.RecordLayoutTransitions(rp_state, subpass, view_gens, kInvalidTag);
John Zulaufd0ec59f2021-03-13 14:25:08 -07006525 skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07006526 }
6527
6528 return skip;
6529}
6530
John Zulauf8eda1562021-04-13 17:06:41 -06006531ResourceUsageTag SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf64ffe552021-02-06 10:25:07 -07006532 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
6533 assert(rp_state_.get());
John Zulauf41a9c7c2021-12-07 15:59:53 -07006534 if (nullptr == rp_state_.get()) return cb_context->NextCommandTag(cmd_);
6535 return cb_context->RecordBeginRenderPass(cmd_, *rp_state_.get(), renderpass_begin_info_.renderArea, attachments_);
John Zulauf64ffe552021-02-06 10:25:07 -07006536}
6537
John Zulauf8eda1562021-04-13 17:06:41 -06006538bool SyncOpBeginRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006539 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006540 return false;
6541}
6542
John Zulaufbb890452021-12-14 11:30:18 -07006543void SyncOpBeginRenderPass::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context,
6544 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006545
John Zulauf64ffe552021-02-06 10:25:07 -07006546SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07006547 const VkSubpassEndInfo *pSubpassEndInfo)
6548 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006549 if (pSubpassBeginInfo) {
6550 subpass_begin_info_.initialize(pSubpassBeginInfo);
6551 }
6552 if (pSubpassEndInfo) {
6553 subpass_end_info_.initialize(pSubpassEndInfo);
6554 }
6555}
6556
6557bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
6558 bool skip = false;
6559 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
6560 if (!renderpass_context) return skip;
6561
6562 skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
6563 return skip;
6564}
6565
John Zulauf8eda1562021-04-13 17:06:41 -06006566ResourceUsageTag SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006567 return cb_context->RecordNextSubpass(cmd_);
John Zulauf8eda1562021-04-13 17:06:41 -06006568}
6569
6570bool SyncOpNextSubpass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006571 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006572 return false;
John Zulauf64ffe552021-02-06 10:25:07 -07006573}
6574
sfricke-samsung85584a72021-09-30 21:43:38 -07006575SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo)
6576 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006577 if (pSubpassEndInfo) {
6578 subpass_end_info_.initialize(pSubpassEndInfo);
6579 }
6580}
6581
John Zulaufbb890452021-12-14 11:30:18 -07006582void SyncOpNextSubpass::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
6583}
John Zulauf8eda1562021-04-13 17:06:41 -06006584
John Zulauf64ffe552021-02-06 10:25:07 -07006585bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
6586 bool skip = false;
6587 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
6588
6589 if (!renderpass_context) return skip;
6590 skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
6591 return skip;
6592}
6593
John Zulauf8eda1562021-04-13 17:06:41 -06006594ResourceUsageTag SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006595 return cb_context->RecordEndRenderPass(cmd_);
John Zulauf64ffe552021-02-06 10:25:07 -07006596}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006597
John Zulauf8eda1562021-04-13 17:06:41 -06006598bool SyncOpEndRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006599 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006600 return false;
6601}
6602
John Zulaufbb890452021-12-14 11:30:18 -07006603void SyncOpEndRenderPass::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context,
6604 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006605
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006606void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
6607 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
6608 StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
6609 auto *cb_access_context = GetAccessContext(commandBuffer);
6610 assert(cb_access_context);
6611 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
6612 auto *context = cb_access_context->GetCurrentAccessContext();
6613 assert(context);
6614
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006615 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006616
6617 if (dst_buffer) {
6618 const ResourceAccessRange range = MakeRange(dstOffset, 4);
6619 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
6620 }
6621}
John Zulaufd05c5842021-03-26 11:32:16 -06006622
John Zulaufae842002021-04-15 18:20:55 -06006623bool SyncValidator::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
6624 const VkCommandBuffer *pCommandBuffers) const {
6625 bool skip = StateTracker::PreCallValidateCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
6626 const char *func_name = "vkCmdExecuteCommands";
6627 const auto *cb_context = GetAccessContext(commandBuffer);
6628 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06006629
6630 // Heavyweight, but we need a proxy copy of the active command buffer access context
6631 CommandBufferAccessContext proxy_cb_context(*cb_context, CommandBufferAccessContext::AsProxyContext());
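    // The proxy accumulates state below: after each secondary is validated, its access log is imported and its context
    // is resolved into the proxy, so secondary N+1 is checked against the accesses of secondaries 0..N as well as the
    // primary's state at the ExecuteCommands call.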
John Zulaufae842002021-04-15 18:20:55 -06006632
6633 // Make working copies of the access and events contexts
John Zulaufae842002021-04-15 18:20:55 -06006634 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006635 proxy_cb_context.NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
6636
John Zulaufae842002021-04-15 18:20:55 -06006637 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
6638 if (!recorded_cb_context) continue;
John Zulauf4fa68462021-04-26 21:04:22 -06006639
6640 const auto *recorded_context = recorded_cb_context->GetCurrentAccessContext();
6641 assert(recorded_context);
6642 skip |= recorded_cb_context->ValidateFirstUse(&proxy_cb_context, func_name, cb_index);
6643
6644        // The barriers have already been applied in ValidateFirstUse
6645 ResourceUsageRange tag_range = proxy_cb_context.ImportRecordedAccessLog(*recorded_cb_context);
6646 proxy_cb_context.ResolveRecordedContext(*recorded_context, tag_range.begin);
John Zulaufae842002021-04-15 18:20:55 -06006647 }
6648
John Zulaufae842002021-04-15 18:20:55 -06006649 return skip;
6650}
6651
6652void SyncValidator::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
6653 const VkCommandBuffer *pCommandBuffers) {
6654 StateTracker::PreCallRecordCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
John Zulauf4fa68462021-04-26 21:04:22 -06006655 auto *cb_context = GetAccessContext(commandBuffer);
6656 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06006657 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006658 cb_context->NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
John Zulauf4fa68462021-04-26 21:04:22 -06006659 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
6660 if (!recorded_cb_context) continue;
6661 cb_context->RecordExecutedCommandBuffer(*recorded_cb_context, CMD_EXECUTECOMMANDS);
6662 }
John Zulaufae842002021-04-15 18:20:55 -06006663}
6664
John Zulaufd0ec59f2021-03-13 14:25:08 -07006665AttachmentViewGen::AttachmentViewGen(const IMAGE_VIEW_STATE *view, const VkOffset3D &offset, const VkExtent3D &extent)
6666 : view_(view), view_mask_(), gen_store_() {
6667 if (!view_ || !view_->image_state || !SimpleBinding(*view_->image_state)) return;
6668 const IMAGE_STATE &image_state = *view_->image_state.get();
6669 const auto base_address = ResourceBaseAddress(image_state);
6670 const auto *encoder = image_state.fragment_encoder.get();
6671 if (!encoder) return;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06006672 // Get offset and extent for the view, accounting for possible depth slicing
6673 const VkOffset3D zero_offset = view->GetOffset();
6674 const VkExtent3D &image_extent = view->GetExtent();
John Zulaufd0ec59f2021-03-13 14:25:08 -07006675 // Intentional copy
6676 VkImageSubresourceRange subres_range = view_->normalized_subresource_range;
6677 view_mask_ = subres_range.aspectMask;
6678 gen_store_[Gen::kViewSubresource].emplace(*encoder, subres_range, zero_offset, image_extent, base_address);
6679 gen_store_[Gen::kRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6680
6681 const auto depth = view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT;
6682 if (depth && (depth != view_mask_)) {
6683 subres_range.aspectMask = depth;
6684 gen_store_[Gen::kDepthOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6685 }
6686 const auto stencil = view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT;
6687 if (stencil && (stencil != view_mask_)) {
6688 subres_range.aspectMask = stencil;
6689 gen_store_[Gen::kStencilOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6690 }
6691}
6692
6693const ImageRangeGen *AttachmentViewGen::GetRangeGen(AttachmentViewGen::Gen gen_type) const {
6694 const ImageRangeGen *got = nullptr;
6695 switch (gen_type) {
6696 case kViewSubresource:
6697 got = &gen_store_[kViewSubresource];
6698 break;
6699 case kRenderArea:
6700 got = &gen_store_[kRenderArea];
6701 break;
6702 case kDepthOnlyRenderArea:
6703 got =
6704 (view_mask_ == VK_IMAGE_ASPECT_DEPTH_BIT) ? &gen_store_[Gen::kRenderArea] : &gen_store_[Gen::kDepthOnlyRenderArea];
6705 break;
6706 case kStencilOnlyRenderArea:
6707 got = (view_mask_ == VK_IMAGE_ASPECT_STENCIL_BIT) ? &gen_store_[Gen::kRenderArea]
6708 : &gen_store_[Gen::kStencilOnlyRenderArea];
6709 break;
6710 default:
6711 assert(got);
6712 }
6713 return got;
6714}
6715
6716AttachmentViewGen::Gen AttachmentViewGen::GetDepthStencilRenderAreaGenType(bool depth_op, bool stencil_op) const {
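    // Selection summary (descriptive comment):
    //   depth_op && stencil_op -> kRenderArea (both aspects of the render area)
    //   depth_op only          -> kDepthOnlyRenderArea
    //   stencil_op only        -> kStencilOnlyRenderArea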
6717 assert(IsValid());
6718 assert(view_mask_ & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
6719 if (depth_op) {
6720 assert(view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT);
6721 if (stencil_op) {
6722 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
6723 return kRenderArea;
6724 }
6725 return kDepthOnlyRenderArea;
6726 }
6727 if (stencil_op) {
6728 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
6729 return kStencilOnlyRenderArea;
6730 }
6731
6732 assert(depth_op || stencil_op);
6733 return kRenderArea;
6734}
6735
6736AccessAddressType AttachmentViewGen::GetAddressType() const { return AccessContext::ImageAddressType(*view_->image_state); }
John Zulauf8eda1562021-04-13 17:06:41 -06006737
6738void SyncEventsContext::ApplyBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
6739 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
6740 for (auto &event_pair : map_) {
6741 assert(event_pair.second); // Shouldn't be storing empty
6742 auto &sync_event = *event_pair.second;
6743 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
6744 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
6745 sync_event.barriers |= dst.exec_scope;
6746 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
6747 }
6748 }
6749}
John Zulaufbb890452021-12-14 11:30:18 -07006750
6751ReplayTrackbackBarriersAction::ReplayTrackbackBarriersAction(VkQueueFlags queue_flags,
6752 const SubpassDependencyGraphNode &subpass_dep,
6753 const std::vector<ReplayTrackbackBarriersAction> &replay_contexts) {
6754 bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
6755 trackback_barriers.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
6756 for (const auto &prev_dep : subpass_dep.prev) {
6757 const auto prev_pass = prev_dep.first->pass;
6758 const auto &prev_barriers = prev_dep.second;
6759 trackback_barriers.emplace_back(&replay_contexts[prev_pass], queue_flags, prev_barriers);
6760 }
6761 if (has_barrier_from_external) {
6762        // Store the barrier from external with the rest, but save the pointer for "by subpass" lookups.
6763 trackback_barriers.emplace_back(nullptr, queue_flags, subpass_dep.barrier_from_external);
6764 }
6765}
6766
6767void ReplayTrackbackBarriersAction::operator()(ResourceAccessState *access) const {
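    // With a single trackback the barrier is applied to the access state in place; otherwise each trackback is applied
    // to its own copy and the copies are merged via Resolve into the final state.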
6768 if (trackback_barriers.size() == 1) {
6769 trackback_barriers[0](access);
6770 } else {
6771 ResourceAccessState resolved;
6772 for (const auto &trackback : trackback_barriers) {
6773 ResourceAccessState access_copy = *access;
6774 trackback(&access_copy);
6775 resolved.Resolve(access_copy);
6776 }
6777 *access = resolved;
6778 }
6779}
6780
6781ReplayTrackbackBarriersAction::TrackbackBarriers::TrackbackBarriers(
6782 const ReplayTrackbackBarriersAction *source_subpass_, VkQueueFlags queue_flags_,
6783 const std::vector<const VkSubpassDependency2 *> &subpass_dependencies_)
6784 : Base(source_subpass_, queue_flags_, subpass_dependencies_) {}
6785
6786void ReplayTrackbackBarriersAction::TrackbackBarriers::operator()(ResourceAccessState *access) const {
6787 if (source_subpass) {
6788 (*source_subpass)(access);
6789 }
6790 access->ApplyBarriersImmediate(barriers);
6791}