Jeremy Gebben4d51c552022-01-06 21:27:15 -07001/* Copyright (c) 2019-2022 The Khronos Group Inc.
2 * Copyright (c) 2019-2022 Valve Corporation
3 * Copyright (c) 2019-2022 LunarG, Inc.
John Zulauf9cb530d2019-09-30 14:14:10 -06004 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 * Author: John Zulauf <jzulauf@lunarg.com>
John Zulaufab7756b2020-12-29 16:10:16 -070018 * Author: Locke Lin <locke@lunarg.com>
19 * Author: Jeremy Gebben <jeremyg@lunarg.com>
John Zulauf9cb530d2019-09-30 14:14:10 -060020 */
21
22#include <limits>
23#include <vector>
locke-lunarg296a3c92020-03-25 01:04:29 -060024#include <memory>
25#include <bitset>
John Zulauf9cb530d2019-09-30 14:14:10 -060026#include "synchronization_validation.h"
Jeremy Gebben5f585ae2021-02-02 09:03:06 -070027#include "sync_utils.h"
John Zulauf9cb530d2019-09-30 14:14:10 -060028
John Zulaufea943c52022-02-22 11:05:17 -070029// Utilities to DRY up Get... calls
30template <typename Map, typename Key = typename Map::key_type, typename RetVal = layer_data::optional<typename Map::mapped_type>>
31RetVal GetMappedOptional(const Map &map, const Key &key) {
32 RetVal ret_val;
33 auto it = map.find(key);
34 if (it != map.cend()) {
35 ret_val.emplace(it->second);
36 }
37 return ret_val;
38}
39template <typename Map, typename Fn>
40typename Map::mapped_type GetMapped(const Map &map, const typename Map::key_type &key, Fn &&default_factory) {
41 auto value = GetMappedOptional(map, key);
42 return (value) ? *value : default_factory();
43}
44
45template <typename Map, typename Fn>
46typename Map::mapped_type GetMappedInsert(Map &map, const typename Map::key_type &key, Fn &&default_factory) {
47 auto value = GetMappedOptional(map, key);
48 if (value) {
49 return *value;
50 }
51 auto insert_it = map.emplace(std::make_pair(key, default_factory()));
52 assert(insert_it.second);
53
54 return insert_it.first->second;
55}
56
57template <typename Map, typename Key = typename Map::key_type, typename Mapped = typename Map::mapped_type,
58 typename Value = typename Mapped::element_type>
59Value *GetMappedPlainFromShared(const Map &map, const Key &key) {
60 auto value = GetMappedOptional<Map, Key>(map, key);
61 if (value) return value->get();
62 return nullptr;
63}
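
// Illustrative usage sketch for the helpers above (not part of the layer; the map and key below are
// hypothetical and only show the calling pattern):
//
//     std::map<uint32_t, std::shared_ptr<ResourceAccessState>> example_map;
//     // Lookup that falls back to the factory result without modifying the map
//     auto found = GetMapped(example_map, 7U, []() { return std::shared_ptr<ResourceAccessState>(); });
//     // Lookup that inserts the factory result when the key is absent
//     auto found_or_inserted = GetMappedInsert(example_map, 7U, []() { return std::make_shared<ResourceAccessState>(); });
//     // Non-owning pointer to the shared_ptr-held value, or nullptr when the key is absent
//     ResourceAccessState *plain = GetMappedPlainFromShared(example_map, 7U);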
64
Jeremy Gebben6fbf8242021-06-21 09:14:46 -060065static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.Binding(); }
John Zulauf264cce02021-02-05 14:40:47 -070066
John Zulauf29d00532021-03-04 13:28:54 -070067static bool SimpleBinding(const IMAGE_STATE &image_state) {
Jeremy Gebben62c3bf42021-07-21 15:38:24 -060068 bool simple =
Jeremy Gebben82e11d52021-07-26 09:19:37 -060069 SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.IsSwapchainImage() || image_state.bind_swapchain;
John Zulauf29d00532021-03-04 13:28:54 -070070
71 // If it's not simple we must have an encoder.
72 assert(!simple || image_state.fragment_encoder.get());
73 return simple;
74}
75
John Zulauf4fa68462021-04-26 21:04:22 -060076static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
77static const std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
John Zulauf43cc7462020-12-03 12:33:12 -070078 AccessAddressType::kLinear, AccessAddressType::kIdealized};
79
John Zulaufd5115702021-01-18 12:34:33 -070080static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; };
John Zulauf264cce02021-02-05 14:40:47 -070081static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
82 return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
83}
John Zulaufd5115702021-01-18 12:34:33 -070084
John Zulauf9cb530d2019-09-30 14:14:10 -060085static const char *string_SyncHazardVUID(SyncHazard hazard) {
86 switch (hazard) {
87 case SyncHazard::NONE:
John Zulauf2f952d22020-02-10 11:34:51 -070088 return "SYNC-HAZARD-NONE";
John Zulauf9cb530d2019-09-30 14:14:10 -060089 break;
90 case SyncHazard::READ_AFTER_WRITE:
91 return "SYNC-HAZARD-READ_AFTER_WRITE";
92 break;
93 case SyncHazard::WRITE_AFTER_READ:
94 return "SYNC-HAZARD-WRITE_AFTER_READ";
95 break;
96 case SyncHazard::WRITE_AFTER_WRITE:
97 return "SYNC-HAZARD-WRITE_AFTER_WRITE";
98 break;
John Zulauf2f952d22020-02-10 11:34:51 -070099 case SyncHazard::READ_RACING_WRITE:
100 return "SYNC-HAZARD-READ-RACING-WRITE";
101 break;
102 case SyncHazard::WRITE_RACING_WRITE:
103 return "SYNC-HAZARD-WRITE-RACING-WRITE";
104 break;
105 case SyncHazard::WRITE_RACING_READ:
106 return "SYNC-HAZARD-WRITE-RACING-READ";
107 break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600108 default:
109 assert(0);
110 }
111 return "SYNC-HAZARD-INVALID";
112}
113
John Zulauf59e25072020-07-17 10:55:21 -0600114static bool IsHazardVsRead(SyncHazard hazard) {
115 switch (hazard) {
116 case SyncHazard::NONE:
117 return false;
118 break;
119 case SyncHazard::READ_AFTER_WRITE:
120 return false;
121 break;
122 case SyncHazard::WRITE_AFTER_READ:
123 return true;
124 break;
125 case SyncHazard::WRITE_AFTER_WRITE:
126 return false;
127 break;
128 case SyncHazard::READ_RACING_WRITE:
129 return false;
130 break;
131 case SyncHazard::WRITE_RACING_WRITE:
132 return false;
133 break;
134 case SyncHazard::WRITE_RACING_READ:
135 return true;
136 break;
137 default:
138 assert(0);
139 }
140 return false;
141}
142
John Zulauf9cb530d2019-09-30 14:14:10 -0600143static const char *string_SyncHazard(SyncHazard hazard) {
144 switch (hazard) {
145 case SyncHazard::NONE:
   146            return "NONE";
147 break;
148 case SyncHazard::READ_AFTER_WRITE:
149 return "READ_AFTER_WRITE";
150 break;
151 case SyncHazard::WRITE_AFTER_READ:
152 return "WRITE_AFTER_READ";
153 break;
154 case SyncHazard::WRITE_AFTER_WRITE:
155 return "WRITE_AFTER_WRITE";
156 break;
John Zulauf2f952d22020-02-10 11:34:51 -0700157 case SyncHazard::READ_RACING_WRITE:
158 return "READ_RACING_WRITE";
159 break;
160 case SyncHazard::WRITE_RACING_WRITE:
161 return "WRITE_RACING_WRITE";
162 break;
163 case SyncHazard::WRITE_RACING_READ:
164 return "WRITE_RACING_READ";
165 break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600166 default:
167 assert(0);
168 }
169 return "INVALID HAZARD";
170}
171
John Zulauf37ceaed2020-07-03 16:18:15 -0600172static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
173 // Return the info for the first bit found
174 const SyncStageAccessInfoType *info = nullptr;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700175 for (size_t i = 0; i < flags.size(); i++) {
176 if (flags.test(i)) {
177 info = &syncStageAccessInfoByStageAccessIndex[i];
178 break;
John Zulauf37ceaed2020-07-03 16:18:15 -0600179 }
180 }
181 return info;
182}
183
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700184static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
John Zulauf59e25072020-07-17 10:55:21 -0600185 std::string out_str;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700186 if (flags.none()) {
John Zulauf389c34b2020-07-28 11:19:35 -0600187 out_str = "0";
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700188 } else {
189 for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
190 const auto &info = syncStageAccessInfoByStageAccessIndex[i];
191 if ((flags & info.stage_access_bit).any()) {
192 if (!out_str.empty()) {
193 out_str.append(sep);
194 }
195 out_str.append(info.name);
John Zulauf59e25072020-07-17 10:55:21 -0600196 }
John Zulauf59e25072020-07-17 10:55:21 -0600197 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700198 if (out_str.length() == 0) {
199 out_str.append("Unhandled SyncStageAccess");
200 }
John Zulauf59e25072020-07-17 10:55:21 -0600201 }
202 return out_str;
203}
204
John Zulauf14940722021-04-12 15:19:02 -0600205static std::string string_UsageTag(const ResourceUsageRecord &tag) {
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700206 std::stringstream out;
207
John Zulauffaea0ee2021-01-14 14:01:32 -0700208 out << "command: " << CommandTypeString(tag.command);
209 out << ", seq_no: " << tag.seq_num;
210 if (tag.sub_command != 0) {
211 out << ", subcmd: " << tag.sub_command;
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700212 }
213 return out.str();
214}
John Zulauf4fa68462021-04-26 21:04:22 -0600215static std::string string_UsageIndex(SyncStageAccessIndex usage_index) {
216 const char *stage_access_name = "INVALID_STAGE_ACCESS";
217 if (usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size())) {
218 stage_access_name = syncStageAccessInfoByStageAccessIndex[usage_index].name;
219 }
220 return std::string(stage_access_name);
221}
222
223struct NoopBarrierAction {
224 explicit NoopBarrierAction() {}
225 void operator()(ResourceAccessState *access) const {}
John Zulauf5c628d02021-05-04 15:46:36 -0600226 const bool layout_transition = false;
John Zulauf4fa68462021-04-26 21:04:22 -0600227};
228
   229// NOTE: Make sure the proxy doesn't outlive 'from', as the proxy points directly to access contexts owned by 'from'.
230CommandBufferAccessContext::CommandBufferAccessContext(const CommandBufferAccessContext &from, AsProxyContext dummy)
231 : CommandBufferAccessContext(from.sync_state_) {
232 // Copy only the needed fields out of from for a temporary, proxy command buffer context
233 cb_state_ = from.cb_state_;
234 queue_flags_ = from.queue_flags_;
235 destroyed_ = from.destroyed_;
236 access_log_ = from.access_log_; // potentially large, but no choice given tagging lookup.
237 command_number_ = from.command_number_;
238 subcommand_number_ = from.subcommand_number_;
239 reset_count_ = from.reset_count_;
240
241 const auto *from_context = from.GetCurrentAccessContext();
242 assert(from_context);
243
244 // Construct a fully resolved single access context out of from
245 const NoopBarrierAction noop_barrier;
246 for (AccessAddressType address_type : kAddressTypes) {
247 from_context->ResolveAccessRange(address_type, kFullRange, noop_barrier,
248 &cb_access_context_.GetAccessStateMap(address_type), nullptr);
249 }
   250    // The proxy has flattened the current render pass context (if any), but the async contexts are needed for hazard detection
251 cb_access_context_.ImportAsyncContexts(*from_context);
252
253 events_context_ = from.events_context_;
254
255 // We don't want to copy the full render_pass_context_ history just for the proxy.
256}
257
258std::string CommandBufferAccessContext::FormatUsage(const ResourceUsageTag tag) const {
259 std::stringstream out;
260 assert(tag < access_log_.size());
261 const auto &record = access_log_[tag];
262 out << string_UsageTag(record);
263 if (record.cb_state != cb_state_.get()) {
264 out << ", command_buffer: " << sync_state_->report_data->FormatHandle(record.cb_state->commandBuffer()).c_str();
265 if (record.cb_state->Destroyed()) {
266 out << " (destroyed)";
267 }
268
John Zulauf4fa68462021-04-26 21:04:22 -0600269 }
John Zulaufd142c9a2022-04-12 14:22:44 -0600270 out << ", reset_no: " << std::to_string(record.reset_count);
John Zulauf4fa68462021-04-26 21:04:22 -0600271 return out.str();
272}
273std::string CommandBufferAccessContext::FormatUsage(const ResourceFirstAccess &access) const {
274 std::stringstream out;
275 out << "(recorded_usage: " << string_UsageIndex(access.usage_index);
276 out << ", " << FormatUsage(access.tag) << ")";
277 return out.str();
278}
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700279
John Zulauffaea0ee2021-01-14 14:01:32 -0700280std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
John Zulauf37ceaed2020-07-03 16:18:15 -0600281 const auto &tag = hazard.tag;
John Zulauf59e25072020-07-17 10:55:21 -0600282 assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
283 const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
John Zulauf1dae9192020-06-16 15:46:44 -0600284 std::stringstream out;
John Zulauf37ceaed2020-07-03 16:18:15 -0600285 const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
286 const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
John Zulauf4fa68462021-04-26 21:04:22 -0600287 out << "(";
288 if (!hazard.recorded_access.get()) {
   289        // If we have a recorded usage, the usage is reported from the recorded context's point of view
290 out << "usage: " << usage_info.name << ", ";
291 }
292 out << "prior_usage: " << stage_access_name;
John Zulauf59e25072020-07-17 10:55:21 -0600293 if (IsHazardVsRead(hazard.hazard)) {
294 const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
Jeremy Gebben40a22942020-12-22 14:22:06 -0700295 out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
John Zulauf59e25072020-07-17 10:55:21 -0600296 } else {
297 SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
298 out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
299 }
300
ziga-lunarg0f248902022-03-24 16:42:45 +0100301 if (tag < access_log_.size()) {
302 out << ", " << FormatUsage(tag) << ")";
303 }
John Zulauf1dae9192020-06-16 15:46:44 -0600304 return out.str();
305}
306
John Zulaufd14743a2020-07-03 09:42:39 -0600307// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
308// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
309// also reflects this special case for read hazard detection (using access instead of exec scope)
Jeremy Gebben40a22942020-12-22 14:22:06 -0700310static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700311static const SyncStageAccessFlags kColorAttachmentAccessScope =
312 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
313 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
314 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
315 SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
Jeremy Gebben40a22942020-12-22 14:22:06 -0700316static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
317 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700318static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
319 SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
320 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
321 SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -0700322static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
John Zulauf8e3c3e92021-01-06 11:19:36 -0700323static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;
John Zulaufb027cdb2020-05-21 14:25:22 -0600324
John Zulauf8e3c3e92021-01-06 11:19:36 -0700325ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
Jeremy Gebben40a22942020-12-22 14:22:06 -0700326 {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
John Zulauf8e3c3e92021-01-06 11:19:36 -0700327 {kColorAttachmentExecScope, kColorAttachmentAccessScope},
328 {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
329 {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};
330
John Zulaufee984022022-04-13 16:39:50 -0600331// Sometimes we have an internal access conflict, and we use kInvalidTag to set and detect it in temporary/proxy contexts
332static const ResourceUsageTag kInvalidTag(ResourceUsageRecord::kMaxIndex);
John Zulaufb027cdb2020-05-21 14:25:22 -0600333
Jeremy Gebben62c3bf42021-07-21 15:38:24 -0600334static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) { return bindable.GetFakeBaseAddress(); }
John Zulaufb02c1eb2020-10-06 16:33:36 -0600335
locke-lunarg3c038002020-04-30 23:08:08 -0600336inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
337 if (size == VK_WHOLE_SIZE) {
338 return (whole_size - offset);
339 }
340 return size;
341}
342
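// Worked example (illustrative only): with whole_size = 1024 and offset = 256, a size of VK_WHOLE_SIZE
// resolves to 1024 - 256 = 768, while an explicit size (e.g. 64) is returned unchanged.
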
John Zulauf3e86bf02020-09-12 10:47:57 -0600343static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
344 return GetRealWholeSize(offset, size, buf_state.createInfo.size);
345}
346
John Zulauf16adfc92020-04-08 10:28:33 -0600347template <typename T>
John Zulauf355e49b2020-04-24 15:11:15 -0600348static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
John Zulauf16adfc92020-04-08 10:28:33 -0600349 return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
350}
351
John Zulauf355e49b2020-04-24 15:11:15 -0600352static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }
John Zulauf16adfc92020-04-08 10:28:33 -0600353
John Zulauf3e86bf02020-09-12 10:47:57 -0600354static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
355 return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
356}
357
358static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
359 return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
360}
361
John Zulauf4a6105a2020-11-17 15:11:05 -0700362// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
363//
John Zulauf10f1f522020-12-18 12:00:35 -0700364// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
365//
John Zulauf4a6105a2020-11-17 15:11:05 -0700366// Usage:
367// Constructor() -- initializes the generator to point to the begin of the space declared.
   368// * -- the current range of the generator; an empty range signifies end
369// ++ -- advance to the next non-empty range (or end)
370
371// A wrapper for a single range with the same semantics as the actual generators below
372template <typename KeyType>
373class SingleRangeGenerator {
374 public:
375 SingleRangeGenerator(const KeyType &range) : current_(range) {}
John Zulaufd5115702021-01-18 12:34:33 -0700376 const KeyType &operator*() const { return current_; }
377 const KeyType *operator->() const { return &current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700378 SingleRangeGenerator &operator++() {
379 current_ = KeyType(); // just one real range
380 return *this;
381 }
382
383 bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }
384
385 private:
386 SingleRangeGenerator() = default;
387 const KeyType range_;
388 KeyType current_;
389};
390
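// Illustrative sketch of the "one time through" idiom described above (not part of the layer; the
// range bounds are arbitrary values chosen for the example):
//
//     SingleRangeGenerator<ResourceAccessRange> gen(ResourceAccessRange(0, 256));
//     for (; gen->non_empty(); ++gen) {
//         const ResourceAccessRange &range = *gen;  // current, non-empty range
//         // ... visit range ...
//     }
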
John Zulaufae842002021-04-15 18:20:55 -0600391// Generate the ranges that are the intersection of range and the entries in the RangeMap
392template <typename RangeMap, typename KeyType = typename RangeMap::key_type>
393class MapRangesRangeGenerator {
John Zulauf4a6105a2020-11-17 15:11:05 -0700394 public:
John Zulaufd5115702021-01-18 12:34:33 -0700395 // Default constructed is safe to dereference for "empty" test, but for no other operation.
John Zulaufae842002021-04-15 18:20:55 -0600396 MapRangesRangeGenerator() : range_(), map_(nullptr), map_pos_(), current_() {
John Zulaufd5115702021-01-18 12:34:33 -0700397 // Default construction for KeyType *must* be empty range
398 assert(current_.empty());
399 }
John Zulaufae842002021-04-15 18:20:55 -0600400 MapRangesRangeGenerator(const RangeMap &filter, const KeyType &range) : range_(range), map_(&filter), map_pos_(), current_() {
John Zulauf4a6105a2020-11-17 15:11:05 -0700401 SeekBegin();
402 }
John Zulaufae842002021-04-15 18:20:55 -0600403 MapRangesRangeGenerator(const MapRangesRangeGenerator &from) = default;
John Zulaufd5115702021-01-18 12:34:33 -0700404
John Zulauf4a6105a2020-11-17 15:11:05 -0700405 const KeyType &operator*() const { return current_; }
406 const KeyType *operator->() const { return &current_; }
John Zulaufae842002021-04-15 18:20:55 -0600407 MapRangesRangeGenerator &operator++() {
408 ++map_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700409 UpdateCurrent();
410 return *this;
411 }
412
John Zulaufae842002021-04-15 18:20:55 -0600413 bool operator==(const MapRangesRangeGenerator &other) const { return current_ == other.current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700414
John Zulaufae842002021-04-15 18:20:55 -0600415 protected:
John Zulauf4a6105a2020-11-17 15:11:05 -0700416 void UpdateCurrent() {
John Zulaufae842002021-04-15 18:20:55 -0600417 if (map_pos_ != map_->cend()) {
418 current_ = range_ & map_pos_->first;
John Zulauf4a6105a2020-11-17 15:11:05 -0700419 } else {
420 current_ = KeyType();
421 }
422 }
423 void SeekBegin() {
John Zulaufae842002021-04-15 18:20:55 -0600424 map_pos_ = map_->lower_bound(range_);
John Zulauf4a6105a2020-11-17 15:11:05 -0700425 UpdateCurrent();
426 }
John Zulaufae842002021-04-15 18:20:55 -0600427
428 // Adding this functionality here, to avoid gratuitous Base:: qualifiers in the derived class
   429    // Note: Not exposed in this class's public interface, to encourage using a consistent ++/empty generator semantic
430 template <typename Pred>
431 MapRangesRangeGenerator &PredicatedIncrement(Pred &pred) {
432 do {
433 ++map_pos_;
434 } while (map_pos_ != map_->cend() && map_pos_->first.intersects(range_) && !pred(map_pos_));
435 UpdateCurrent();
436 return *this;
437 }
438
John Zulauf4a6105a2020-11-17 15:11:05 -0700439 const KeyType range_;
John Zulaufae842002021-04-15 18:20:55 -0600440 const RangeMap *map_;
441 typename RangeMap::const_iterator map_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700442 KeyType current_;
443};
John Zulaufd5115702021-01-18 12:34:33 -0700444using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
John Zulaufae842002021-04-15 18:20:55 -0600445using EventSimpleRangeGenerator = MapRangesRangeGenerator<SyncEventState::ScopeMap>;
John Zulauf4a6105a2020-11-17 15:11:05 -0700446
John Zulaufae842002021-04-15 18:20:55 -0600447// Generate the ranges for entries meeting the predicate that are the intersection of range and the entries in the RangeMap
448template <typename RangeMap, typename Predicate, typename KeyType = typename RangeMap::key_type>
449class PredicatedMapRangesRangeGenerator : public MapRangesRangeGenerator<RangeMap, KeyType> {
450 public:
451 using Base = MapRangesRangeGenerator<RangeMap, KeyType>;
452 // Default constructed is safe to dereference for "empty" test, but for no other operation.
453 PredicatedMapRangesRangeGenerator() : Base(), pred_() {}
454 PredicatedMapRangesRangeGenerator(const RangeMap &filter, const KeyType &range, Predicate pred)
455 : Base(filter, range), pred_(pred) {}
456 PredicatedMapRangesRangeGenerator(const PredicatedMapRangesRangeGenerator &from) = default;
457
458 PredicatedMapRangesRangeGenerator &operator++() {
459 Base::PredicatedIncrement(pred_);
460 return *this;
461 }
462
463 protected:
464 Predicate pred_;
465};
John Zulauf4a6105a2020-11-17 15:11:05 -0700466
467// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
John Zulaufae842002021-04-15 18:20:55 -0600468// Templated to allow for different Range generators or map sources...
469template <typename RangeMap, typename RangeGen, typename KeyType = typename RangeMap::key_type>
John Zulauf4a6105a2020-11-17 15:11:05 -0700470class FilteredGeneratorGenerator {
471 public:
John Zulaufd5115702021-01-18 12:34:33 -0700472 // Default constructed is safe to dereference for "empty" test, but for no other operation.
473 FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
474 // Default construction for KeyType *must* be empty range
475 assert(current_.empty());
476 }
John Zulaufae842002021-04-15 18:20:55 -0600477 FilteredGeneratorGenerator(const RangeMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
John Zulauf4a6105a2020-11-17 15:11:05 -0700478 SeekBegin();
479 }
John Zulaufd5115702021-01-18 12:34:33 -0700480 FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
John Zulauf4a6105a2020-11-17 15:11:05 -0700481 const KeyType &operator*() const { return current_; }
482 const KeyType *operator->() const { return &current_; }
483 FilteredGeneratorGenerator &operator++() {
484 KeyType gen_range = GenRange();
485 KeyType filter_range = FilterRange();
486 current_ = KeyType();
487 while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
488 if (gen_range.end > filter_range.end) {
489 // if the generated range is beyond the filter_range, advance the filter range
490 filter_range = AdvanceFilter();
491 } else {
492 gen_range = AdvanceGen();
493 }
494 current_ = gen_range & filter_range;
495 }
496 return *this;
497 }
498
499 bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }
500
501 private:
502 KeyType AdvanceFilter() {
503 ++filter_pos_;
504 auto filter_range = FilterRange();
505 if (filter_range.valid()) {
506 FastForwardGen(filter_range);
507 }
508 return filter_range;
509 }
510 KeyType AdvanceGen() {
John Zulaufd5115702021-01-18 12:34:33 -0700511 ++gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700512 auto gen_range = GenRange();
513 if (gen_range.valid()) {
514 FastForwardFilter(gen_range);
515 }
516 return gen_range;
517 }
518
519 KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
John Zulaufd5115702021-01-18 12:34:33 -0700520 KeyType GenRange() const { return *gen_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700521
522 KeyType FastForwardFilter(const KeyType &range) {
523 auto filter_range = FilterRange();
524 int retry_count = 0;
John Zulauf10f1f522020-12-18 12:00:35 -0700525 const static int kRetryLimit = 2; // TODO -- determine whether this limit is optimal
John Zulauf4a6105a2020-11-17 15:11:05 -0700526 while (!filter_range.empty() && (filter_range.end <= range.begin)) {
527 if (retry_count < kRetryLimit) {
528 ++filter_pos_;
529 filter_range = FilterRange();
530 retry_count++;
531 } else {
532 // Okay we've tried walking, do a seek.
533 filter_pos_ = filter_->lower_bound(range);
534 break;
535 }
536 }
537 return FilterRange();
538 }
539
   540    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
541 // faster.
542 KeyType FastForwardGen(const KeyType &range) {
543 auto gen_range = GenRange();
544 while (!gen_range.empty() && (gen_range.end <= range.begin)) {
John Zulaufd5115702021-01-18 12:34:33 -0700545 ++gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700546 gen_range = GenRange();
547 }
548 return gen_range;
549 }
550
551 void SeekBegin() {
552 auto gen_range = GenRange();
553 if (gen_range.empty()) {
554 current_ = KeyType();
555 filter_pos_ = filter_->cend();
556 } else {
557 filter_pos_ = filter_->lower_bound(gen_range);
558 current_ = gen_range & FilterRange();
559 }
560 }
561
John Zulaufae842002021-04-15 18:20:55 -0600562 const RangeMap *filter_;
John Zulaufd5115702021-01-18 12:34:33 -0700563 RangeGen gen_;
John Zulaufae842002021-04-15 18:20:55 -0600564 typename RangeMap::const_iterator filter_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700565 KeyType current_;
566};
567
568using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;
569
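// Illustrative sketch of consuming the filtered generator (not part of the layer): 'scope_map' and
// 'image_range_gen' are hypothetical stand-ins for an event scope map and an image range generator
// built elsewhere.
//
//     EventImageRangeGenerator filtered(scope_map, image_range_gen);
//     for (; filtered->non_empty(); ++filtered) {
//         const auto &range = *filtered;  // a range present in both the filter map and the generator
//         // ... detect/apply over range ...
//     }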
John Zulauf5c5e88d2019-12-26 11:22:02 -0700570
John Zulauf3e86bf02020-09-12 10:47:57 -0600571ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
572 VkDeviceSize stride) {
573 VkDeviceSize range_start = offset + first_index * stride;
574 VkDeviceSize range_size = 0;
locke-lunargff255f92020-05-13 18:53:52 -0600575 if (count == UINT32_MAX) {
576 range_size = buf_whole_size - range_start;
577 } else {
578 range_size = count * stride;
579 }
John Zulauf3e86bf02020-09-12 10:47:57 -0600580 return MakeRange(range_start, range_size);
locke-lunargff255f92020-05-13 18:53:52 -0600581}
582
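// Worked example (illustrative only): offset = 256, first_index = 4, count = 16, stride = 32 gives
// range_start = 256 + 4 * 32 = 384 and range_size = 16 * 32 = 512, i.e. the range [384, 896);
// count == UINT32_MAX instead sizes the range to the end of the buffer (buf_whole_size - range_start).
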
locke-lunarg654e3692020-06-04 17:19:15 -0600583SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
584 VkShaderStageFlagBits stage_flag) {
585 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
586 assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
587 return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
588 }
589 auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
590 if (stage_access == syncStageAccessMaskByShaderStage.end()) {
591 assert(0);
592 }
593 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
594 return stage_access->second.uniform_read;
595 }
596
   597    // If the descriptorSet is writable, we don't need to care about SHADER_READ; SHADER_WRITE is enough,
   598    // because if a write hazard happens, a read hazard might or might not happen,
   599    // but if a write hazard doesn't happen, a read hazard is impossible.
600 if (descriptor_data.is_writable) {
Jeremy Gebben40a22942020-12-22 14:22:06 -0700601 return stage_access->second.storage_write;
locke-lunarg654e3692020-06-04 17:19:15 -0600602 }
Jeremy Gebben40a22942020-12-22 14:22:06 -0700603 // TODO: sampled_read
604 return stage_access->second.storage_read;
locke-lunarg654e3692020-06-04 17:19:15 -0600605}
606
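// Illustrative sketch (not part of the layer): a writable storage buffer bound to a fragment shader
// resolves to the fragment stage's storage_write index. 'descriptor_data' is a hypothetical
// interface_var with is_writable == true.
//
//     const SyncStageAccessIndex index = GetSyncStageAccessIndexsByDescriptorSet(
//         VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, descriptor_data, VK_SHADER_STAGE_FRAGMENT_BIT);
//     // index == the storage_write member of the fragment-stage entry in syncStageAccessMaskByShaderStage
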
locke-lunarg37047832020-06-12 13:44:45 -0600607bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
   608    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
   609            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
   610            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL);
613}
614
615bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
   616    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
   617            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
   618            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL);
621}
622
John Zulauf355e49b2020-04-24 15:11:15 -0600623// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
John Zulaufb02c1eb2020-10-06 16:33:36 -0600624template <typename Action>
625static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
626 Action &action) {
627 // At this point the "apply over range" logic only supports a single memory binding
628 if (!SimpleBinding(image_state)) return;
629 auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600630 const auto base_address = ResourceBaseAddress(image_state);
John Zulauf150e5332020-12-03 08:52:52 -0700631 subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
632 image_state.createInfo.extent, base_address);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600633 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -0700634 action(*range_gen);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600635 }
636}
637
John Zulauf7635de32020-05-29 17:14:15 -0600638// Traverse the attachment resolves for a specific subpass, and apply action() to them.
639// Used by both validation and record operations
640//
   641// The signature for Action() reflects the needs of both uses.
642template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -0700643void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
644 uint32_t subpass) {
John Zulauf7635de32020-05-29 17:14:15 -0600645 const auto &rp_ci = rp_state.createInfo;
646 const auto *attachment_ci = rp_ci.pAttachments;
647 const auto &subpass_ci = rp_ci.pSubpasses[subpass];
648
   649    // Color resolves -- require an in-use color attachment and a matching in-use resolve attachment
650 const auto *color_attachments = subpass_ci.pColorAttachments;
651 const auto *color_resolve = subpass_ci.pResolveAttachments;
652 if (color_resolve && color_attachments) {
653 for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
654 const auto &color_attach = color_attachments[i].attachment;
655 const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
656 if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
657 action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
John Zulaufd0ec59f2021-03-13 14:25:08 -0700658 AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ,
659 SyncOrdering::kColorAttachment);
John Zulauf7635de32020-05-29 17:14:15 -0600660 action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
John Zulaufd0ec59f2021-03-13 14:25:08 -0700661 AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
662 SyncOrdering::kColorAttachment);
John Zulauf7635de32020-05-29 17:14:15 -0600663 }
664 }
665 }
666
667 // Depth stencil resolve only if the extension is present
Mark Lobodzinski1f887d32020-12-30 15:31:33 -0700668 const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
John Zulauf7635de32020-05-29 17:14:15 -0600669 if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
670 (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
671 (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
672 const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
673 const auto src_ci = attachment_ci[src_at];
674 // The formats are required to match so we can pick either
675 const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
676 const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
677 const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
John Zulauf7635de32020-05-29 17:14:15 -0600678
679 // Figure out which aspects are actually touched during resolve operations
680 const char *aspect_string = nullptr;
John Zulaufd0ec59f2021-03-13 14:25:08 -0700681 AttachmentViewGen::Gen gen_type = AttachmentViewGen::Gen::kRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600682 if (resolve_depth && resolve_stencil) {
John Zulauf7635de32020-05-29 17:14:15 -0600683 aspect_string = "depth/stencil";
684 } else if (resolve_depth) {
685 // Validate depth only
John Zulaufd0ec59f2021-03-13 14:25:08 -0700686 gen_type = AttachmentViewGen::Gen::kDepthOnlyRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600687 aspect_string = "depth";
688 } else if (resolve_stencil) {
689 // Validate all stencil only
John Zulaufd0ec59f2021-03-13 14:25:08 -0700690 gen_type = AttachmentViewGen::Gen::kStencilOnlyRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600691 aspect_string = "stencil";
692 }
693
John Zulaufd0ec59f2021-03-13 14:25:08 -0700694 if (aspect_string) {
695 action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at], gen_type,
696 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster);
697 action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at], gen_type,
698 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulauf7635de32020-05-29 17:14:15 -0600699 }
700 }
701}
702
703// Action for validating resolve operations
704class ValidateResolveAction {
705 public:
John Zulauffaea0ee2021-01-14 14:01:32 -0700706 ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
John Zulaufbb890452021-12-14 11:30:18 -0700707 const CommandExecutionContext &exec_context, const char *func_name)
John Zulauf7635de32020-05-29 17:14:15 -0600708 : render_pass_(render_pass),
709 subpass_(subpass),
710 context_(context),
John Zulaufbb890452021-12-14 11:30:18 -0700711 exec_context_(exec_context),
John Zulauf7635de32020-05-29 17:14:15 -0600712 func_name_(func_name),
713 skip_(false) {}
714 void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
John Zulaufd0ec59f2021-03-13 14:25:08 -0700715 const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage,
716 SyncOrdering ordering_rule) {
John Zulauf7635de32020-05-29 17:14:15 -0600717 HazardResult hazard;
John Zulaufd0ec59f2021-03-13 14:25:08 -0700718 hazard = context_.DetectHazard(view_gen, gen_type, current_usage, ordering_rule);
John Zulauf7635de32020-05-29 17:14:15 -0600719 if (hazard.hazard) {
John Zulauffaea0ee2021-01-14 14:01:32 -0700720 skip_ |=
John Zulaufbb890452021-12-14 11:30:18 -0700721 exec_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
   722                                                  "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
723 " to resolve attachment %" PRIu32 ". Access info %s.",
724 func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
725 attachment_name, src_at, dst_at, exec_context_.FormatUsage(hazard).c_str());
John Zulauf7635de32020-05-29 17:14:15 -0600726 }
727 }
728 // Providing a mechanism for the constructing caller to get the result of the validation
729 bool GetSkip() const { return skip_; }
730
731 private:
732 VkRenderPass render_pass_;
733 const uint32_t subpass_;
734 const AccessContext &context_;
John Zulaufbb890452021-12-14 11:30:18 -0700735 const CommandExecutionContext &exec_context_;
John Zulauf7635de32020-05-29 17:14:15 -0600736 const char *func_name_;
737 bool skip_;
738};
739
740// Update action for resolve operations
741class UpdateStateResolveAction {
742 public:
John Zulauf14940722021-04-12 15:19:02 -0600743 UpdateStateResolveAction(AccessContext &context, ResourceUsageTag tag) : context_(context), tag_(tag) {}
John Zulaufd0ec59f2021-03-13 14:25:08 -0700744 void operator()(const char *, const char *, uint32_t, uint32_t, const AttachmentViewGen &view_gen,
745 AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) {
John Zulauf7635de32020-05-29 17:14:15 -0600746 // Ignores validation only arguments...
John Zulaufd0ec59f2021-03-13 14:25:08 -0700747 context_.UpdateAccessState(view_gen, gen_type, current_usage, ordering_rule, tag_);
John Zulauf7635de32020-05-29 17:14:15 -0600748 }
749
750 private:
751 AccessContext &context_;
John Zulauf14940722021-04-12 15:19:02 -0600752 const ResourceUsageTag tag_;
John Zulauf7635de32020-05-29 17:14:15 -0600753};
754
John Zulauf59e25072020-07-17 10:55:21 -0600755void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
John Zulauf14940722021-04-12 15:19:02 -0600756 const SyncStageAccessFlags &prior_, const ResourceUsageTag tag_) {
John Zulauf4fa68462021-04-26 21:04:22 -0600757 access_state = layer_data::make_unique<const ResourceAccessState>(*access_state_);
John Zulauf59e25072020-07-17 10:55:21 -0600758 usage_index = usage_index_;
759 hazard = hazard_;
760 prior_access = prior_;
761 tag = tag_;
762}
763
John Zulauf4fa68462021-04-26 21:04:22 -0600764void HazardResult::AddRecordedAccess(const ResourceFirstAccess &first_access) {
765 recorded_access = layer_data::make_unique<const ResourceFirstAccess>(first_access);
766}
767
John Zulauf540266b2020-04-06 18:54:53 -0600768AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
769 const std::vector<SubpassDependencyGraphNode> &dependencies,
John Zulauf1a224292020-06-30 14:52:13 -0600770 const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600771 Reset();
772 const auto &subpass_dep = dependencies[subpass];
John Zulauf22aefed2021-03-11 18:14:35 -0700773 bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
774 prev_.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
John Zulauf355e49b2020-04-24 15:11:15 -0600775 prev_by_subpass_.resize(subpass, nullptr); // Can't be more prevs than the subpass we're on
John Zulauf3d84f1b2020-03-09 13:33:25 -0600776 for (const auto &prev_dep : subpass_dep.prev) {
John Zulaufbaea94f2020-09-15 17:55:16 -0600777 const auto prev_pass = prev_dep.first->pass;
778 const auto &prev_barriers = prev_dep.second;
779 assert(prev_dep.second.size());
780 prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
781 prev_by_subpass_[prev_pass] = &prev_.back();
John Zulauf5c5e88d2019-12-26 11:22:02 -0700782 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600783
784 async_.reserve(subpass_dep.async.size());
785 for (const auto async_subpass : subpass_dep.async) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -0700786 async_.emplace_back(&contexts[async_subpass]);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600787 }
John Zulauf22aefed2021-03-11 18:14:35 -0700788 if (has_barrier_from_external) {
   789        // Store the barrier from external with the rest, but save a pointer for "by subpass" lookups.
790 prev_.emplace_back(external_context, queue_flags, subpass_dep.barrier_from_external);
791 src_external_ = &prev_.back();
John Zulaufe5da6e52020-03-18 15:32:18 -0600792 }
John Zulaufbaea94f2020-09-15 17:55:16 -0600793 if (subpass_dep.barrier_to_external.size()) {
794 dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600795 }
John Zulauf5c5e88d2019-12-26 11:22:02 -0700796}
797
John Zulauf5f13a792020-03-10 07:31:21 -0600798template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700799HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
John Zulauf540266b2020-04-06 18:54:53 -0600800 const ResourceAccessRange &range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600801 ResourceAccessRangeMap descent_map;
John Zulauf69133422020-05-20 14:55:53 -0600802 ResolvePreviousAccess(type, range, &descent_map, nullptr);
John Zulauf5f13a792020-03-10 07:31:21 -0600803
804 HazardResult hazard;
805 for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
806 hazard = detector.Detect(prev);
807 }
808 return hazard;
809}
810
John Zulauf4a6105a2020-11-17 15:11:05 -0700811template <typename Action>
812void AccessContext::ForAll(Action &&action) {
813 for (const auto address_type : kAddressTypes) {
814 auto &accesses = GetAccessStateMap(address_type);
815 for (const auto &access : accesses) {
816 action(address_type, access);
817 }
818 }
819}
820
John Zulauf3d84f1b2020-03-09 13:33:25 -0600821// A recursive range walker for hazard detection, first for the current context and then (DetectHazardRecur) to walk
822// the DAG of the contexts (for example subpasses)
823template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700824HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
John Zulauf355e49b2020-04-24 15:11:15 -0600825 DetectOptions options) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600826 HazardResult hazard;
John Zulauf5f13a792020-03-10 07:31:21 -0600827
John Zulauf1a224292020-06-30 14:52:13 -0600828 if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
John Zulauf355e49b2020-04-24 15:11:15 -0600829 // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
830 // so we'll check these first
831 for (const auto &async_context : async_) {
832 hazard = async_context->DetectAsyncHazard(type, detector, range);
833 if (hazard.hazard) return hazard;
834 }
John Zulauf5f13a792020-03-10 07:31:21 -0600835 }
836
John Zulauf1a224292020-06-30 14:52:13 -0600837 const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600838
John Zulauf69133422020-05-20 14:55:53 -0600839 const auto &accesses = GetAccessStateMap(type);
John Zulauf3cafbf72021-03-26 16:55:19 -0600840 const auto the_end = accesses.cend(); // End is not invalidated
841 auto pos = accesses.lower_bound(range);
John Zulauf69133422020-05-20 14:55:53 -0600842 ResourceAccessRange gap = {range.begin, range.begin};
John Zulauf5f13a792020-03-10 07:31:21 -0600843
John Zulauf3cafbf72021-03-26 16:55:19 -0600844 while (pos != the_end && pos->first.begin < range.end) {
John Zulauf69133422020-05-20 14:55:53 -0600845 // Cover any leading gap, or gap between entries
846 if (detect_prev) {
847 // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
848 // Cover any leading gap, or gap between entries
849 gap.end = pos->first.begin; // We know this begin is < range.end
John Zulauf355e49b2020-04-24 15:11:15 -0600850 if (gap.non_empty()) {
John Zulauf69133422020-05-20 14:55:53 -0600851 // Recur on all gaps
John Zulauf16adfc92020-04-08 10:28:33 -0600852 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf5f13a792020-03-10 07:31:21 -0600853 if (hazard.hazard) return hazard;
854 }
John Zulauf69133422020-05-20 14:55:53 -0600855 // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
856 gap.begin = pos->first.end;
857 }
858
859 hazard = detector.Detect(pos);
860 if (hazard.hazard) return hazard;
John Zulauf3cafbf72021-03-26 16:55:19 -0600861 ++pos;
John Zulauf69133422020-05-20 14:55:53 -0600862 }
863
864 if (detect_prev) {
865 // Detect in the trailing empty as needed
866 gap.end = range.end;
867 if (gap.non_empty()) {
868 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf16adfc92020-04-08 10:28:33 -0600869 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600870 }
871
872 return hazard;
873}
874
875// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
876template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700877HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
878 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -0600879 auto &accesses = GetAccessStateMap(type);
John Zulauf3cafbf72021-03-26 16:55:19 -0600880 auto pos = accesses.lower_bound(range);
881 const auto the_end = accesses.end();
John Zulauf16adfc92020-04-08 10:28:33 -0600882
John Zulauf3d84f1b2020-03-09 13:33:25 -0600883 HazardResult hazard;
John Zulauf3cafbf72021-03-26 16:55:19 -0600884 while (pos != the_end && pos->first.begin < range.end) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -0700885 hazard = detector.DetectAsync(pos, start_tag_);
John Zulauf3cafbf72021-03-26 16:55:19 -0600886 if (hazard.hazard) break;
887 ++pos;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600888 }
John Zulauf16adfc92020-04-08 10:28:33 -0600889
John Zulauf3d84f1b2020-03-09 13:33:25 -0600890 return hazard;
891}
892
John Zulaufb02c1eb2020-10-06 16:33:36 -0600893struct ApplySubpassTransitionBarriersAction {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -0700894 explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
John Zulaufb02c1eb2020-10-06 16:33:36 -0600895 void operator()(ResourceAccessState *access) const {
896 assert(access);
897 access->ApplyBarriers(barriers, true);
898 }
899 const std::vector<SyncBarrier> &barriers;
900};
901
John Zulauf22aefed2021-03-11 18:14:35 -0700902struct ApplyTrackbackStackAction {
903 explicit ApplyTrackbackStackAction(const std::vector<SyncBarrier> &barriers_,
904 const ResourceAccessStateFunction *previous_barrier_ = nullptr)
905 : barriers(barriers_), previous_barrier(previous_barrier_) {}
John Zulaufb02c1eb2020-10-06 16:33:36 -0600906 void operator()(ResourceAccessState *access) const {
907 assert(access);
908 assert(!access->HasPendingState());
909 access->ApplyBarriers(barriers, false);
John Zulaufee984022022-04-13 16:39:50 -0600910        // NOTE: We can use the invalid tag, as these barriers do not include layout transitions (would assert in SetWrite)
911 access->ApplyPendingBarriers(kInvalidTag);
John Zulauf22aefed2021-03-11 18:14:35 -0700912 if (previous_barrier) {
913 assert(bool(*previous_barrier));
914 (*previous_barrier)(access);
915 }
John Zulaufb02c1eb2020-10-06 16:33:36 -0600916 }
917 const std::vector<SyncBarrier> &barriers;
John Zulauf22aefed2021-03-11 18:14:35 -0700918 const ResourceAccessStateFunction *previous_barrier;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600919};
920
   921// Splits a single map entry into pieces matching the entries in [first, last); the total range over [first, last) must be
   922// contained within entry. Entry must be an iterator pointing to dest; first and last must be iterators pointing to a
923// *different* map from dest.
924// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
925// range [first, last)
926template <typename BarrierAction>
John Zulauf355e49b2020-04-24 15:11:15 -0600927static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
928 ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
John Zulaufb02c1eb2020-10-06 16:33:36 -0600929 BarrierAction &barrier_action) {
John Zulauf355e49b2020-04-24 15:11:15 -0600930 auto at = entry;
931 for (auto pos = first; pos != last; ++pos) {
932 // Every member of the input iterator range must fit within the remaining portion of entry
933 assert(at->first.includes(pos->first));
934 assert(at != dest->end());
935 // Trim up at to the same size as the entry to resolve
936 at = sparse_container::split(at, *dest, pos->first);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600937 auto access = pos->second; // intentional copy
938 barrier_action(&access);
John Zulauf355e49b2020-04-24 15:11:15 -0600939 at->second.Resolve(access);
940 ++at; // Go to the remaining unused section of entry
941 }
942}
943
John Zulaufa0a98292020-09-18 09:30:10 -0600944static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
945 SyncBarrier merged = {};
946 for (const auto &barrier : barriers) {
947 merged.Merge(barrier);
948 }
949 return merged;
950}
951
John Zulaufb02c1eb2020-10-06 16:33:36 -0600952template <typename BarrierAction>
John Zulauf43cc7462020-12-03 12:33:12 -0700953void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
John Zulauf355e49b2020-04-24 15:11:15 -0600954 ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
955 bool recur_to_infill) const {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600956 if (!range.non_empty()) return;
957
John Zulauf355e49b2020-04-24 15:11:15 -0600958 ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
959 while (current->range.non_empty() && range.includes(current->range.begin)) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600960 const auto current_range = current->range & range;
John Zulauf16adfc92020-04-08 10:28:33 -0600961 if (current->pos_B->valid) {
962 const auto &src_pos = current->pos_B->lower_bound;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600963 auto access = src_pos->second; // intentional copy
964 barrier_action(&access);
965
John Zulauf16adfc92020-04-08 10:28:33 -0600966 if (current->pos_A->valid) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600967 const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
968 trimmed->second.Resolve(access);
969 current.invalidate_A(trimmed);
John Zulauf5f13a792020-03-10 07:31:21 -0600970 } else {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600971 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
John Zulauf355e49b2020-04-24 15:11:15 -0600972 current.invalidate_A(inserted); // Update the parallel iterator to point at the insert segment
John Zulauf5f13a792020-03-10 07:31:21 -0600973 }
John Zulauf16adfc92020-04-08 10:28:33 -0600974 } else {
975 // we have to descend to fill this gap
976 if (recur_to_infill) {
John Zulauf22aefed2021-03-11 18:14:35 -0700977 ResourceAccessRange recurrence_range = current_range;
978 // The current context is empty for the current range, so recur to fill the gap.
979 // Since we will be recurring back up the DAG, expand the gap descent to cover the full range for which B
980 // is not valid, to minimize that recurrence
981 if (current->pos_B.at_end()) {
982 // Do the remainder here....
983 recurrence_range.end = range.end;
John Zulauf355e49b2020-04-24 15:11:15 -0600984 } else {
John Zulauf22aefed2021-03-11 18:14:35 -0700985 // Recur only over the range until B becomes valid (within the limits of range).
986 recurrence_range.end = std::min(range.end, current->pos_B->lower_bound->first.begin);
John Zulauf355e49b2020-04-24 15:11:15 -0600987 }
John Zulauf22aefed2021-03-11 18:14:35 -0700988 ResolvePreviousAccessStack(type, recurrence_range, resolve_map, infill_state, barrier_action);
989
John Zulauf355e49b2020-04-24 15:11:15 -0600990 // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
991 // iterator of the outer while.
992
993 // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
994 // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
995 // we stepped on the dest map
John Zulauf22aefed2021-03-11 18:14:35 -0700996 const auto seek_to = recurrence_range.end - 1; // The subtraction is safe as range can't be empty (loop condition)
locke-lunarg88dbb542020-06-23 22:05:42 -0600997 current.invalidate_A(); // Changes current->range
John Zulauf355e49b2020-04-24 15:11:15 -0600998 current.seek(seek_to);
999 } else if (!current->pos_A->valid && infill_state) {
   1000            // If we didn't find anything in the current range, and we aren't recurring... we infill if required
1001 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
1002 current.invalidate_A(inserted); // Update the parallel iterator to point at the correct segment after insert
John Zulauf16adfc92020-04-08 10:28:33 -06001003 }
John Zulauf5f13a792020-03-10 07:31:21 -06001004 }
ziga-lunargf0e27ad2022-03-28 00:44:12 +02001005 if (current->range.non_empty()) {
1006 ++current;
1007 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001008 }
John Zulauf1a224292020-06-30 14:52:13 -06001009
1010 // Infill if range goes passed both the current and resolve map prior contents
   1011    // Infill if range goes past both the current and resolve map prior contents
1012 ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
John Zulauf22aefed2021-03-11 18:14:35 -07001013 ResolvePreviousAccessStack<BarrierAction>(type, trailing_fill_range, resolve_map, infill_state, barrier_action);
John Zulauf1a224292020-06-30 14:52:13 -06001014 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001015}
1016
John Zulauf22aefed2021-03-11 18:14:35 -07001017template <typename BarrierAction>
1018void AccessContext::ResolvePreviousAccessStack(AccessAddressType type, const ResourceAccessRange &range,
1019 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1020 const BarrierAction &previous_barrier) const {
1021 ResourceAccessStateFunction stacked_barrier(std::ref(previous_barrier));
1022 ResolvePreviousAccess(type, range, descent_map, infill_state, &stacked_barrier);
1023}
1024
John Zulauf43cc7462020-12-03 12:33:12 -07001025void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
John Zulauf22aefed2021-03-11 18:14:35 -07001026 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
1027 const ResourceAccessStateFunction *previous_barrier) const {
1028 if (prev_.size() == 0) {
John Zulauf5f13a792020-03-10 07:31:21 -06001029 if (range.non_empty() && infill_state) {
John Zulauf22aefed2021-03-11 18:14:35 -07001030 // Fill the empty portions of descent_map with the default state, with the barrier function applied (iff present)
1031 ResourceAccessState state_copy;
1032 if (previous_barrier) {
1033 assert(bool(*previous_barrier));
1034 state_copy = *infill_state;
1035 (*previous_barrier)(&state_copy);
1036 infill_state = &state_copy;
1037 }
1038 sparse_container::update_range_value(*descent_map, range, *infill_state,
1039 sparse_container::value_precedence::prefer_dest);
John Zulauf5f13a792020-03-10 07:31:21 -06001040 }
1041 } else {
1042 // Look for something to fill the gap further along.
1043 for (const auto &prev_dep : prev_) {
John Zulauf22aefed2021-03-11 18:14:35 -07001044 const ApplyTrackbackStackAction barrier_action(prev_dep.barriers, previous_barrier);
John Zulaufbb890452021-12-14 11:30:18 -07001045 prev_dep.source_subpass->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001046 }
John Zulauf5f13a792020-03-10 07:31:21 -06001047 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001048}
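// Informal usage sketch of ResolvePreviousAccess (the names "context", "resolved", and "no_access" below are
// illustrative, not part of the original code): resolve what a byte range looked like before this context was recorded.
//
//   ResourceAccessRangeMap resolved;
//   ResourceAccessState no_access;  // default-constructed infill for bytes never touched by prior contexts
//   context.ResolvePreviousAccess(AccessAddressType::kLinear, ResourceAccessRange(0, 256), &resolved, &no_access);
//
// Ranges covered by a previous (subpass) context arrive with that context's trackback barriers applied; anything
// still uncovered is filled with the infill state.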
1049
John Zulauf4a6105a2020-11-17 15:11:05 -07001050// Non-lazy import of all accesses, WaitEvents needs this.
1051void AccessContext::ResolvePreviousAccesses() {
1052 ResourceAccessState default_state;
John Zulauf22aefed2021-03-11 18:14:35 -07001053 if (!prev_.size()) return; // If no previous contexts, nothing to do
1054
John Zulauf4a6105a2020-11-17 15:11:05 -07001055 for (const auto address_type : kAddressTypes) {
1056 ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
1057 }
1058}
1059
John Zulauf43cc7462020-12-03 12:33:12 -07001060AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
1061 return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
John Zulauf16adfc92020-04-08 10:28:33 -06001062}
1063
John Zulauf1507ee42020-05-18 11:33:09 -06001064static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001065 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1066 ? SYNC_ACCESS_INDEX_NONE
1067 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
1068 : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001069 return stage_access;
1070}
1071static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001072 const auto stage_access =
1073 (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1074 ? SYNC_ACCESS_INDEX_NONE
1075 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
1076 : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001077 return stage_access;
1078}
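// Informal summary of the two load-op helpers above:
//   VK_ATTACHMENT_LOAD_OP_NONE_EXT            -> SYNC_ACCESS_INDEX_NONE (no implicit attachment access)
//   VK_ATTACHMENT_LOAD_OP_LOAD                -> ..._ATTACHMENT_READ (color output or early fragment tests stage)
//   VK_ATTACHMENT_LOAD_OP_CLEAR / DONT_CARE   -> ..._ATTACHMENT_WRITE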
1079
John Zulauf7635de32020-05-29 17:14:15 -06001080// Caller must manage returned pointer
1081static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001082 uint32_t subpass, const AttachmentViewGenVector &attachment_views) {
John Zulauf7635de32020-05-29 17:14:15 -06001083 auto *proxy = new AccessContext(context);
John Zulaufee984022022-04-13 16:39:50 -06001084 proxy->UpdateAttachmentResolveAccess(rp_state, attachment_views, subpass, kInvalidTag);
1085 proxy->UpdateAttachmentStoreAccess(rp_state, attachment_views, subpass, kInvalidTag);
John Zulauf7635de32020-05-29 17:14:15 -06001086 return proxy;
1087}
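// In practice callers wrap the returned proxy in a std::unique_ptr (see ValidateLayoutTransitions below).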
1088
John Zulaufb02c1eb2020-10-06 16:33:36 -06001089template <typename BarrierAction>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001090void AccessContext::ResolveAccessRange(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1091 BarrierAction &barrier_action, ResourceAccessRangeMap *descent_map,
1092 const ResourceAccessState *infill_state) const {
1093 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1094 if (!attachment_gen) return;
1095
1096 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1097 const AccessAddressType address_type = view_gen.GetAddressType();
1098 for (; range_gen->non_empty(); ++range_gen) {
1099 ResolveAccessRange(address_type, *range_gen, barrier_action, descent_map, infill_state);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001100 }
John Zulauf62f10592020-04-03 12:20:02 -06001101}
1102
John Zulauf7635de32020-05-29 17:14:15 -06001103// Layout transitions are handled as if they were occurring at the beginning of the next subpass
John Zulaufbb890452021-12-14 11:30:18 -07001104bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001105 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001106 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001107 bool skip = false;
John Zulauf7635de32020-05-29 17:14:15 -06001108 // As validation methods are const and precede the record/update phase, for any transitions from the immediately
1109 // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
1110 // those effects have not been recorded yet.
1111 //
1112 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
1113 // to apply, and only copy when we do, if this proves a hot spot.
1114 std::unique_ptr<AccessContext> proxy_for_prev;
1115 TrackBack proxy_track_back;
1116
John Zulauf355e49b2020-04-24 15:11:15 -06001117 const auto &transitions = rp_state.subpass_transitions[subpass];
1118 for (const auto &transition : transitions) {
John Zulauf7635de32020-05-29 17:14:15 -06001119 const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
1120
1121 const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
John Zulauf22aefed2021-03-11 18:14:35 -07001122 assert(track_back);
John Zulauf7635de32020-05-29 17:14:15 -06001123 if (prev_needs_proxy) {
1124 if (!proxy_for_prev) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001125 proxy_for_prev.reset(
John Zulaufbb890452021-12-14 11:30:18 -07001126 CreateStoreResolveProxyContext(*track_back->source_subpass, rp_state, transition.prev_pass, attachment_views));
John Zulauf7635de32020-05-29 17:14:15 -06001127 proxy_track_back = *track_back;
John Zulaufbb890452021-12-14 11:30:18 -07001128 proxy_track_back.source_subpass = proxy_for_prev.get();
John Zulauf7635de32020-05-29 17:14:15 -06001129 }
1130 track_back = &proxy_track_back;
1131 }
1132 auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
John Zulauf355e49b2020-04-24 15:11:15 -06001133 if (hazard.hazard) {
John Zulaufee984022022-04-13 16:39:50 -06001134 if (hazard.tag == kInvalidTag) {
John Zulaufbb890452021-12-14 11:30:18 -07001135 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001136 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1137 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1138 " image layout transition (old_layout: %s, new_layout: %s) after store/resolve operation in subpass %" PRIu32,
1139 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1140 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout), transition.prev_pass);
1141 } else {
John Zulaufbb890452021-12-14 11:30:18 -07001142 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06001143 rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1144 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1145 " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
1146 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1147 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulaufbb890452021-12-14 11:30:18 -07001148 exec_context.FormatUsage(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06001149 }
John Zulauf355e49b2020-04-24 15:11:15 -06001150 }
1151 }
1152 return skip;
1153}
1154
John Zulaufbb890452021-12-14 11:30:18 -07001155bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001156 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001157 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001158 bool skip = false;
1159 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufa0a98292020-09-18 09:30:10 -06001160
John Zulauf1507ee42020-05-18 11:33:09 -06001161 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1162 if (subpass == rp_state.attachment_first_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001163 const auto &view_gen = attachment_views[i];
1164 if (!view_gen.IsValid()) continue;
John Zulauf1507ee42020-05-18 11:33:09 -06001165 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001166
1167 // Need to check in the following way:
1168 // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1169 // vs. transition
1170 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1171 // for each aspect loaded.
1172
1173 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001174 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001175 const bool is_color = !(has_depth || has_stencil);
1176
1177 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001178 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001179
John Zulaufaff20662020-06-01 14:07:58 -06001180 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001181 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001182
John Zulaufb02c1eb2020-10-06 16:33:36 -06001183 bool checked_stencil = false;
John Zulauf57261402021-08-13 11:32:06 -06001184 if (is_color && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001185 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea, load_index, SyncOrdering::kColorAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001186 aspect = "color";
1187 } else {
John Zulauf57261402021-08-13 11:32:06 -06001188 if (has_depth && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001189 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_index,
1190 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001191 aspect = "depth";
1192 }
John Zulauf57261402021-08-13 11:32:06 -06001193 if (!hazard.hazard && has_stencil && (stencil_load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001194 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, stencil_load_index,
1195 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001196 aspect = "stencil";
1197 checked_stencil = true;
1198 }
1199 }
1200
1201 if (hazard.hazard) {
1202 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulaufbb890452021-12-14 11:30:18 -07001203 const auto &sync_state = exec_context.GetSyncState();
John Zulaufee984022022-04-13 16:39:50 -06001204 if (hazard.tag == kInvalidTag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001205 // Hazard vs. ILT
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001206 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulaufb02c1eb2020-10-06 16:33:36 -06001207 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1208 " aspect %s during load with loadOp %s.",
1209 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1210 } else {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001211 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauf1507ee42020-05-18 11:33:09 -06001212 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001213 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001214 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulaufbb890452021-12-14 11:30:18 -07001215 exec_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001216 }
1217 }
1218 }
1219 }
1220 return skip;
1221}
1222
John Zulaufaff20662020-06-01 14:07:58 -06001223// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1224// because of the ordering guarantees w.r.t. sample access and that the resolve validation hasn't altered the state, because
1225// store is part of the same Next/End operation.
1226// The latter is handled in layout transition validation directly
John Zulaufbb890452021-12-14 11:30:18 -07001227bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001228 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001229 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06001230 bool skip = false;
1231 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001232
1233 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1234 if (subpass == rp_state.attachment_last_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001235 const AttachmentViewGen &view_gen = attachment_views[i];
1236 if (!view_gen.IsValid()) continue;
John Zulaufaff20662020-06-01 14:07:58 -06001237 const auto &ci = attachment_ci[i];
1238
1239 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1240 // so we assume that an implementation is *free* to write in that case, meaning that for correctness
1241 // sake, we treat DONT_CARE as writing.
1242 const bool has_depth = FormatHasDepth(ci.format);
1243 const bool has_stencil = FormatHasStencil(ci.format);
1244 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001245 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001246 if (!has_stencil && !store_op_stores) continue;
1247
1248 HazardResult hazard;
1249 const char *aspect = nullptr;
1250 bool checked_stencil = false;
1251 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001252 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
1253 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001254 aspect = "color";
1255 } else {
John Zulauf57261402021-08-13 11:32:06 -06001256 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001257 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001258 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1259 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001260 aspect = "depth";
1261 }
1262 if (!hazard.hazard && has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001263 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1264 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001265 aspect = "stencil";
1266 checked_stencil = true;
1267 }
1268 }
1269
1270 if (hazard.hazard) {
1271 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1272 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulaufbb890452021-12-14 11:30:18 -07001273 skip |=
1274 exec_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
1275 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1276 " %s aspect during store with %s %s. Access info %s",
1277 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
1278 op_type_string, store_op_string, exec_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001279 }
1280 }
1281 }
1282 return skip;
1283}
1284
John Zulaufbb890452021-12-14 11:30:18 -07001285bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &exec_context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001286 const VkRect2D &render_area, const AttachmentViewGenVector &attachment_views,
1287 const char *func_name, uint32_t subpass) const {
John Zulaufbb890452021-12-14 11:30:18 -07001288 ValidateResolveAction validate_action(rp_state.renderPass(), subpass, *this, exec_context, func_name);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001289 ResolveOperation(validate_action, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001290 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001291}
1292
John Zulauf3d84f1b2020-03-09 13:33:25 -06001293class HazardDetector {
1294 SyncStageAccessIndex usage_index_;
1295
1296 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001297 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf14940722021-04-12 15:19:02 -06001298 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001299 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001300 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001301 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001302};
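// Rough usage sketch (illustrative only; "context" and "some_range" are placeholders): the DetectHazard templates
// below walk a range generator and call detector.Detect() / detector.DetectAsync() on each overlapping access-map
// entry, e.g.
//   HazardDetector detector(SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ);
//   HazardResult hazard = context.DetectHazard(AccessAddressType::kLinear, detector, some_range, DetectOptions::kDetectAll);
// Any detector type that exposes Detect(pos) and DetectAsync(pos, start_tag) can be plugged in the same way.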
1303
John Zulauf69133422020-05-20 14:55:53 -06001304class HazardDetectorWithOrdering {
1305 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001306 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001307
1308 public:
1309 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001310 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001311 }
John Zulauf14940722021-04-12 15:19:02 -06001312 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001313 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001314 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001315 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001316};
1317
John Zulauf16adfc92020-04-08 10:28:33 -06001318HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001319 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001320 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001321 const auto base_address = ResourceBaseAddress(buffer);
1322 HazardDetector detector(usage_index);
1323 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001324}
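// Typical call pattern (illustrative; the usage index depends on the command being validated, and "buffer_state",
// "offset", and "size" are placeholders):
//   auto hazard = context.DetectHazard(*buffer_state, some_transfer_read_usage_index, ResourceAccessRange(offset, offset + size));
//   if (hazard.hazard) { /* report through the SyncValidator's error logging */ }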
1325
John Zulauf69133422020-05-20 14:55:53 -06001326template <typename Detector>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001327HazardResult AccessContext::DetectHazard(Detector &detector, const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1328 DetectOptions options) const {
1329 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1330 if (!attachment_gen) return HazardResult();
1331
1332 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1333 const auto address_type = view_gen.GetAddressType();
1334 for (; range_gen->non_empty(); ++range_gen) {
1335 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1336 if (hazard.hazard) return hazard;
1337 }
1338
1339 return HazardResult();
1340}
1341
1342template <typename Detector>
John Zulauf69133422020-05-20 14:55:53 -06001343HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1344 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1345 const VkExtent3D &extent, DetectOptions options) const {
1346 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001347 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001348 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1349 base_address);
1350 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001351 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001352 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001353 if (hazard.hazard) return hazard;
1354 }
1355 return HazardResult();
1356}
John Zulauf110413c2021-03-20 05:38:38 -06001357template <typename Detector>
1358HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1359 const VkImageSubresourceRange &subresource_range, DetectOptions options) const {
1360 if (!SimpleBinding(image)) return HazardResult();
1361 const auto base_address = ResourceBaseAddress(image);
1362 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1363 const auto address_type = ImageAddressType(image);
1364 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf110413c2021-03-20 05:38:38 -06001365 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1366 if (hazard.hazard) return hazard;
1367 }
1368 return HazardResult();
1369}
John Zulauf69133422020-05-20 14:55:53 -06001370
John Zulauf540266b2020-04-06 18:54:53 -06001371HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1372 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1373 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001374 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1375 subresource.layerCount};
John Zulauf110413c2021-03-20 05:38:38 -06001376 HazardDetector detector(current_usage);
1377 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf1507ee42020-05-18 11:33:09 -06001378}
1379
1380HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf110413c2021-03-20 05:38:38 -06001381 const VkImageSubresourceRange &subresource_range) const {
John Zulauf69133422020-05-20 14:55:53 -06001382 HazardDetector detector(current_usage);
John Zulauf110413c2021-03-20 05:38:38 -06001383 return DetectHazard(detector, image, subresource_range, DetectOptions::kDetectAll);
John Zulauf69133422020-05-20 14:55:53 -06001384}
1385
John Zulaufd0ec59f2021-03-13 14:25:08 -07001386HazardResult AccessContext::DetectHazard(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1387 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) const {
1388 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
1389 return DetectHazard(detector, view_gen, gen_type, DetectOptions::kDetectAll);
1390}
1391
John Zulauf69133422020-05-20 14:55:53 -06001392HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001393 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001394 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001395 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001396 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001397}
1398
John Zulauf3d84f1b2020-03-09 13:33:25 -06001399class BarrierHazardDetector {
1400 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001401 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001402 SyncStageAccessFlags src_access_scope)
1403 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1404
John Zulauf5f13a792020-03-10 07:31:21 -06001405 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1406 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001407 }
John Zulauf14940722021-04-12 15:19:02 -06001408 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001409 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001410 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001411 }
1412
1413 private:
1414 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001415 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001416 SyncStageAccessFlags src_access_scope_;
1417};
1418
John Zulauf4a6105a2020-11-17 15:11:05 -07001419class EventBarrierHazardDetector {
1420 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001421 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001422 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
John Zulauf14940722021-04-12 15:19:02 -06001423 ResourceUsageTag scope_tag)
John Zulauf4a6105a2020-11-17 15:11:05 -07001424 : usage_index_(usage_index),
1425 src_exec_scope_(src_exec_scope),
1426 src_access_scope_(src_access_scope),
1427 event_scope_(event_scope),
1428 scope_pos_(event_scope.cbegin()),
1429 scope_end_(event_scope.cend()),
1430 scope_tag_(scope_tag) {}
1431
1432 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1433 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1434 // Need to find a more efficient sync, since we know pos->first is strictly increasing call to call
1435 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1436 if (scope_pos_ == scope_end_) return HazardResult();
1437 if (!scope_pos_->first.intersects(pos->first)) {
1438 scope_pos_ = event_scope_.lower_bound(pos->first);  // Advance the cached scope cursor to the first entry that could intersect
1439 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1440 }
1441
1442 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1443 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1444 }
John Zulauf14940722021-04-12 15:19:02 -06001445 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001446 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
1447 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1448 }
1449
1450 private:
1451 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001452 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001453 SyncStageAccessFlags src_access_scope_;
1454 const SyncEventState::ScopeMap &event_scope_;
1455 mutable SyncEventState::ScopeMap::const_iterator scope_pos_;  // Cached cursor into event_scope_, advanced from const Detect()
1456 SyncEventState::ScopeMap::const_iterator scope_end_;
John Zulauf14940722021-04-12 15:19:02 -06001457 const ResourceUsageTag scope_tag_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001458};
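// Informally: unlike BarrierHazardDetector, the event variant only flags barrier hazards for address ranges that were
// captured in the event's "first scope" (recorded at SetEvent time), and passes the scope tag along so the per-access
// check can distinguish accesses made before vs. after that capture.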
1459
Jeremy Gebben40a22942020-12-22 14:22:06 -07001460HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001461 const SyncStageAccessFlags &src_access_scope,
1462 const VkImageSubresourceRange &subresource_range,
1463 const SyncEventState &sync_event, DetectOptions options) const {
1464 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1465 // first access scope map to use, and there's no easy way to plumb it in below.
1466 const auto address_type = ImageAddressType(image);
1467 const auto &event_scope = sync_event.FirstScope(address_type);
1468
1469 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1470 event_scope, sync_event.first_scope_tag);
John Zulauf110413c2021-03-20 05:38:38 -06001471 return DetectHazard(detector, image, subresource_range, options);
John Zulauf4a6105a2020-11-17 15:11:05 -07001472}
1473
John Zulaufd0ec59f2021-03-13 14:25:08 -07001474HazardResult AccessContext::DetectImageBarrierHazard(const AttachmentViewGen &view_gen, const SyncBarrier &barrier,
1475 DetectOptions options) const {
1476 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, barrier.src_exec_scope.exec_scope,
1477 barrier.src_access_scope);
1478 return DetectHazard(detector, view_gen, AttachmentViewGen::Gen::kViewSubresource, options);
1479}
1480
Jeremy Gebben40a22942020-12-22 14:22:06 -07001481HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001482 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001483 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001484 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001485 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
John Zulauf110413c2021-03-20 05:38:38 -06001486 return DetectHazard(detector, image, subresource_range, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001487}
1488
Jeremy Gebben40a22942020-12-22 14:22:06 -07001489HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001490 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001491 const VkImageMemoryBarrier &barrier) const {
1492 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1493 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1494 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1495}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001496HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001497 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulauf110413c2021-03-20 05:38:38 -06001498 image_barrier.barrier.src_access_scope, image_barrier.range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001499}
John Zulauf355e49b2020-04-24 15:11:15 -06001500
John Zulauf9cb530d2019-09-30 14:14:10 -06001501template <typename Flags, typename Map>
1502SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1503 SyncStageAccessFlags scope = 0;
1504 for (const auto &bit_scope : map) {
1505 if (flag_mask < bit_scope.first) break;
1506
1507 if (flag_mask & bit_scope.first) {
1508 scope |= bit_scope.second;
1509 }
1510 }
1511 return scope;
1512}
1513
Jeremy Gebben40a22942020-12-22 14:22:06 -07001514SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001515 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1516}
1517
Jeremy Gebben40a22942020-12-22 14:22:06 -07001518SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1519 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001520}
1521
Jeremy Gebben40a22942020-12-22 14:22:06 -07001522// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1523SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001524 // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1525 // accesses. (After factoring out common terms, the union of the per-stage/per-access intersections equals the
1526 // intersection of the union of stage/access bits over all enabled stages with the same union over the access mask.)
John Zulauf9cb530d2019-09-30 14:14:10 -06001527 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1528}
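// Informal example: AccessScope(VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR, VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR)
// intersects "every stage/access bit reachable from the color-output stage" with "every stage/access bit that is a
// color-attachment write", leaving just SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE.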
1529
1530template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001531void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001532 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs. READ usages
1533 // that do incremental updates)
John Zulauf4a6105a2020-11-17 15:11:05 -07001534 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001535 auto pos = accesses->lower_bound(range);
1536 if (pos == accesses->end() || !pos->first.intersects(range)) {
1537 // The range is empty, fill it with a default value.
1538 pos = action.Infill(accesses, pos, range);
1539 } else if (range.begin < pos->first.begin) {
1540 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001541 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001542 } else if (pos->first.begin < range.begin) {
1543 // Trim the beginning if needed
1544 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1545 ++pos;
1546 }
1547
1548 const auto the_end = accesses->end();
1549 while ((pos != the_end) && pos->first.intersects(range)) {
1550 if (pos->first.end > range.end) {
1551 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1552 }
1553
1554 pos = action(accesses, pos);
1555 if (pos == the_end) break;
1556
1557 auto next = pos;
1558 ++next;
1559 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1560 // Need to infill if next is disjoint
1561 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001562 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001563 next = action.Infill(accesses, next, new_range);
1564 }
1565 pos = next;
1566 }
1567}
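// Worked example (informal): if the map already holds entries for [0,8) and [16,24) and this is called with
// range [4,20), then:
//   - lower_bound finds [0,8); since 0 < 4 the entry is split at 4 and the action is first applied to [4,8)
//   - the gap [8,16) is infilled via action.Infill (which may resolve previous-context state) and then visited
//   - [16,24) is split at 20 so the action is applied to [16,20) only
// Net effect: the action touches exactly [4,20) without widening or shrinking neighboring entries.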
John Zulaufd5115702021-01-18 12:34:33 -07001568
1569// Give a comparable interface for range generators and ranges
1570template <typename Action>
1571inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
1572 assert(range);
1573 UpdateMemoryAccessState(accesses, *range, action);
1574}
1575
John Zulauf4a6105a2020-11-17 15:11:05 -07001576template <typename Action, typename RangeGen>
1577void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1578 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001579 RangeGen &range_gen = *range_gen_arg; // Non-const references must be * by style requirement but deref-ing * iterator is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001580 for (; range_gen->non_empty(); ++range_gen) {
1581 UpdateMemoryAccessState(accesses, *range_gen, action);
1582 }
1583}
John Zulauf9cb530d2019-09-30 14:14:10 -06001584
John Zulaufd0ec59f2021-03-13 14:25:08 -07001585template <typename Action, typename RangeGen>
1586void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, const RangeGen &range_gen_prebuilt) {
1587 RangeGen range_gen(range_gen_prebuilt); // RangeGenerators can be expensive to create from scratch... initialize from built
1588 for (; range_gen->non_empty(); ++range_gen) {
1589 UpdateMemoryAccessState(accesses, *range_gen, action);
1590 }
1591}
John Zulauf9cb530d2019-09-30 14:14:10 -06001592struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001593 using Iterator = ResourceAccessRangeMap::iterator;
1594 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001595 // this is only called on gaps, and never returns a gap.
1596 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001597 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001598 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001599 }
John Zulauf5f13a792020-03-10 07:31:21 -06001600
John Zulauf5c5e88d2019-12-26 11:22:02 -07001601 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001602 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001603 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001604 return pos;
1605 }
1606
John Zulauf43cc7462020-12-03 12:33:12 -07001607 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf14940722021-04-12 15:19:02 -06001608 SyncOrdering ordering_rule_, ResourceUsageTag tag_)
John Zulauf8e3c3e92021-01-06 11:19:36 -07001609 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001610 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001611 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001612 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001613 const SyncOrdering ordering_rule;
John Zulauf14940722021-04-12 15:19:02 -06001614 const ResourceUsageTag tag;
John Zulauf9cb530d2019-09-30 14:14:10 -06001615};
1616
John Zulauf4a6105a2020-11-17 15:11:05 -07001617// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001618struct PipelineBarrierOp {
1619 SyncBarrier barrier;
1620 bool layout_transition;
1621 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1622 : barrier(barrier_), layout_transition(layout_transition_) {}
1623 PipelineBarrierOp() = default;
John Zulaufd5115702021-01-18 12:34:33 -07001624 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf1e331ec2020-12-04 18:29:38 -07001625 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1626};
John Zulauf4a6105a2020-11-17 15:11:05 -07001627// The barrier operation for wait events
1628struct WaitEventBarrierOp {
John Zulauf14940722021-04-12 15:19:02 -06001629 ResourceUsageTag scope_tag;
John Zulauf4a6105a2020-11-17 15:11:05 -07001630 SyncBarrier barrier;
1631 bool layout_transition;
John Zulauf14940722021-04-12 15:19:02 -06001632 WaitEventBarrierOp(const ResourceUsageTag scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1633 : scope_tag(scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
John Zulauf4a6105a2020-11-17 15:11:05 -07001634 WaitEventBarrierOp() = default;
John Zulauf14940722021-04-12 15:19:02 -06001635 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope_tag, barrier, layout_transition); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001636};
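// Unlike PipelineBarrierOp, the wait-event variant carries the tag captured when the event's scope was recorded, so
// that ApplyBarrier can limit the barrier's source scope to accesses made before that point (see the
// SyncEventState::first_scope_tag usage above).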
John Zulauf1e331ec2020-12-04 18:29:38 -07001637
John Zulauf4a6105a2020-11-17 15:11:05 -07001638// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1639// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1640// of a collection is known/present.
John Zulauf5c628d02021-05-04 15:46:36 -06001641template <typename BarrierOp, typename OpVector = std::vector<BarrierOp>>
John Zulauf89311b42020-09-29 16:28:47 -06001642class ApplyBarrierOpsFunctor {
1643 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001644 using Iterator = ResourceAccessRangeMap::iterator;
John Zulauf5c628d02021-05-04 15:46:36 -06001645 // Only called with a gap, and pos at the lower_bound(range)
1646 inline Iterator Infill(ResourceAccessRangeMap *accesses, const Iterator &pos, const ResourceAccessRange &range) const {
1647 if (!infill_default_) {
1648 return pos;
1649 }
1650 ResourceAccessState default_state;
1651 auto inserted = accesses->insert(pos, std::make_pair(range, default_state));
1652 return inserted;
1653 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001654
John Zulauf5c628d02021-05-04 15:46:36 -06001655 Iterator operator()(ResourceAccessRangeMap *accesses, const Iterator &pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001656 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001657 for (const auto &op : barrier_ops_) {
1658 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001659 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001660
John Zulauf89311b42020-09-29 16:28:47 -06001661 if (resolve_) {
1662 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1663 // another walk
1664 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001665 }
1666 return pos;
1667 }
1668
John Zulauf89311b42020-09-29 16:28:47 -06001669 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf5c628d02021-05-04 15:46:36 -06001670 ApplyBarrierOpsFunctor(bool resolve, typename OpVector::size_type size_hint, ResourceUsageTag tag)
1671 : resolve_(resolve), infill_default_(false), barrier_ops_(), tag_(tag) {
John Zulaufd5115702021-01-18 12:34:33 -07001672 barrier_ops_.reserve(size_hint);
1673 }
John Zulauf5c628d02021-05-04 15:46:36 -06001674 void EmplaceBack(const BarrierOp &op) {
1675 barrier_ops_.emplace_back(op);
1676 infill_default_ |= op.layout_transition;
1677 }
John Zulauf89311b42020-09-29 16:28:47 -06001678
1679 private:
1680 bool resolve_;
John Zulauf5c628d02021-05-04 15:46:36 -06001681 bool infill_default_;
1682 OpVector barrier_ops_;
John Zulauf14940722021-04-12 15:19:02 -06001683 const ResourceUsageTag tag_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001684};
1685
John Zulauf4a6105a2020-11-17 15:11:05 -07001686// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1687// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1688template <typename BarrierOp>
John Zulauf5c628d02021-05-04 15:46:36 -06001689class ApplyBarrierFunctor : public ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>> {
1690 using Base = ApplyBarrierOpsFunctor<BarrierOp, small_vector<BarrierOp, 1>>;
1691
John Zulauf4a6105a2020-11-17 15:11:05 -07001692 public:
John Zulaufee984022022-04-13 16:39:50 -06001693 ApplyBarrierFunctor(const BarrierOp &barrier_op) : Base(false, 1, kInvalidTag) { Base::EmplaceBack(barrier_op); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001694};
1695
John Zulauf1e331ec2020-12-04 18:29:38 -07001696// This functor resolves the pending state.
John Zulauf5c628d02021-05-04 15:46:36 -06001697class ResolvePendingBarrierFunctor : public ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>> {
1698 using Base = ApplyBarrierOpsFunctor<NoopBarrierAction, small_vector<NoopBarrierAction, 1>>;
1699
John Zulauf1e331ec2020-12-04 18:29:38 -07001700 public:
John Zulauf5c628d02021-05-04 15:46:36 -06001701 ResolvePendingBarrierFunctor(ResourceUsageTag tag) : Base(true, 0, tag) {}
John Zulauf9cb530d2019-09-30 14:14:10 -06001702};
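// Rough usage sketch for the functors above (the names "access_map", "barriers", and "tag" are illustrative):
//   ApplyBarrierOpsFunctor<PipelineBarrierOp> functor(true /* resolve pending state */, barriers.size(), tag);
//   for (const auto &sync_barrier : barriers) functor.EmplaceBack(PipelineBarrierOp(sync_barrier, false));
//   UpdateMemoryAccessState(&access_map, kFullRange, functor);
// ApplyBarrierFunctor applies a single op without resolving; ResolvePendingBarrierFunctor resolves with no new ops
// (see RecordLayoutTransitions below for an example of the latter via ApplyToContext).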
1703
John Zulauf8e3c3e92021-01-06 11:19:36 -07001704void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001705 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001706 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001707 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001708}
1709
John Zulauf8e3c3e92021-01-06 11:19:36 -07001710void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001711 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001712 if (!SimpleBinding(buffer)) return;
1713 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001714 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001715}
John Zulauf355e49b2020-04-24 15:11:15 -06001716
John Zulauf8e3c3e92021-01-06 11:19:36 -07001717void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf110413c2021-03-20 05:38:38 -06001718 const VkImageSubresourceRange &subresource_range, const ResourceUsageTag &tag) {
1719 if (!SimpleBinding(image)) return;
1720 const auto base_address = ResourceBaseAddress(image);
1721 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1722 const auto address_type = ImageAddressType(image);
1723 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1724 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
1725}
1726void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001727 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf14940722021-04-12 15:19:02 -06001728 const VkExtent3D &extent, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001729 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001730 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001731 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1732 base_address);
1733 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001734 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf110413c2021-03-20 05:38:38 -06001735 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001736}
John Zulaufd0ec59f2021-03-13 14:25:08 -07001737
1738void AccessContext::UpdateAccessState(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
John Zulauf14940722021-04-12 15:19:02 -06001739 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001740 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1741 if (!gen) return;
1742 subresource_adapter::ImageRangeGenerator range_gen(*gen);
1743 const auto address_type = view_gen.GetAddressType();
1744 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1745 ApplyUpdateAction(address_type, action, &range_gen);
John Zulauf7635de32020-05-29 17:14:15 -06001746}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001747
John Zulauf8e3c3e92021-01-06 11:19:36 -07001748void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001749 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
John Zulauf14940722021-04-12 15:19:02 -06001750 const VkExtent3D &extent, const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001751 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1752 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001753 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001754}
1755
John Zulaufd0ec59f2021-03-13 14:25:08 -07001756template <typename Action, typename RangeGen>
1757void AccessContext::ApplyUpdateAction(AccessAddressType address_type, const Action &action, RangeGen *range_gen_arg) {
1758 assert(range_gen_arg); // The old Google C++ style guide requires non-const objects be passed by * not &, but this isn't an optional arg.
1759 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, range_gen_arg);
John Zulauf540266b2020-04-06 18:54:53 -06001760}
1761
1762template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001763void AccessContext::ApplyUpdateAction(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, const Action &action) {
1764 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1765 if (!gen) return;
1766 UpdateMemoryAccessState(&GetAccessStateMap(view_gen.GetAddressType()), action, *gen);
John Zulauf540266b2020-04-06 18:54:53 -06001767}
1768
John Zulaufd0ec59f2021-03-13 14:25:08 -07001769void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state,
1770 const AttachmentViewGenVector &attachment_views, uint32_t subpass,
John Zulauf14940722021-04-12 15:19:02 -06001771 const ResourceUsageTag tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001772 UpdateStateResolveAction update(*this, tag);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001773 ResolveOperation(update, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001774}
1775
John Zulaufd0ec59f2021-03-13 14:25:08 -07001776void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
John Zulauf14940722021-04-12 15:19:02 -06001777 uint32_t subpass, const ResourceUsageTag tag) {
John Zulaufaff20662020-06-01 14:07:58 -06001778 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001779
1780 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1781 if (rp_state.attachment_last_subpass[i] == subpass) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001782 const auto &view_gen = attachment_views[i];
1783 if (!view_gen.IsValid()) continue; // UNUSED
John Zulaufaff20662020-06-01 14:07:58 -06001784
1785 const auto &ci = attachment_ci[i];
1786 const bool has_depth = FormatHasDepth(ci.format);
1787 const bool has_stencil = FormatHasStencil(ci.format);
1788 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001789 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001790
1791 if (is_color && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001792 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
1793 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001794 } else {
John Zulaufaff20662020-06-01 14:07:58 -06001795 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001796 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1797 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001798 }
John Zulauf57261402021-08-13 11:32:06 -06001799 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001800 if (has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001801 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1802 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001803 }
1804 }
1805 }
1806 }
1807}
1808
John Zulauf540266b2020-04-06 18:54:53 -06001809template <typename Action>
John Zulaufd5115702021-01-18 12:34:33 -07001810void AccessContext::ApplyToContext(const Action &barrier_action) {
John Zulauf540266b2020-04-06 18:54:53 -06001811 // Note: Barriers do *not* cross context boundaries, applying to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001812 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001813 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001814 }
1815}
1816
1817void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001818 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1819 auto &context = contexts[subpass_index];
John Zulauf22aefed2021-03-11 18:14:35 -07001820 ApplyTrackbackStackAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001821 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001822 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001823 }
1824 }
1825}
1826
John Zulauf4fa68462021-04-26 21:04:22 -06001827// Caller must ensure that the lifespan of this context is shorter than that of 'from'
1828void AccessContext::ImportAsyncContexts(const AccessContext &from) { async_ = from.async_; }
1829
John Zulauf355e49b2020-04-24 15:11:15 -06001830// Suitable only for *subpass* access contexts
John Zulaufd0ec59f2021-03-13 14:25:08 -07001831HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const AttachmentViewGen &attach_view) const {
1832 if (!attach_view.IsValid()) return HazardResult();
John Zulauf355e49b2020-04-24 15:11:15 -06001833
John Zulauf355e49b2020-04-24 15:11:15 -06001834 // We should never ask for a transition from a context we don't have
John Zulaufbb890452021-12-14 11:30:18 -07001835 assert(track_back.source_subpass);
John Zulauf355e49b2020-04-24 15:11:15 -06001836
1837 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001838 // Hazard detection for the transition can be done against the merge of the barriers (it only uses the src_... scopes)
1839 const auto merged_barrier = MergeBarriers(track_back.barriers);
John Zulaufbb890452021-12-14 11:30:18 -07001840 HazardResult hazard = track_back.source_subpass->DetectImageBarrierHazard(attach_view, merged_barrier, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001841 if (!hazard.hazard) {
1842 // The Async hazard check is against the current context's async set.
John Zulaufd0ec59f2021-03-13 14:25:08 -07001843 hazard = DetectImageBarrierHazard(attach_view, merged_barrier, kDetectAsync);
John Zulauf355e49b2020-04-24 15:11:15 -06001844 }
John Zulaufa0a98292020-09-18 09:30:10 -06001845
John Zulauf355e49b2020-04-24 15:11:15 -06001846 return hazard;
1847}
1848
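// Applies the layout transitions for the given subpass, importing the prior-pass accesses with the transition barriers applied, then resolving any pending barriers.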
John Zulaufb02c1eb2020-10-06 16:33:36 -06001849void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
John Zulauf14940722021-04-12 15:19:02 -06001850 const AttachmentViewGenVector &attachment_views, const ResourceUsageTag tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001851 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001852 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001853 for (const auto &transition : transitions) {
1854 const auto prev_pass = transition.prev_pass;
John Zulaufd0ec59f2021-03-13 14:25:08 -07001855 const auto &view_gen = attachment_views[transition.attachment];
1856 if (!view_gen.IsValid()) continue;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001857
1858 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1859 assert(trackback);
1860
1861 // Import the attachments into the current context
John Zulaufbb890452021-12-14 11:30:18 -07001862 const auto *prev_context = trackback->source_subpass;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001863 assert(prev_context);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001864 const auto address_type = view_gen.GetAddressType();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001865 auto &target_map = GetAccessStateMap(address_type);
1866 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001867 prev_context->ResolveAccessRange(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action, &target_map,
1868 &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001869 }
1870
John Zulauf86356ca2020-10-19 11:46:41 -06001871 // If there were no transitions, skip this global map walk
1872 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001873 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulaufd5115702021-01-18 12:34:33 -07001874 ApplyToContext(apply_pending_action);
John Zulauf86356ca2020-10-19 11:46:41 -06001875 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001876}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001877
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001878void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulauf669dfd52021-01-27 17:15:28 -07001879 auto *events_context = GetCurrentEventsContext();
1880 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06001881 events_context->ApplyBarrier(src, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001882}
1883
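// Walks the descriptor uses of the currently bound pipeline and checks each image, texel buffer, and buffer descriptor for hazards against the current access context.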
locke-lunarg61870c22020-06-09 14:51:50 -06001884bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1885 const char *func_name) const {
1886 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001887 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001888 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001889 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001890 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001891 return skip;
1892 }
1893
1894 using DescriptorClass = cvdescriptorset::DescriptorClass;
1895 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1896 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
locke-lunarg61870c22020-06-09 14:51:50 -06001897 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1898
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001899 for (const auto &stage_state : pipe->stage_state) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07001900 const auto raster_state = pipe->RasterizationState();
1901 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001902 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001903 }
locke-lunarg61870c22020-06-09 14:51:50 -06001904 for (const auto &set_binding : stage_state.descriptor_uses) {
Jeremy Gebben4d51c552022-01-06 21:27:15 -07001905 const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
locke-lunarg61870c22020-06-09 14:51:50 -06001906 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001907 set_binding.first.binding);
locke-lunarg61870c22020-06-09 14:51:50 -06001908 const auto descriptor_type = binding_it.GetType();
1909 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1910 auto array_idx = 0;
1911
1912 if (binding_it.IsVariableDescriptorCount()) {
1913 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1914 }
1915 SyncStageAccessIndex sync_index =
1916 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1917
1918 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1919 uint32_t index = i - index_range.start;
1920 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1921 switch (descriptor->GetClass()) {
1922 case DescriptorClass::ImageSampler:
1923 case DescriptorClass::Image: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07001924 if (descriptor->Invalid()) {
1925 continue;
locke-lunarg61870c22020-06-09 14:51:50 -06001926 }
Jeremy Gebbena08da232022-02-01 15:14:52 -07001927
1928 // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
1929 const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1930 const auto *img_view_state = image_descriptor->GetImageViewState();
1931 VkImageLayout image_layout = image_descriptor->GetImageLayout();
1932
John Zulauf361fb532020-07-22 10:45:39 -06001933 HazardResult hazard;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06001934 // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
1935 // Descriptors, so we do not have to worry about depth slicing here.
1936 // See: VUID 00343
1937 assert(!img_view_state->IsDepthSliced());
John Zulauf110413c2021-03-20 05:38:38 -06001938 const IMAGE_STATE *img_state = img_view_state->image_state.get();
John Zulauf361fb532020-07-22 10:45:39 -06001939 const auto &subresource_range = img_view_state->normalized_subresource_range;
John Zulauf110413c2021-03-20 05:38:38 -06001940
1941 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1942 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1943 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
John Zulauf361fb532020-07-22 10:45:39 -06001944 // Input attachments are subject to raster ordering rules
1945 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001946 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001947 } else {
John Zulauf110413c2021-03-20 05:38:38 -06001948 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range);
John Zulauf361fb532020-07-22 10:45:39 -06001949 }
John Zulauf110413c2021-03-20 05:38:38 -06001950
John Zulauf33fc1d52020-07-17 11:01:10 -06001951 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001952 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001953 img_view_state->image_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001954 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1955 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001956 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001957 sync_state_->report_data->FormatHandle(img_view_state->image_view()).c_str(),
1958 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1959 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001960 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1961 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001962 set_binding.first.binding, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001963 }
1964 break;
1965 }
1966 case DescriptorClass::TexelBuffer: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07001967 const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
1968 if (texel_descriptor->Invalid()) {
1969 continue;
1970 }
1971 const auto *buf_view_state = texel_descriptor->GetBufferViewState();
1972 const auto *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001973 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001974 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001975 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001976 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001977 buf_view_state->buffer_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001978 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1979 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001980 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view()).c_str(),
1981 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1982 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001983 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001984 string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001985 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001986 }
1987 break;
1988 }
1989 case DescriptorClass::GeneralBuffer: {
1990 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
Jeremy Gebbena08da232022-02-01 15:14:52 -07001991 if (buffer_descriptor->Invalid()) {
1992 continue;
1993 }
1994 const auto *buf_state = buffer_descriptor->GetBufferState();
John Zulauf3e86bf02020-09-12 10:47:57 -06001995 const ResourceAccessRange range =
1996 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001997 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001998 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001999 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002000 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06002001 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
2002 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002003 sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
2004 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
2005 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06002006 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06002007 string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07002008 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002009 }
2010 break;
2011 }
2012 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2013 default:
2014 break;
2015 }
2016 }
2017 }
2018 }
2019 return skip;
2020}
2021
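// Record-phase counterpart of ValidateDispatchDrawDescriptorSet: updates the access state for every descriptor-backed resource used by the currently bound pipeline.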
2022void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
John Zulauf14940722021-04-12 15:19:02 -06002023 const ResourceUsageTag tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002024 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06002025 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002026 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002027 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06002028 return;
2029 }
2030
2031 using DescriptorClass = cvdescriptorset::DescriptorClass;
2032 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
2033 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
locke-lunarg61870c22020-06-09 14:51:50 -06002034 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
2035
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002036 for (const auto &stage_state : pipe->stage_state) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002037 const auto raster_state = pipe->RasterizationState();
2038 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06002039 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002040 }
locke-lunarg61870c22020-06-09 14:51:50 -06002041 for (const auto &set_binding : stage_state.descriptor_uses) {
Jeremy Gebben4d51c552022-01-06 21:27:15 -07002042 const auto *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002043 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06002044 set_binding.first.binding);
locke-lunarg61870c22020-06-09 14:51:50 -06002045 const auto descriptor_type = binding_it.GetType();
2046 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
2047 auto array_idx = 0;
2048
2049 if (binding_it.IsVariableDescriptorCount()) {
2050 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
2051 }
2052 SyncStageAccessIndex sync_index =
2053 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
2054
2055 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
2056 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
2057 switch (descriptor->GetClass()) {
2058 case DescriptorClass::ImageSampler:
2059 case DescriptorClass::Image: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07002060 // NOTE: ImageSamplerDescriptor inherits from ImageDescriptor, so this cast works for both types.
2061 const auto *image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
2062 if (image_descriptor->Invalid()) {
2063 continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002064 }
Jeremy Gebbena08da232022-02-01 15:14:52 -07002065 const auto *img_view_state = image_descriptor->GetImageViewState();
Jeremy Gebben11a68a32021-07-29 11:59:22 -06002066 // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
2067 // Descriptors, so we do not have to worry about depth slicing here.
2068 // See: VUID 00343
2069 assert(!img_view_state->IsDepthSliced());
locke-lunarg61870c22020-06-09 14:51:50 -06002070 const IMAGE_STATE *img_state = img_view_state->image_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002071 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
John Zulauf110413c2021-03-20 05:38:38 -06002072 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
2073 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
2074 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kRaster,
2075 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002076 } else {
John Zulauf110413c2021-03-20 05:38:38 -06002077 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kNonAttachment,
2078 img_view_state->normalized_subresource_range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002079 }
locke-lunarg61870c22020-06-09 14:51:50 -06002080 break;
2081 }
2082 case DescriptorClass::TexelBuffer: {
Jeremy Gebbena08da232022-02-01 15:14:52 -07002083 const auto *texel_descriptor = static_cast<const TexelDescriptor *>(descriptor);
2084 if (texel_descriptor->Invalid()) {
2085 continue;
2086 }
2087 const auto *buf_view_state = texel_descriptor->GetBufferViewState();
2088 const auto *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002089 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002090 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002091 break;
2092 }
2093 case DescriptorClass::GeneralBuffer: {
2094 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
Jeremy Gebbena08da232022-02-01 15:14:52 -07002095 if (buffer_descriptor->Invalid()) {
2096 continue;
2097 }
2098 const auto *buf_state = buffer_descriptor->GetBufferState();
John Zulauf3e86bf02020-09-12 10:47:57 -06002099 const ResourceAccessRange range =
2100 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07002101 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002102 break;
2103 }
2104 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2105 default:
2106 break;
2107 }
2108 }
2109 }
2110 }
2111}
2112
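// Checks the bound vertex buffers for hazards over the vertex range read by this draw.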
2113bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
2114 bool skip = false;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002115 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002116 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002117 return skip;
2118 }
2119
2120 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2121 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002122 const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002123
2124 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002125 const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002126 if (binding_description.binding < binding_buffers_size) {
2127 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002128 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002129
locke-lunarg1ae57d62020-11-18 10:49:19 -07002130 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002131 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2132 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002133 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002134 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002135 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002136 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
2137 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
2138 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002139 }
2140 }
2141 }
2142 return skip;
2143}
2144
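// Updates the access state for the vertex attribute reads performed by this draw.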
John Zulauf14940722021-04-12 15:19:02 -06002145void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002146 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002147 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002148 return;
2149 }
2150 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2151 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002152 const auto &binding_descriptions_size = pipe->vertex_input_state->binding_descriptions.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002153
2154 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002155 const auto &binding_description = pipe->vertex_input_state->binding_descriptions[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002156 if (binding_description.binding < binding_buffers_size) {
2157 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002158 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002159
locke-lunarg1ae57d62020-11-18 10:49:19 -07002160 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002161 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2162 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002163 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
2164 SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002165 }
2166 }
2167}
2168
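// Checks the bound index buffer for hazards over the index range read by this draw, then conservatively checks all vertex buffers.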
2169bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2170 bool skip = false;
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002171 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002172 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002173 }
locke-lunarg61870c22020-06-09 14:51:50 -06002174
locke-lunarg1ae57d62020-11-18 10:49:19 -07002175 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002176 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002177 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2178 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002179 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002180 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002181 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002182 index_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
2183 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer()).c_str(),
2184 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002185 }
2186
 2187 // TODO: For now, we check the whole vertex buffer, since the index buffer contents can change up until queue submission.
 2188 // We will detect a more accurate range in the future.
2189 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2190 return skip;
2191}
2192
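// Updates the access state for the index reads of this draw, then conservatively records all vertex buffer reads.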
John Zulauf14940722021-04-12 15:19:02 -06002193void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag tag) {
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002194 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002195
locke-lunarg1ae57d62020-11-18 10:49:19 -07002196 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002197 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002198 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2199 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002200 current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002201
 2202 // TODO: For now, we check the whole vertex buffer, since the index buffer contents can change up until queue submission.
 2203 // We will detect a more accurate range in the future.
2204 RecordDrawVertex(UINT32_MAX, 0, tag);
2205}
2206
2207bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002208 bool skip = false;
2209 if (!current_renderpass_context_) return skip;
John Zulauf64ffe552021-02-06 10:25:07 -07002210 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
locke-lunarg7077d502020-06-18 21:37:26 -06002211 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002212}
2213
John Zulauf14940722021-04-12 15:19:02 -06002214void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002215 if (current_renderpass_context_) {
John Zulauf64ffe552021-02-06 10:25:07 -07002216 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002217 }
locke-lunarg61870c22020-06-09 14:51:50 -06002218}
2219
John Zulauf41a9c7c2021-12-07 15:59:53 -07002220ResourceUsageTag CommandBufferAccessContext::RecordBeginRenderPass(CMD_TYPE cmd, const RENDER_PASS_STATE &rp_state,
2221 const VkRect2D &render_area,
2222 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
John Zulauf355e49b2020-04-24 15:11:15 -06002223 // Create an access context for the current renderpass.
John Zulauf41a9c7c2021-12-07 15:59:53 -07002224 const auto barrier_tag = NextCommandTag(cmd, ResourceUsageRecord::SubcommandType::kSubpassTransition);
2225 const auto load_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kLoadOp);
John Zulauf64ffe552021-02-06 10:25:07 -07002226 render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
John Zulauf16adfc92020-04-08 10:28:33 -06002227 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf41a9c7c2021-12-07 15:59:53 -07002228 current_renderpass_context_->RecordBeginRenderPass(barrier_tag, load_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002229 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf41a9c7c2021-12-07 15:59:53 -07002230 return barrier_tag;
John Zulauf16adfc92020-04-08 10:28:33 -06002231}
2232
John Zulauf41a9c7c2021-12-07 15:59:53 -07002233ResourceUsageTag CommandBufferAccessContext::RecordNextSubpass(const CMD_TYPE cmd) {
John Zulauf16adfc92020-04-08 10:28:33 -06002234 assert(current_renderpass_context_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002235 if (!current_renderpass_context_) return NextCommandTag(cmd);
2236
2237 auto store_tag = NextCommandTag(cmd, ResourceUsageRecord::SubcommandType::kStoreOp);
2238 auto barrier_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kSubpassTransition);
2239 auto load_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kLoadOp);
2240
2241 current_renderpass_context_->RecordNextSubpass(store_tag, barrier_tag, load_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002242 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf41a9c7c2021-12-07 15:59:53 -07002243 return barrier_tag;
John Zulauf16adfc92020-04-08 10:28:33 -06002244}
2245
John Zulauf41a9c7c2021-12-07 15:59:53 -07002246ResourceUsageTag CommandBufferAccessContext::RecordEndRenderPass(const CMD_TYPE cmd) {
John Zulauf16adfc92020-04-08 10:28:33 -06002247 assert(current_renderpass_context_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002248 if (!current_renderpass_context_) return NextCommandTag(cmd);
John Zulauf16adfc92020-04-08 10:28:33 -06002249
John Zulauf41a9c7c2021-12-07 15:59:53 -07002250 auto store_tag = NextCommandTag(cmd, ResourceUsageRecord::SubcommandType::kStoreOp);
2251 auto barrier_tag = NextSubcommandTag(cmd, ResourceUsageRecord::SubcommandType::kSubpassTransition);
2252
2253 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, store_tag, barrier_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002254 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002255 current_renderpass_context_ = nullptr;
John Zulauf41a9c7c2021-12-07 15:59:53 -07002256 return barrier_tag;
John Zulauf16adfc92020-04-08 10:28:33 -06002257}
2258
John Zulauf4a6105a2020-11-17 15:11:05 -07002259void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
 2260 // Erase is okay with the key not being present
Jeremy Gebbenf4449392022-01-28 10:09:10 -07002261 auto event_state = sync_state_->Get<EVENT_STATE>(event);
John Zulauf669dfd52021-01-27 17:15:28 -07002262 if (event_state) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06002263 GetCurrentEventsContext()->Destroy(event_state.get());
John Zulaufd5115702021-01-18 12:34:33 -07002264 }
2265}
2266
John Zulaufae842002021-04-15 18:20:55 -06002267// This is called on the recorded command buffer's access context; hazards are validated against the proxy (execution) context
John Zulaufbb890452021-12-14 11:30:18 -07002268bool CommandBufferAccessContext::ValidateFirstUse(CommandExecutionContext *proxy_context, const char *func_name,
John Zulauf4fa68462021-04-26 21:04:22 -06002269 uint32_t index) const {
2270 assert(proxy_context);
2271 auto *events_context = proxy_context->GetCurrentEventsContext();
2272 auto *access_context = proxy_context->GetCurrentAccessContext();
2273 const ResourceUsageTag base_tag = proxy_context->GetTagLimit();
John Zulaufae842002021-04-15 18:20:55 -06002274 bool skip = false;
2275 ResourceUsageRange tag_range = {0, 0};
2276 const AccessContext *recorded_context = GetCurrentAccessContext();
2277 assert(recorded_context);
2278 HazardResult hazard;
John Zulaufbb890452021-12-14 11:30:18 -07002279 auto log_msg = [this](const HazardResult &hazard, const CommandExecutionContext &exec_context, const char *func_name,
John Zulaufae842002021-04-15 18:20:55 -06002280 uint32_t index) {
John Zulaufbb890452021-12-14 11:30:18 -07002281 const auto handle = exec_context.Handle();
John Zulaufae842002021-04-15 18:20:55 -06002282 const auto recorded_handle = cb_state_->commandBuffer();
John Zulauf4fa68462021-04-26 21:04:22 -06002283 const auto *report_data = sync_state_->report_data;
John Zulaufbb890452021-12-14 11:30:18 -07002284 return sync_state_->LogError(handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf4fa68462021-04-26 21:04:22 -06002285 "%s: Hazard %s for entry %" PRIu32 ", %s, Recorded access info %s. Access info %s.", func_name,
2286 string_SyncHazard(hazard.hazard), index, report_data->FormatHandle(recorded_handle).c_str(),
John Zulaufbb890452021-12-14 11:30:18 -07002287 FormatUsage(*hazard.recorded_access).c_str(), exec_context.FormatUsage(hazard).c_str());
John Zulaufae842002021-04-15 18:20:55 -06002288 };
John Zulaufbb890452021-12-14 11:30:18 -07002289 const ReplayTrackbackBarriersAction *replay_context = nullptr;
John Zulaufae842002021-04-15 18:20:55 -06002290 for (const auto &sync_op : sync_ops_) {
John Zulauf4fa68462021-04-26 21:04:22 -06002291 // We update the range to include any layout transition first-use writes,
 2292 // as they are stored along with the source scope (as an effective barrier) when recorded
2293 tag_range.end = sync_op.tag + 1;
John Zulauf610e28c2021-08-03 17:46:23 -06002294 skip |= sync_op.sync_op->ReplayValidate(sync_op.tag, *this, base_tag, proxy_context);
John Zulauf4fa68462021-04-26 21:04:22 -06002295
John Zulaufbb890452021-12-14 11:30:18 -07002296 hazard = recorded_context->DetectFirstUseHazard(tag_range, *access_context, replay_context);
John Zulaufae842002021-04-15 18:20:55 -06002297 if (hazard.hazard) {
John Zulauf4fa68462021-04-26 21:04:22 -06002298 skip |= log_msg(hazard, *proxy_context, func_name, index);
John Zulaufae842002021-04-15 18:20:55 -06002299 }
2300 // NOTE: Add call to replay validate here when we add support for syncop with non-trivial replay
John Zulauf4fa68462021-04-26 21:04:22 -06002301 // Record the barrier into the proxy context.
John Zulaufbb890452021-12-14 11:30:18 -07002302 sync_op.sync_op->ReplayRecord(base_tag + sync_op.tag, access_context, events_context);
2303 replay_context = sync_op.sync_op->GetReplayTrackback();
John Zulauf4fa68462021-04-26 21:04:22 -06002304 tag_range.begin = tag_range.end;
John Zulaufae842002021-04-15 18:20:55 -06002305 }
2306
John Zulaufbb890452021-12-14 11:30:18 -07002307 // Renderpasses may not cross command buffer boundaries
2308 assert(replay_context == nullptr);
2309
John Zulaufae842002021-04-15 18:20:55 -06002310 // and anything after the last syncop
John Zulaufae842002021-04-15 18:20:55 -06002311 tag_range.end = ResourceUsageRecord::kMaxIndex;
John Zulaufbb890452021-12-14 11:30:18 -07002312 hazard = recorded_context->DetectFirstUseHazard(tag_range, *access_context, replay_context);
John Zulaufae842002021-04-15 18:20:55 -06002313 if (hazard.hazard) {
John Zulauf4fa68462021-04-26 21:04:22 -06002314 skip |= log_msg(hazard, *proxy_context, func_name, index);
John Zulaufae842002021-04-15 18:20:55 -06002315 }
2316
2317 return skip;
2318}
2319
John Zulauf4fa68462021-04-26 21:04:22 -06002320void CommandBufferAccessContext::RecordExecutedCommandBuffer(const CommandBufferAccessContext &recorded_cb_context, CMD_TYPE cmd) {
2321 auto *events_context = GetCurrentEventsContext();
2322 auto *access_context = GetCurrentAccessContext();
2323 const AccessContext *recorded_context = recorded_cb_context.GetCurrentAccessContext();
2324 assert(recorded_context);
2325
2326 // Just run through the barriers ignoring the usage from the recorded context, as Resolve will overwrite outdated state
2327 const ResourceUsageTag base_tag = GetTagLimit();
2328 for (const auto &sync_op : recorded_cb_context.sync_ops_) {
 2329 // We update the range to include any layout transition first-use writes,
 2330 // as they are stored along with the source scope (as an effective barrier) when recorded
John Zulaufbb890452021-12-14 11:30:18 -07002331 sync_op.sync_op->ReplayRecord(base_tag + sync_op.tag, access_context, events_context);
John Zulauf4fa68462021-04-26 21:04:22 -06002332 }
2333
2334 ResourceUsageRange tag_range = ImportRecordedAccessLog(recorded_cb_context);
2335 assert(base_tag == tag_range.begin); // to ensure the to offset calculation agree
2336 ResolveRecordedContext(*recorded_context, tag_range.begin);
2337}
2338
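// Resolves the recorded context's access maps into the current context, offsetting the recorded usage tags so they index into this command buffer's access log.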
2339void CommandBufferAccessContext::ResolveRecordedContext(const AccessContext &recorded_context, ResourceUsageTag offset) {
2340 auto tag_offset = [offset](ResourceAccessState *access) { access->OffsetTag(offset); };
2341
2342 auto *access_context = GetCurrentAccessContext();
2343 for (auto address_type : kAddressTypes) {
2344 recorded_context.ResolveAccessRange(address_type, kFullRange, tag_offset, &access_context->GetAccessStateMap(address_type),
2345 nullptr, false);
2346 }
2347}
2348
2349ResourceUsageRange CommandBufferAccessContext::ImportRecordedAccessLog(const CommandBufferAccessContext &recorded_context) {
 2350 // The execution references ensure the lifespan of the referenced child CBs...
2351 ResourceUsageRange tag_range(GetTagLimit(), 0);
John Zulauf3c2a0b32021-07-14 11:14:52 -06002352 cbs_referenced_.emplace(recorded_context.cb_state_);
John Zulauf4fa68462021-04-26 21:04:22 -06002353 access_log_.insert(access_log_.end(), recorded_context.access_log_.cbegin(), recorded_context.access_log_.end());
2354 tag_range.end = access_log_.size();
2355 return tag_range;
2356}
2357
John Zulauf41a9c7c2021-12-07 15:59:53 -07002358ResourceUsageTag CommandBufferAccessContext::NextSubcommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
2359 ResourceUsageTag next = access_log_.size();
2360 access_log_.emplace_back(command, command_number_, subcommand, ++subcommand_number_, cb_state_.get(), reset_count_);
2361 return next;
2362}
2363
2364ResourceUsageTag CommandBufferAccessContext::NextCommandTag(CMD_TYPE command, ResourceUsageRecord::SubcommandType subcommand) {
2365 command_number_++;
2366 subcommand_number_ = 0;
2367 ResourceUsageTag next = access_log_.size();
2368 access_log_.emplace_back(command, command_number_, subcommand, subcommand_number_, cb_state_.get(), reset_count_);
2369 return next;
2370}
2371
2372ResourceUsageTag CommandBufferAccessContext::NextIndexedCommandTag(CMD_TYPE command, uint32_t index) {
2373 if (index == 0) {
2374 return NextCommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
2375 }
2376 return NextSubcommandTag(command, ResourceUsageRecord::SubcommandType::kIndex);
2377}
2378
John Zulaufbb890452021-12-14 11:30:18 -07002379void CommandBufferAccessContext::RecordSyncOp(SyncOpPointer &&sync_op) {
2380 auto tag = sync_op->Record(this);
2381 // As renderpass operations can have side effects on the command buffer access context,
2382 // update the sync operation to record these if any.
2383 if (current_renderpass_context_) {
2384 const auto &rpc = *current_renderpass_context_;
2385 sync_op->SetReplayContext(rpc.GetCurrentSubpass(), rpc.GetReplayContext());
2386 }
2387 sync_ops_.emplace_back(tag, std::move(sync_op));
2388}
2389
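// Detector used for first-use hazard checks when replaying a recorded command buffer; if a replay barrier is present, it is applied to a copy of the access before detection.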
John Zulaufae842002021-04-15 18:20:55 -06002390class HazardDetectFirstUse {
2391 public:
John Zulaufbb890452021-12-14 11:30:18 -07002392 HazardDetectFirstUse(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
2393 const ReplayTrackbackBarriersAction *replay_barrier)
2394 : recorded_use_(recorded_use), tag_range_(tag_range), replay_barrier_(replay_barrier) {}
John Zulaufae842002021-04-15 18:20:55 -06002395 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulaufbb890452021-12-14 11:30:18 -07002396 if (replay_barrier_) {
2397 // Intentional copy to apply the replay barrier
2398 auto access = pos->second;
2399 (*replay_barrier_)(&access);
2400 return access.DetectHazard(recorded_use_, tag_range_);
2401 }
John Zulaufae842002021-04-15 18:20:55 -06002402 return pos->second.DetectHazard(recorded_use_, tag_range_);
2403 }
2404 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
2405 return pos->second.DetectAsyncHazard(recorded_use_, tag_range_, start_tag);
2406 }
2407
2408 private:
2409 const ResourceAccessState &recorded_use_;
2410 const ResourceUsageRange &tag_range_;
John Zulaufbb890452021-12-14 11:30:18 -07002411 const ReplayTrackbackBarriersAction *replay_barrier_;
John Zulaufae842002021-04-15 18:20:55 -06002412};
2413
 2414// This is called with the *recorded* command buffer's access context, with the *active* access context passed in, against which
 2415// hazards will be detected
John Zulaufbb890452021-12-14 11:30:18 -07002416HazardResult AccessContext::DetectFirstUseHazard(const ResourceUsageRange &tag_range, const AccessContext &access_context,
2417 const ReplayTrackbackBarriersAction *replay_barrier) const {
John Zulaufae842002021-04-15 18:20:55 -06002418 HazardResult hazard;
2419 for (const auto address_type : kAddressTypes) {
2420 const auto &recorded_access_map = GetAccessStateMap(address_type);
2421 for (const auto &recorded_access : recorded_access_map) {
2422 // Cull any entries not in the current tag range
2423 if (!recorded_access.second.FirstAccessInTagRange(tag_range)) continue;
John Zulaufbb890452021-12-14 11:30:18 -07002424 HazardDetectFirstUse detector(recorded_access.second, tag_range, replay_barrier);
John Zulaufae842002021-04-15 18:20:55 -06002425 hazard = access_context.DetectHazard(address_type, detector, recorded_access.first, DetectOptions::kDetectAll);
2426 if (hazard.hazard) break;
2427 }
2428 }
2429
2430 return hazard;
2431}
2432
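// Checks the current subpass's color and depth/stencil attachments for hazards from the attachment writes of this draw.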
John Zulaufbb890452021-12-14 11:30:18 -07002433bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &exec_context,
2434 const CMD_BUFFER_STATE &cmd, const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002435 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07002436 const auto &sync_state = exec_context.GetSyncState();
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002437 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002438 if (!pipe) {
2439 return skip;
2440 }
2441
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002442 const auto raster_state = pipe->RasterizationState();
2443 if (raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002444 return skip;
2445 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002446 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002447 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg37047832020-06-12 13:44:45 -06002448
John Zulauf1a224292020-06-30 14:52:13 -06002449 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002450 // The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002451 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2452 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002453 if (location >= subpass.colorAttachmentCount ||
2454 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002455 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002456 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002457 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2458 if (!view_gen.IsValid()) continue;
2459 HazardResult hazard =
2460 current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
2461 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment);
locke-lunarg96dc9632020-06-10 17:22:18 -06002462 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002463 const VkImageView view_handle = view_gen.GetViewState()->image_view();
John Zulaufd0ec59f2021-03-13 14:25:08 -07002464 skip |= sync_state.LogError(view_handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002465 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002466 func_name, string_SyncHazard(hazard.hazard),
John Zulaufd0ec59f2021-03-13 14:25:08 -07002467 sync_state.report_data->FormatHandle(view_handle).c_str(),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002468 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulaufbb890452021-12-14 11:30:18 -07002469 location, exec_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002470 }
2471 }
2472 }
locke-lunarg37047832020-06-12 13:44:45 -06002473
 2474 // PHASE1 TODO: Add layout-based read vs. write selection.
2475 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002476 const auto ds_state = pipe->DepthStencilState();
2477 const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002478
2479 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2480 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2481 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002482 bool depth_write = false, stencil_write = false;
2483
2484 // PHASE1 TODO: These validation should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002485 if (!FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable && ds_state->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002486 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2487 depth_write = true;
2488 }
2489 // PHASE1 TODO: It needs to check if stencil is writable.
2490 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
 2491 // If the depth test is disabled, the depth test is considered to pass, so depthFailOp doesn't run.
 2492 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002493 if (!FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002494 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2495 stencil_write = true;
2496 }
2497
2498 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2499 if (depth_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002500 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
2501 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2502 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002503 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002504 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002505 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002506 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002507 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002508 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2509 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulaufbb890452021-12-14 11:30:18 -07002510 exec_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002511 }
2512 }
2513 if (stencil_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002514 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
2515 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2516 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002517 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002518 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002519 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002520 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002521 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002522 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2523 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulaufbb890452021-12-14 11:30:18 -07002524 exec_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002525 }
locke-lunarg61870c22020-06-09 14:51:50 -06002526 }
2527 }
2528 return skip;
2529}
2530
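// Updates the access state for the color and depth/stencil attachment writes of this draw.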
John Zulauf14940722021-04-12 15:19:02 -06002531void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002532 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002533 if (!pipe) {
2534 return;
2535 }
2536
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002537 const auto *raster_state = pipe->RasterizationState();
2538 if (raster_state && raster_state->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002539 return;
2540 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002541 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002542 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg61870c22020-06-09 14:51:50 -06002543
John Zulauf1a224292020-06-30 14:52:13 -06002544 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002545 // The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002546 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2547 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002548 if (location >= subpass.colorAttachmentCount ||
2549 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002550 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002551 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002552 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2553 current_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
2554 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment,
2555 tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002556 }
2557 }
locke-lunarg37047832020-06-12 13:44:45 -06002558
 2559 // PHASE1 TODO: Add layout-based read vs. write selection.
2560 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002561 const auto *ds_state = pipe->DepthStencilState();
2562 const uint32_t depth_stencil_attachment = GetSubpassDepthStencilAttachmentIndex(ds_state, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002563 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2564 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2565 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002566 bool depth_write = false, stencil_write = false;
John Zulaufd0ec59f2021-03-13 14:25:08 -07002567 const bool has_depth = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT);
2568 const bool has_stencil = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002569
 2570 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002571 if (has_depth && !FormatIsStencilOnly(view_state.create_info.format) && ds_state->depthTestEnable &&
2572 ds_state->depthWriteEnable && IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
locke-lunarg37047832020-06-12 13:44:45 -06002573 depth_write = true;
2574 }
2575 // PHASE1 TODO: It needs to check if stencil is writable.
2576 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
 2577 // If the depth test is disabled, the depth test is considered to pass, so depthFailOp doesn't run.
 2578 // PHASE1 TODO: These validations should be in core_checks.
Nathaniel Cesario3fd4f762022-02-16 16:07:06 -07002579 if (has_stencil && !FormatIsDepthOnly(view_state.create_info.format) && ds_state->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002580 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2581 stencil_write = true;
2582 }
2583
John Zulaufd0ec59f2021-03-13 14:25:08 -07002584 if (depth_write || stencil_write) {
2585 const auto ds_gentype = view_gen.GetDepthStencilRenderAreaGenType(depth_write, stencil_write);
2586 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2587 current_context.UpdateAccessState(view_gen, ds_gentype, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2588 SyncOrdering::kDepthStencilAttachment, tag);
locke-lunarg37047832020-06-12 13:44:45 -06002589 }
locke-lunarg61870c22020-06-09 14:51:50 -06002590 }
2591}
2592
John Zulaufbb890452021-12-14 11:30:18 -07002593bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &exec_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002594 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002595 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07002596 skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002597 current_subpass_);
John Zulaufbb890452021-12-14 11:30:18 -07002598 skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002599 func_name);
2600
John Zulauf355e49b2020-04-24 15:11:15 -06002601 const auto next_subpass = current_subpass_ + 1;
ziga-lunarg31a3e772022-03-22 11:48:46 +01002602 if (next_subpass >= subpass_contexts_.size()) {
2603 return skip;
2604 }
John Zulauf1507ee42020-05-18 11:33:09 -06002605 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002606 skip |=
John Zulaufbb890452021-12-14 11:30:18 -07002607 next_context.ValidateLayoutTransitions(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002608 if (!skip) {
 2609 // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2610 // on a copy of the (empty) next context.
2611 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2612 AccessContext temp_context(next_context);
John Zulaufee984022022-04-13 16:39:50 -06002613 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kInvalidTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002614 skip |=
John Zulaufbb890452021-12-14 11:30:18 -07002615 temp_context.ValidateLoadOperation(exec_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002616 }
John Zulauf7635de32020-05-29 17:14:15 -06002617 return skip;
2618}
John Zulaufbb890452021-12-14 11:30:18 -07002619bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &exec_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002620 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002621 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07002622 skip |= CurrentContext().ValidateResolveOperations(exec_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002623 current_subpass_);
John Zulaufbb890452021-12-14 11:30:18 -07002624 skip |= CurrentContext().ValidateStoreOperation(exec_context, *rp_state_, render_area_, current_subpass_,
2626 attachment_views_, func_name);
John Zulaufbb890452021-12-14 11:30:18 -07002627 skip |= ValidateFinalSubpassLayoutTransitions(exec_context, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002628 return skip;
2629}
2630
John Zulauf64ffe552021-02-06 10:25:07 -07002631AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002632 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002633}
2634
John Zulaufbb890452021-12-14 11:30:18 -07002635bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &exec_context,
John Zulauf64ffe552021-02-06 10:25:07 -07002636 const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002637 bool skip = false;
2638
John Zulauf7635de32020-05-29 17:14:15 -06002639 // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2640 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2641 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2642 // to apply and only copy then, if this proves a hot spot.
2643 std::unique_ptr<AccessContext> proxy_for_current;
2644
John Zulauf355e49b2020-04-24 15:11:15 -06002645 // Validate the "finalLayout" transitions to external
2646 // Get them from where we're hiding them in the extra entry.
2647 const auto &final_transitions = rp_state_->subpass_transitions.back();
2648 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002649 const auto &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002650 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufbb890452021-12-14 11:30:18 -07002651 assert(trackback.source_subpass); // Transitions are given implicit transitions if the StateTracker is working correctly
2652 auto *context = trackback.source_subpass;
John Zulauf7635de32020-05-29 17:14:15 -06002653
2654 if (transition.prev_pass == current_subpass_) {
2655 if (!proxy_for_current) {
2656 // We haven't recorded resolve operations for the current_subpass, so we need to copy current and update it *as if* they had been recorded.
John Zulauf64ffe552021-02-06 10:25:07 -07002657 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002658 }
2659 context = proxy_for_current.get();
2660 }
2661
John Zulaufa0a98292020-09-18 09:30:10 -06002662 // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2663 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002664 auto hazard = context->DetectImageBarrierHazard(view_gen, merged_barrier, AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002665 if (hazard.hazard) {
John Zulaufee984022022-04-13 16:39:50 -06002666 if (hazard.tag == kInvalidTag) {
2667 // Hazard vs. ILT
John Zulaufbb890452021-12-14 11:30:18 -07002668 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06002669 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
2670 "%s: Hazard %s vs. store/resolve operations in subpass %" PRIu32 " for attachment %" PRIu32
2671 " final image layout transition (old_layout: %s, new_layout: %s).",
2672 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2673 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout));
2674 } else {
John Zulaufbb890452021-12-14 11:30:18 -07002675 skip |= exec_context.GetSyncState().LogError(
John Zulaufee984022022-04-13 16:39:50 -06002676 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
2677 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2678 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2679 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2680 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulaufbb890452021-12-14 11:30:18 -07002681 exec_context.FormatUsage(hazard).c_str());
John Zulaufee984022022-04-13 16:39:50 -06002682 }
John Zulauf355e49b2020-04-24 15:11:15 -06002683 }
2684 }
2685 return skip;
2686}
2687
John Zulauf14940722021-04-12 15:19:02 -06002688void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002689 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002690 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002691}
2692
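// Record the load-op accesses for every attachment first used in this subpass: color attachments record under the
// color-attachment ordering rule for the render area, depth/stencil attachments under the depth/stencil rule for
// their respective aspect ranges; load ops that map to SYNC_ACCESS_INDEX_NONE record nothing.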
John Zulauf14940722021-04-12 15:19:02 -06002693void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002694 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2695 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf1507ee42020-05-18 11:33:09 -06002696
2697 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2698 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002699 const AttachmentViewGen &view_gen = attachment_views_[i];
2700 if (!view_gen.IsValid()) continue; // UNUSED
John Zulauf1507ee42020-05-18 11:33:09 -06002701
2702 const auto &ci = attachment_ci[i];
2703 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002704 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002705 const bool is_color = !(has_depth || has_stencil);
2706
2707 if (is_color) {
John Zulauf57261402021-08-13 11:32:06 -06002708 const SyncStageAccessIndex load_op = ColorLoadUsage(ci.loadOp);
2709 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2710 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea, load_op,
2711 SyncOrdering::kColorAttachment, tag);
2712 }
John Zulauf1507ee42020-05-18 11:33:09 -06002713 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06002714 if (has_depth) {
John Zulauf57261402021-08-13 11:32:06 -06002715 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.loadOp);
2716 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2717 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_op,
2718 SyncOrdering::kDepthStencilAttachment, tag);
2719 }
John Zulauf1507ee42020-05-18 11:33:09 -06002720 }
2721 if (has_stencil) {
John Zulauf57261402021-08-13 11:32:06 -06002722 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.stencilLoadOp);
2723 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2724 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, load_op,
2725 SyncOrdering::kDepthStencilAttachment, tag);
2726 }
John Zulauf1507ee42020-05-18 11:33:09 -06002727 }
2728 }
2729 }
2730 }
2731}
John Zulaufd0ec59f2021-03-13 14:25:08 -07002732AttachmentViewGenVector RenderPassAccessContext::CreateAttachmentViewGen(
2733 const VkRect2D &render_area, const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
2734 AttachmentViewGenVector view_gens;
2735 VkExtent3D extent = CastTo3D(render_area.extent);
2736 VkOffset3D offset = CastTo3D(render_area.offset);
2737 view_gens.reserve(attachment_views.size());
2738 for (const auto *view : attachment_views) {
2739 view_gens.emplace_back(view, offset, extent);
2740 }
2741 return view_gens;
2742}
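// Build the per-renderpass tracking state: one AccessContext per subpass (linked through the subpass dependencies
// and the external context), a shared ReplayRenderpassContext with one replay context per subpass, and the
// per-attachment range generators clipped to the render area.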
John Zulauf64ffe552021-02-06 10:25:07 -07002743RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2744 VkQueueFlags queue_flags,
2745 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2746 const AccessContext *external_context)
John Zulaufd0ec59f2021-03-13 14:25:08 -07002747 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_() {
John Zulauf355e49b2020-04-24 15:11:15 -06002748 // Add this for all subpasses here so that they exist during next subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002749 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulaufbb890452021-12-14 11:30:18 -07002750 replay_context_ = std::make_shared<ReplayRenderpassContext>();
2751 auto &replay_subpass_contexts = replay_context_->subpass_contexts;
2752 replay_subpass_contexts.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002753 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002754 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulaufbb890452021-12-14 11:30:18 -07002755 replay_subpass_contexts.emplace_back(queue_flags, rp_state_->subpass_dependencies[pass], replay_subpass_contexts);
John Zulauf355e49b2020-04-24 15:11:15 -06002756 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002757 attachment_views_ = CreateAttachmentViewGen(render_area, attachment_views);
John Zulauf64ffe552021-02-06 10:25:07 -07002758}
John Zulauf41a9c7c2021-12-07 15:59:53 -07002759void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag barrier_tag, const ResourceUsageTag load_tag) {
John Zulauf64ffe552021-02-06 10:25:07 -07002760 assert(0 == current_subpass_);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002761 subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
2762 RecordLayoutTransitions(barrier_tag);
2763 RecordLoadOperations(load_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002764}
John Zulauf1507ee42020-05-18 11:33:09 -06002765
John Zulauf41a9c7c2021-12-07 15:59:53 -07002766void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag store_tag, const ResourceUsageTag barrier_tag,
2767 const ResourceUsageTag load_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002768 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauf41a9c7c2021-12-07 15:59:53 -07002769 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
2770 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002771
ziga-lunarg31a3e772022-03-22 11:48:46 +01002772 if (current_subpass_ + 1 >= subpass_contexts_.size()) {
2773 return;
2774 }
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002775 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2776 // subpass, so their tag needs to differ from the tag used for the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002777 current_subpass_++;
John Zulauf41a9c7c2021-12-07 15:59:53 -07002778 subpass_contexts_[current_subpass_].SetStartTag(barrier_tag);
2779 RecordLayoutTransitions(barrier_tag);
2780 RecordLoadOperations(load_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002781}
2782
John Zulauf41a9c7c2021-12-07 15:59:53 -07002783void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag store_tag,
2784 const ResourceUsageTag barrier_tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002785 // Add the resolve and store accesses
John Zulauf41a9c7c2021-12-07 15:59:53 -07002786 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
2787 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, store_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002788
John Zulauf355e49b2020-04-24 15:11:15 -06002789 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002790 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002791
2792 // Add the "finalLayout" transitions to external
2793 // Get them from where we're hiding them in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002794 // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2795 // TODO: With aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2796 // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002797 const auto &final_transitions = rp_state_->subpass_transitions.back();
2798 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002799 const AttachmentViewGen &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002800 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufbb890452021-12-14 11:30:18 -07002801 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.source_subpass);
John Zulauf41a9c7c2021-12-07 15:59:53 -07002802 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), barrier_tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002803 for (const auto &barrier : last_trackback.barriers) {
John Zulaufd5115702021-01-18 12:34:33 -07002804 barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002805 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002806 external_context->ApplyUpdateAction(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002807 }
2808}
2809
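// Build an execution scope from an application-provided stage mask: MakeSrc expands the mask (resolving meta-stages
// such as ALL_COMMANDS against the queue's capabilities) and folds in all logically *earlier* stages, while MakeDst
// folds in all logically *later* stages; the accesses valid for the resulting scope are precomputed in both cases.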
Jeremy Gebben40a22942020-12-22 14:22:06 -07002810SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002811 SyncExecScope result;
2812 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002813 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2814 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002815 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2816 return result;
2817}
2818
Jeremy Gebben40a22942020-12-22 14:22:06 -07002819SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002820 SyncExecScope result;
2821 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002822 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2823 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002824 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2825 return result;
2826}
2827
2828SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002829 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002830 src_access_scope = 0;
John Zulaufc523bf62021-02-16 08:20:34 -07002831 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002832 dst_access_scope = 0;
2833}
2834
2835template <typename Barrier>
2836SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002837 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002838 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002839 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002840 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2841}
2842
2843SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002844 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2845 if (barrier) {
2846 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002847 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002848 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002849
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002850 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002851 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002852 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2853
2854 } else {
2855 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002856 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002857 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2858
2859 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002860 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002861 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2862 }
2863}
2864
2865template <typename Barrier>
2866SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2867 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2868 src_exec_scope = src.exec_scope;
2869 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2870
2871 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002872 dst_exec_scope = dst.exec_scope;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002873 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002874}
2875
John Zulaufb02c1eb2020-10-06 16:33:36 -06002876// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2877void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2878 for (const auto &barrier : barriers) {
2879 ApplyBarrier(barrier, layout_transition);
2880 }
2881}
2882
John Zulauf89311b42020-09-29 16:28:47 -06002883 // ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. The designed use was for
2884 // inter-subpass barriers for lazy-evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2885// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufbb890452021-12-14 11:30:18 -07002886void ResourceAccessState::ApplyBarriersImmediate(const std::vector<SyncBarrier> &barriers) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06002887 assert(!pending_layout_transition); // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002888 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002889 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002890 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002891 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002892 }
John Zulaufbb890452021-12-14 11:30:18 -07002893 ApplyPendingBarriers(kInvalidTag); // There can't be any need for this tag
John Zulauf3d84f1b2020-03-09 13:33:25 -06002894}
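// Detect a hazard for a usage with no ordering guarantees: reads are checked for read-after-write against the most
// recent write; writes are checked for write-after-read against any outstanding reads, or write-after-write against
// the last write when no reads have occurred since it.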
John Zulauf9cb530d2019-09-30 14:14:10 -06002895HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2896 HazardResult hazard;
2897 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002898 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002899 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002900 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002901 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002902 }
2903 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002904 // Write operation:
2905 // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads present).
2906 // If reads exist -- test only against them because either:
2907 // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
2908 // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
2909 // the current write happens after the reads, so just test the write against the reads
2910 // Otherwise test against last_write
2911 //
2912 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07002913 if (last_reads.size()) {
2914 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06002915 if (IsReadHazard(usage_stage, read_access)) {
2916 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2917 break;
2918 }
2919 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002920 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06002921 // Write-After-Write check -- if we have a previous write to test against
2922 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002923 }
2924 }
2925 return hazard;
2926}
2927
John Zulauf4fa68462021-04-26 21:04:22 -06002928HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering ordering_rule) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002929 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf4fa68462021-04-26 21:04:22 -06002930 return DetectHazard(usage_index, ordering);
2931}
2932
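// Detect a hazard while honoring ordering guarantees (e.g. raster order for attachment accesses): the ordering acts
// as an implicit barrier against the most recent accesses, so ordered usages do not report hazards vs. ordered prior
// accesses; image layout transitions are instead routed to DetectBarrierHazard using the ordering as the first scope.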
2933HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const OrderingBarrier &ordering) const {
John Zulauf69133422020-05-20 14:55:53 -06002934 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
2935 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06002936 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002937 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002938 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
2939 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06002940 if (IsRead(usage_bit)) {
2941 // Exclude RAW if there is no write, or the write is not the most "recent" operation w.r.t. usage;
2942 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
2943 if (is_raw_hazard) {
2944 // NOTE: we know last_write is non-zero
2945 // See if the ordering rules save us from the simple RAW check above
2946 // First check to see if the current usage is covered by the ordering rules
2947 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
2948 const bool usage_is_ordered =
2949 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
2950 if (usage_is_ordered) {
2951 // Now see if the most recent write (or a subsequent read) is ordered
2952 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
2953 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06002954 }
2955 }
John Zulauf4285ee92020-09-23 10:20:52 -06002956 if (is_raw_hazard) {
2957 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
2958 }
John Zulauf5c628d02021-05-04 15:46:36 -06002959 } else if (usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
2960 // For Image layout transitions, the barrier represents the first synchronization/access scope of the layout transition
2961 return DetectBarrierHazard(usage_index, ordering.exec_scope, ordering.access_scope);
John Zulauf361fb532020-07-22 10:45:39 -06002962 } else {
2963 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002964 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07002965 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06002966 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07002967 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06002968 if (usage_write_is_ordered) {
2969 // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
2970 ordered_stages = GetOrderedStages(ordering);
2971 }
2972 // If we're tracking any reads that aren't ordered against the current write, got to check 'em all.
2973 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002974 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06002975 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
2976 if (IsReadHazard(usage_stage, read_access)) {
2977 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2978 break;
2979 }
John Zulaufd14743a2020-07-03 09:42:39 -06002980 }
2981 }
John Zulauf2a344ca2021-09-09 17:07:19 -06002982 } else if (last_write.any() && !(last_write_is_ordered && usage_write_is_ordered)) {
2983 bool ilt_ilt_hazard = false;
2984 if ((usage_index == SYNC_IMAGE_LAYOUT_TRANSITION) && (usage_bit == last_write)) {
2985 // ILT after ILT is a special case where we check the 2nd access scope of the first ILT against the first access
2986 // scope of the second ILT, which has been passed (smuggled?) in the ordering barrier
2987 ilt_ilt_hazard = !(write_barriers & ordering.access_scope).any();
2988 }
2989 if (ilt_ilt_hazard || IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002990 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06002991 }
John Zulauf69133422020-05-20 14:55:53 -06002992 }
2993 }
2994 return hazard;
2995}
2996
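// Check a recorded access state (e.g. from a replayed secondary command buffer) against this one: each recorded
// "first access" whose tag falls in tag_range is replayed through the ordering-aware DetectHazard above, with the
// final recorded write getting extra ordering information (layout-transition scope and recorded read stages) folded
// into its barrier before the check.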
John Zulaufae842002021-04-15 18:20:55 -06002997HazardResult ResourceAccessState::DetectHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range) const {
2998 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06002999 using Size = FirstAccesses::size_type;
3000 const auto &recorded_accesses = recorded_use.first_accesses_;
3001 Size count = recorded_accesses.size();
3002 if (count) {
3003 const auto &last_access = recorded_accesses.back();
3004 bool do_write_last = IsWrite(last_access.usage_index);
3005 if (do_write_last) --count;
John Zulaufae842002021-04-15 18:20:55 -06003006
John Zulauf4fa68462021-04-26 21:04:22 -06003007 for (Size i = 0; i < count; ++i) {
3008 const auto &first = recorded_accesses[i];
3009 // Skip and quit logic
3010 if (first.tag < tag_range.begin) continue;
3011 if (first.tag >= tag_range.end) {
3012 do_write_last = false; // ignore last since we know it can't be in tag_range
3013 break;
3014 }
3015
3016 hazard = DetectHazard(first.usage_index, first.ordering_rule);
3017 if (hazard.hazard) {
3018 hazard.AddRecordedAccess(first);
3019 break;
3020 }
3021 }
3022
3023 if (do_write_last && tag_range.includes(last_access.tag)) {
3024 // Writes are a bit special... both for the "most recent" access logic, and layout transition specific logic
3025 OrderingBarrier barrier = GetOrderingRules(last_access.ordering_rule);
3026 if (last_access.usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
3027 // Or in the layout first access scope as a barrier... IFF the usage is an ILT
3028 // this was saved off in the "apply barriers" logic to simplify ILT access checks as they straddle
3029 // the barrier that applies them
3030 barrier |= recorded_use.first_write_layout_ordering_;
3031 }
3032 // Any read stages present in the recorded context (this) are most recent to the write, and thus mask those stages in
3033 // the active context
3034 if (recorded_use.first_read_stages_) {
3035 // we need to ignore the first use read stages in the active context (so we add them to the ordering rule);
3036 // reads in the active context are not "most recent" as all recorded context operations are *after* them
3037 // This suppresses only RAW checks for stages present in the recorded context, but not those only present in the
3038 // active context.
3039 barrier.exec_scope |= recorded_use.first_read_stages_;
3040 // if there are any first use reads, we suppress WAW by injecting the active context write in the ordering rule
3041 barrier.access_scope |= FlagBit(last_access.usage_index);
3042 }
3043 hazard = DetectHazard(last_access.usage_index, barrier);
3044 if (hazard.hazard) {
3045 hazard.AddRecordedAccess(last_access);
3046 }
3047 }
John Zulaufae842002021-04-15 18:20:55 -06003048 }
3049 return hazard;
3050}
3051
John Zulauf2f952d22020-02-10 11:34:51 -07003052// Asynchronous Hazards occur between subpasses with no connection through the DAG
John Zulauf14940722021-04-12 15:19:02 -06003053HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07003054 HazardResult hazard;
3055 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003056 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
3057 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
3058 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07003059 if (IsRead(usage)) {
John Zulauf14940722021-04-12 15:19:02 -06003060 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06003061 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07003062 }
3063 } else {
John Zulauf14940722021-04-12 15:19:02 -06003064 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06003065 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07003066 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003067 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07003068 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06003069 if (read_access.tag >= start_tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003070 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003071 break;
3072 }
3073 }
John Zulauf2f952d22020-02-10 11:34:51 -07003074 }
3075 }
3076 return hazard;
3077}
3078
John Zulaufae842002021-04-15 18:20:55 -06003079HazardResult ResourceAccessState::DetectAsyncHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
3080 ResourceUsageTag start_tag) const {
3081 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06003082 for (const auto &first : recorded_use.first_accesses_) {
John Zulaufae842002021-04-15 18:20:55 -06003083 // Skip and quit logic
3084 if (first.tag < tag_range.begin) continue;
3085 if (first.tag >= tag_range.end) break;
John Zulaufae842002021-04-15 18:20:55 -06003086
3087 hazard = DetectAsyncHazard(first.usage_index, start_tag);
John Zulauf4fa68462021-04-26 21:04:22 -06003088 if (hazard.hazard) {
3089 hazard.AddRecordedAccess(first);
3090 break;
3091 }
John Zulaufae842002021-04-15 18:20:55 -06003092 }
3093 return hazard;
3094}
3095
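// Detect hazards for an image layout transition against a barrier's first (source) scopes: outstanding reads must be
// in (or execution-chained with) the source execution scope, and when there are no reads the last write must be
// covered by the source access scope or its dependency chain, otherwise a WAR/WAW hazard is reported.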
Jeremy Gebben40a22942020-12-22 14:22:06 -07003096HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003097 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07003098 // Only supporting image layout transitions for now
3099 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3100 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06003101 // only test for WAW if there are no intervening read operations.
3102 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07003103 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06003104 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07003105 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003106 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06003107 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07003108 break;
3109 }
3110 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003111 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3112 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3113 }
3114
3115 return hazard;
3116}
3117
Jeremy Gebben40a22942020-12-22 14:22:06 -07003118HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07003119 const SyncStageAccessFlags &src_access_scope,
John Zulauf14940722021-04-12 15:19:02 -06003120 const ResourceUsageTag event_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07003121 // Only supporting image layout transitions for now
3122 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3123 HazardResult hazard;
3124 // only test for WAW if there are no intervening read operations.
3125 // See DetectHazard(SyncStageAccessIndex) above for more details.
3126
John Zulaufab7756b2020-12-29 16:10:16 -07003127 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003128 // Look at the reads if any... if reads exist, they are either the reason the access is in the event
3129 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07003130 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06003131 if (read_access.tag < event_tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003132 // The read is in the event's first synchronization scope, so we use a barrier hazard check
3133 // If the read stage is not in the src sync scope
3134 // *AND* not execution chained with an existing sync barrier (that's the or)
3135 // then the barrier access is unsafe (R/W after R)
3136 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
3137 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3138 break;
3139 }
3140 } else {
3141 // The read is not in the event's first sync scope and so is a hazard vs. the layout transition
3142 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3143 }
3144 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003145 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003146 // if there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
John Zulauf14940722021-04-12 15:19:02 -06003147 if (write_tag < event_tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003148 // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
3149 // So do a normal barrier hazard check
3150 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3151 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3152 }
3153 } else {
3154 // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06003155 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3156 }
John Zulaufd14743a2020-07-03 09:42:39 -06003157 }
John Zulauf361fb532020-07-22 10:45:39 -06003158
John Zulauf0cb5be22020-01-23 12:18:22 -07003159 return hazard;
3160}
3161
John Zulauf5f13a792020-03-10 07:31:21 -06003162 // The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
3163 // transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
3164// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
3165void ResourceAccessState::Resolve(const ResourceAccessState &other) {
John Zulauf14940722021-04-12 15:19:02 -06003166 if (write_tag < other.write_tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003167 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
3168 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06003169 *this = other;
John Zulauf14940722021-04-12 15:19:02 -06003170 } else if (other.write_tag == write_tag) {
3171 // In the *equals* case for write operations, we merged the write barriers and the read state (but without the
John Zulauf5f13a792020-03-10 07:31:21 -06003172 // dependency chaining logic or any stage expansion)
3173 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003174 pending_write_barriers |= other.pending_write_barriers;
3175 pending_layout_transition |= other.pending_layout_transition;
3176 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf4fa68462021-04-26 21:04:22 -06003177 pending_layout_ordering_ |= other.pending_layout_ordering_;
John Zulauf5f13a792020-03-10 07:31:21 -06003178
John Zulaufd14743a2020-07-03 09:42:39 -06003179 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07003180 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06003181 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07003182 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003183 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06003184 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06003185 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06003186 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
3187 // but we should wait on profiling data for that.
3188 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003189 auto &my_read = last_reads[my_read_index];
3190 if (other_read.stage == my_read.stage) {
John Zulauf14940722021-04-12 15:19:02 -06003191 if (my_read.tag < other_read.tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003192 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06003193 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06003194 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003195 my_read.pending_dep_chain = other_read.pending_dep_chain;
3196 // TODO: Phase 2 -- review the state merge logic to avoid false positives from overwriting the barriers
3197 // May require tracking more than one access per stage.
3198 my_read.barriers = other_read.barriers;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003199 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06003200 // Since I'm overwriting the fragment stage read, also update the input attachment info
3201 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06003202 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003203 }
John Zulauf14940722021-04-12 15:19:02 -06003204 } else if (other_read.tag == my_read.tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06003205 // The read tags match so merge the barriers
3206 my_read.barriers |= other_read.barriers;
3207 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003208 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003209
John Zulauf5f13a792020-03-10 07:31:21 -06003210 break;
3211 }
3212 }
3213 } else {
3214 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07003215 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06003216 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003217 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003218 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003219 }
John Zulauf5f13a792020-03-10 07:31:21 -06003220 }
3221 }
John Zulauf361fb532020-07-22 10:45:39 -06003222 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003223 } // the else clause would be that the other write is before this write... in which case we supersede the other state and
3224 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07003225
3226 // Merge first access information by making a copy of this first_accesses_ and reconstructing with a shuffle
3227 // of the copy and other into this using the update first logic.
3228 // NOTE: All sorts of additional cleverness could be put into short circuits (for example back is write and is before front
3229 // of the other first_accesses... )
3230 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
3231 FirstAccesses firsts(std::move(first_accesses_));
3232 first_accesses_.clear();
3233 first_read_stages_ = 0U;
3234 auto a = firsts.begin();
3235 auto a_end = firsts.end();
3236 for (auto &b : other.first_accesses_) {
John Zulauf14940722021-04-12 15:19:02 -06003237 // TODO: Determine whether some tag offset will be needed for PHASE II
3238 while ((a != a_end) && (a->tag < b.tag)) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003239 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3240 ++a;
3241 }
3242 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
3243 }
3244 for (; a != a_end; ++a) {
3245 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3246 }
3247 }
John Zulauf5f13a792020-03-10 07:31:21 -06003248}
3249
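// Record a new access: reads are tracked per pipeline stage (only the most recent read per stage is kept), while any
// write clears the read state via SetWrite and becomes the new "most recent" write. First-access bookkeeping for
// recorded-context replay is updated in both cases.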
John Zulauf14940722021-04-12 15:19:02 -06003250void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003251 // Move this logic in the ResourceStateTracker as methods, thereof (or we'll repeat it for every flavor of resource...
3252 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003253 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003254 // Multiple outstanding reads may be of interest and do dependency chains independently
3255 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3256 const auto usage_stage = PipelineStageBit(usage_index);
3257 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003258 for (auto &read_access : last_reads) {
3259 if (read_access.stage == usage_stage) {
3260 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003261 break;
3262 }
3263 }
3264 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07003265 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003266 last_read_stages |= usage_stage;
3267 }
John Zulauf4285ee92020-09-23 10:20:52 -06003268
3269 // Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07003270 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003271 // TODO Revisit re: multiple reads for a given stage
3272 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003273 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003274 } else {
3275 // Assume write
3276 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003277 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003278 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003279 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003280}
John Zulauf5f13a792020-03-10 07:31:21 -06003281
John Zulauf89311b42020-09-29 16:28:47 -06003282// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3283// if the last_reads/last_write were unsafe, we've reported them, in either case the prior access is irrelevant.
3284// We can overwrite them as *this* write is now after them.
3285//
3286// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
John Zulauf14940722021-04-12 15:19:02 -06003287void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003288 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06003289 last_read_stages = 0;
3290 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06003291 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06003292
3293 write_barriers = 0;
3294 write_dependency_chain = 0;
3295 write_tag = tag;
3296 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003297}
3298
John Zulauf89311b42020-09-29 16:28:47 -06003299// Apply the memory barrier without updating the existing barriers. The execution barrier
3300// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3301// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3302// replace the current write barriers or add to them, so accumulate to pending as well.
3303void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
3304 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3305 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003306 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
3307 // transition, under the theory of "most recent access". If the read/write *isn't* safe
3308 // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
3309 // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufc523bf62021-02-16 08:20:34 -07003310 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope.exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06003311 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003312 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003313 if (layout_transition) {
3314 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3315 }
John Zulaufa0a98292020-09-18 09:30:10 -06003316 }
John Zulauf89311b42020-09-29 16:28:47 -06003317 // Track layout transition as pending as we can't modify last_write until all barriers are processed
3318 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003319
John Zulauf89311b42020-09-29 16:28:47 -06003320 if (!pending_layout_transition) {
3321 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3322 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003323 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003324 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufc523bf62021-02-16 08:20:34 -07003325 if (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers)) {
3326 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003327 }
3328 }
John Zulaufa0a98292020-09-18 09:30:10 -06003329 }
John Zulaufa0a98292020-09-18 09:30:10 -06003330}
3331
John Zulauf4a6105a2020-11-17 15:11:05 -07003332// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
3333// changes the "chaining" state, but to keep barriers independent. See discussion above.
John Zulauf14940722021-04-12 15:19:02 -06003334void ResourceAccessState::ApplyBarrier(const ResourceUsageTag scope_tag, const SyncBarrier &barrier, bool layout_transition) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003335 // The scope logic for events is, if we're here, the resource usage was flagged as "in the first execution scope" at
3336 // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
3337 // in order to know if it's in the execution scope
3338 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
3339 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
3340 // errors w.r.t. "most recent" accesses.
John Zulauf14940722021-04-12 15:19:02 -06003341 if (layout_transition || ((write_tag < scope_tag) && (barrier.src_access_scope & last_write).any())) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003342 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003343 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003344 if (layout_transition) {
3345 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3346 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003347 }
3348 // Track layout transition as pending as we can't modify last_write until all barriers are processed
3349 pending_layout_transition |= layout_transition;
3350
3351 if (!pending_layout_transition) {
3352 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3353 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003354 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003355 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
3356 // NOTE: That's not really correct... this read stage might *not* have been included in the SetEvent, and the barriers
3357 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
3358 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
3359 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
3360 // capture (the specific write and read stages that *were* in scope at the moment of SetEvent).
3361 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulauf14940722021-04-12 15:19:02 -06003362 if ((read_access.tag < scope_tag) && (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers))) {
John Zulaufc523bf62021-02-16 08:20:34 -07003363 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07003364 }
3365 }
3366 }
3367}
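// Commit the effects accumulated by ApplyBarrier calls: perform any pending layout transition (modelled as a write to
// SYNC_IMAGE_LAYOUT_TRANSITION), then fold the pending dependency chains and access barriers into the read and write
// state; deferring these updates is what keeps the barriers in a batch independent of one another.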
John Zulauf14940722021-04-12 15:19:02 -06003368void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag tag) {
John Zulauf89311b42020-09-29 16:28:47 -06003369 if (pending_layout_transition) {
John Zulauf4fa68462021-04-26 21:04:22 -06003370 // SetWrite clobbers the last_reads array, and thus we don't have to clear the read_state out.
John Zulauf89311b42020-09-29 16:28:47 -06003371 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003372 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf4fa68462021-04-26 21:04:22 -06003373 TouchupFirstForLayoutTransition(tag, pending_layout_ordering_);
3374 pending_layout_ordering_ = OrderingBarrier();
John Zulauf89311b42020-09-29 16:28:47 -06003375 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003376 }
John Zulauf89311b42020-09-29 16:28:47 -06003377
3378 // Apply the accumulate execution barriers (and thus update chaining information)
John Zulauf4fa68462021-04-26 21:04:22 -06003379 // for layout transition, last_reads is reset by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003380 for (auto &read_access : last_reads) {
3381 read_access.barriers |= read_access.pending_dep_chain;
3382 read_execution_barriers |= read_access.barriers;
3383 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003384 }
3385
3386 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3387 write_dependency_chain |= pending_write_dep_chain;
3388 write_barriers |= pending_write_barriers;
3389 pending_write_dep_chain = 0;
3390 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003391}
3392
John Zulaufae842002021-04-15 18:20:55 -06003393bool ResourceAccessState::FirstAccessInTagRange(const ResourceUsageRange &tag_range) const {
3394 if (!first_accesses_.size()) return false;
3395 const ResourceUsageRange first_access_range = {first_accesses_.front().tag, first_accesses_.back().tag + 1};
3396 return tag_range.intersects(first_access_range);
3397}
3398
John Zulauf59e25072020-07-17 10:55:21 -06003399// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07003400VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
3401 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003402
John Zulaufab7756b2020-12-29 16:10:16 -07003403 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003404 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003405 barriers = read_access.barriers;
3406 break;
John Zulauf59e25072020-07-17 10:55:21 -06003407 }
3408 }
John Zulauf4285ee92020-09-23 10:20:52 -06003409
John Zulauf59e25072020-07-17 10:55:21 -06003410 return barriers;
3411}
3412
Jeremy Gebben40a22942020-12-22 14:22:06 -07003413inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003414 assert(IsRead(usage));
3415 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3416 // * the previous reads are not hazards, and thus last_write must be visible and available to
3417 // any reads that happen after.
3418 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3419 // the current read will be also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003420 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003421}
3422
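// Worked example of the check above (values illustrative, not from a specific trace): after a transfer write,
//   last_write              = the SYNC_COPY_TRANSFER_WRITE access bit  (so last_write.any() is true)
//   read_execution_barriers = 0                                        (no execution barrier reaches any read stage)
// A following SYNC_COPY_TRANSFER_READ at the copy/transfer stage then sees
// (read_execution_barriers & usage_stage) == 0, and if the write has not been made visible to transfer reads
// (IsWriteHazard), IsRAWHazard() returns true and the caller reports a READ_AFTER_WRITE hazard.
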
Jeremy Gebben40a22942020-12-22 14:22:06 -07003423VkPipelineStageFlags2KHR ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003424     // Whether the stages are in the ordering scope only matters if the current write is ordered
Jeremy Gebben40a22942020-12-22 14:22:06 -07003425 VkPipelineStageFlags2KHR ordered_stages = last_read_stages & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06003426     // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003427 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003428 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003429         // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07003430 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06003431 }
3432
3433 return ordered_stages;
3434}
3435
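// Worked example of the masking above (illustrative values): with
//   last_read_stages    = VERTEX_SHADER | FRAGMENT_SHADER
//   ordering.exec_scope = FRAGMENT_SHADER | EARLY_FRAGMENT_TESTS
// the masked result is FRAGMENT_SHADER. The input-attachment case is keyed off the ordering *access* scope rather
// than the execution scope, which is why it is handled separately: when the access scope includes
// SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT and input_attachment_read is set, FRAGMENT_SHADER is OR'd in.
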
John Zulauf14940722021-04-12 15:19:02 -06003436void ResourceAccessState::UpdateFirst(const ResourceUsageTag tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003437 // Only record until we record a write.
3438 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003439 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07003440 if (0 == (usage_stage & first_read_stages_)) {
3441 // If this is a read we haven't seen or a write, record.
John Zulauf4fa68462021-04-26 21:04:22 -06003442             // We always need to know which stages were found prior to the write
John Zulauffaea0ee2021-01-14 14:01:32 -07003443 first_read_stages_ |= usage_stage;
John Zulauf4fa68462021-04-26 21:04:22 -06003444 if (0 == (read_execution_barriers & usage_stage)) {
3445 // If this stage isn't masked then we add it (since writes map to usage_stage 0, this also records writes)
3446 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3447 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003448 }
3449 }
3450}
3451
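// Worked example of the recording above (illustrative): for a resource whose first uses are
//   1. a vertex-stage read, 2. a fragment-stage read, 3. a transfer write,
// first_accesses_ gains entries for (1), (2), and (3), and first_read_stages_ becomes
// VERTEX_SHADER | FRAGMENT_SHADER (assuming no execution barriers have masked those stages yet).
// A second fragment-stage read after (2) is not recorded because its stage is already in first_read_stages_,
// and nothing after (3) is recorded because recording stops once the first write has been seen.
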
John Zulauf4fa68462021-04-26 21:04:22 -06003452void ResourceAccessState::TouchupFirstForLayoutTransition(ResourceUsageTag tag, const OrderingBarrier &layout_ordering) {
3453 // Only call this after recording an image layout transition
3454 assert(first_accesses_.size());
3455 if (first_accesses_.back().tag == tag) {
 3456         // If this layout transition is the first write, add the additional ordering rules that guard the ILT
Samuel Iglesias Gonsálvez9b4660b2021-10-21 08:50:39 +02003457 assert(first_accesses_.back().usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
John Zulauf4fa68462021-04-26 21:04:22 -06003458 first_write_layout_ordering_ = layout_ordering;
3459 }
3460}
3461
John Zulaufee984022022-04-13 16:39:50 -06003462void ResourceAccessState::ReadState::Set(VkPipelineStageFlags2KHR stage_, const SyncStageAccessFlags &access_,
3463 VkPipelineStageFlags2KHR barriers_, ResourceUsageTag tag_) {
3464 stage = stage_;
3465 access = access_;
3466 barriers = barriers_;
3467 tag = tag_;
3468 pending_dep_chain = 0; // If this is a new read, we aren't applying a barrier set.
3469}
3470
John Zulaufea943c52022-02-22 11:05:17 -07003471std::shared_ptr<CommandBufferAccessContext> SyncValidator::AccessContextFactory(VkCommandBuffer command_buffer) {
3472 // If we don't have one, make it.
3473 auto cb_state = Get<CMD_BUFFER_STATE>(command_buffer);
3474 assert(cb_state.get());
3475 auto queue_flags = cb_state->GetQueueFlags();
3476 return std::make_shared<CommandBufferAccessContext>(*this, cb_state, queue_flags);
3477}
3478
3479inline std::shared_ptr<CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) {
3480 return GetMappedInsert(cb_access_state, command_buffer,
3481 [this, command_buffer]() { return AccessContextFactory(command_buffer); });
3482}
3483
3484std::shared_ptr<const CommandBufferAccessContext> SyncValidator::GetAccessContextShared(VkCommandBuffer command_buffer) const {
3485 return GetMapped(cb_access_state, command_buffer, []() { return std::shared_ptr<CommandBufferAccessContext>(); });
3486}
3487
3488const CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) const {
3489 return GetMappedPlainFromShared(cb_access_state, command_buffer);
3490}
3491
3492CommandBufferAccessContext *SyncValidator::GetAccessContext(VkCommandBuffer command_buffer) {
3493 return GetAccessContextShared(command_buffer).get();
3494}
3495
3496CommandBufferAccessContext *SyncValidator::GetAccessContextNoInsert(VkCommandBuffer command_buffer) {
3497 return GetMappedPlainFromShared(cb_access_state, command_buffer);
3498}
3499
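// Illustrative summary of the accessor variants above: the non-const GetAccessContext/GetAccessContextShared
// get-or-create an entry in cb_access_state (the record path), while the const overloads and
// GetAccessContextNoInsert only look up (the validate path). A hypothetical caller, sketched under those
// assumptions (ExampleValidateOnly is not part of the validator's API):
//
//   void ExampleValidateOnly(const SyncValidator &validator, VkCommandBuffer cb) {
//       const CommandBufferAccessContext *ctx = validator.GetAccessContext(cb);  // lookup only, no insertion
//       if (!ctx) return;  // nothing has been recorded for this command buffer yet
//       // ... inspect ctx->GetCurrentAccessContext() ...
//   }
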
John Zulaufd1f85d42020-04-15 12:23:15 -06003500void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003501 auto *access_context = GetAccessContextNoInsert(command_buffer);
3502 if (access_context) {
3503 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003504 }
3505}
3506
John Zulaufd1f85d42020-04-15 12:23:15 -06003507void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3508 auto access_found = cb_access_state.find(command_buffer);
3509 if (access_found != cb_access_state.end()) {
3510 access_found->second->Reset();
John Zulauf4fa68462021-04-26 21:04:22 -06003511 access_found->second->MarkDestroyed();
John Zulaufd1f85d42020-04-15 12:23:15 -06003512 cb_access_state.erase(access_found);
3513 }
3514}
3515
John Zulauf9cb530d2019-09-30 14:14:10 -06003516bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3517 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3518 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003519 const auto *cb_context = GetAccessContext(commandBuffer);
3520 assert(cb_context);
3521 if (!cb_context) return skip;
3522 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003523
John Zulauf3d84f1b2020-03-09 13:33:25 -06003524 // If we have no previous accesses, we have no hazards
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003525 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
3526 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003527
3528 for (uint32_t region = 0; region < regionCount; region++) {
3529 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003530 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003531 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003532 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003533 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003534 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003535 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003536 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003537 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003538 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003539 }
John Zulauf16adfc92020-04-08 10:28:33 -06003540 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003541 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003542 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003543 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003544 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003545 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003546 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003547 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003548 }
3549 }
3550 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003551 }
3552 return skip;
3553}
3554
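// An application-level sequence (sketch only; buf_a/buf_b/buf_c are assumed valid VkBuffers of at least 256 bytes,
// cmd an assumed command buffer in the recording state) of the kind the validation above is designed to flag:
// the second copy reads buf_b, which the first copy wrote, with no intervening barrier, producing a
// READ-AFTER-WRITE hazard on buf_b. A matching barrier that resolves it is sketched after the
// vkCmdPipelineBarrier record hook further below.
#if 0  // illustrative only; not compiled as part of the layer
    VkBufferCopy region{0 /*srcOffset*/, 0 /*dstOffset*/, 256 /*size*/};
    vkCmdCopyBuffer(cmd, buf_a, buf_b, 1, &region);  // writes buf_b
    vkCmdCopyBuffer(cmd, buf_b, buf_c, 1, &region);  // reads buf_b -- hazard without a barrier
#endif
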
3555void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3556 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003557 auto *cb_context = GetAccessContext(commandBuffer);
3558 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003559 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003560 auto *context = cb_context->GetCurrentAccessContext();
3561
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003562 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
3563 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003564
3565 for (uint32_t region = 0; region < regionCount; region++) {
3566 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003567 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003568 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003569 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003570 }
John Zulauf16adfc92020-04-08 10:28:33 -06003571 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003572 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003573 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003574 }
3575 }
3576}
3577
John Zulauf4a6105a2020-11-17 15:11:05 -07003578void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3579 // Clear out events from the command buffer contexts
3580 for (auto &cb_context : cb_access_state) {
3581 cb_context.second->RecordDestroyEvent(event);
3582 }
3583}
3584
Tony-LunarGef035472021-11-02 10:23:33 -06003585bool SyncValidator::ValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos,
3586 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04003587 bool skip = false;
3588 const auto *cb_context = GetAccessContext(commandBuffer);
3589 assert(cb_context);
3590 if (!cb_context) return skip;
3591 const auto *context = cb_context->GetCurrentAccessContext();
Tony-LunarGef035472021-11-02 10:23:33 -06003592 const char *func_name = CommandTypeString(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04003593
3594 // If we have no previous accesses, we have no hazards
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003595 auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3596 auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
Jeff Leger178b1e52020-10-05 12:22:23 -04003597
3598 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3599 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3600 if (src_buffer) {
3601 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003602 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003603 if (hazard.hazard) {
3604 // TODO -- add tag information to log msg when useful.
3605 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
Tony-LunarGef035472021-11-02 10:23:33 -06003606 "%s(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04003607 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003608 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003609 }
3610 }
3611 if (dst_buffer && !skip) {
3612 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003613 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003614 if (hazard.hazard) {
3615 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
Tony-LunarGef035472021-11-02 10:23:33 -06003616 "%s(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04003617 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003618 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003619 }
3620 }
3621 if (skip) break;
3622 }
3623 return skip;
3624}
3625
Tony-LunarGef035472021-11-02 10:23:33 -06003626bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3627 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3628 return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
3629}
3630
3631bool SyncValidator::PreCallValidateCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) const {
3632 return ValidateCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
3633}
3634
3635void SyncValidator::RecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos, CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04003636 auto *cb_context = GetAccessContext(commandBuffer);
3637 assert(cb_context);
Tony-LunarGef035472021-11-02 10:23:33 -06003638 const auto tag = cb_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04003639 auto *context = cb_context->GetCurrentAccessContext();
3640
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003641 auto src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3642 auto dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
Jeff Leger178b1e52020-10-05 12:22:23 -04003643
3644 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3645 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3646 if (src_buffer) {
3647 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003648 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003649 }
3650 if (dst_buffer) {
3651 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003652 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003653 }
3654 }
3655}
3656
Tony-LunarGef035472021-11-02 10:23:33 -06003657void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3658 RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2KHR);
3659}
3660
3661void SyncValidator::PreCallRecordCmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfos) {
3662 RecordCmdCopyBuffer2(commandBuffer, pCopyBufferInfos, CMD_COPYBUFFER2);
3663}
3664
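// The *2 / *2KHR entry points above funnel into the same validate/record helpers; the only difference at the API
// level is the extensible info struct. A minimal sketch of the newer-style call (cmd, buf_a, buf_b are assumed
// valid handles, size illustrative):
#if 0  // illustrative only; not compiled as part of the layer
    VkBufferCopy2 region{VK_STRUCTURE_TYPE_BUFFER_COPY_2, nullptr, 0 /*srcOffset*/, 0 /*dstOffset*/, 256 /*size*/};
    VkCopyBufferInfo2 copy_info{VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2, nullptr, buf_a, buf_b, 1, &region};
    vkCmdCopyBuffer2(cmd, &copy_info);  // validated by ValidateCmdCopyBuffer2, recorded by RecordCmdCopyBuffer2
#endif
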
John Zulauf5c5e88d2019-12-26 11:22:02 -07003665bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3666 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3667 const VkImageCopy *pRegions) const {
3668 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003669 const auto *cb_access_context = GetAccessContext(commandBuffer);
3670 assert(cb_access_context);
3671 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003672
John Zulauf3d84f1b2020-03-09 13:33:25 -06003673 const auto *context = cb_access_context->GetCurrentAccessContext();
3674 assert(context);
3675 if (!context) return skip;
3676
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003677 auto src_image = Get<IMAGE_STATE>(srcImage);
3678 auto dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003679 for (uint32_t region = 0; region < regionCount; region++) {
3680 const auto &copy_region = pRegions[region];
3681 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003682 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003683 copy_region.srcOffset, copy_region.extent);
3684 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003685 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003686 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003687 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003688 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003689 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003690 }
3691
3692 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003693 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
ziga-lunarg73746512022-03-23 23:08:17 +01003694 copy_region.dstOffset, copy_region.extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003695 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003696 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003697 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003698 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003699 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003700 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003701 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003702 }
3703 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003704
John Zulauf5c5e88d2019-12-26 11:22:02 -07003705 return skip;
3706}
3707
3708void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3709 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3710 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003711 auto *cb_access_context = GetAccessContext(commandBuffer);
3712 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003713 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003714 auto *context = cb_access_context->GetCurrentAccessContext();
3715 assert(context);
3716
Jeremy Gebben9f537102021-10-05 16:37:12 -06003717 auto src_image = Get<IMAGE_STATE>(srcImage);
3718 auto dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003719
3720 for (uint32_t region = 0; region < regionCount; region++) {
3721 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003722 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003723 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003724 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003725 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003726 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003727 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
ziga-lunarg73746512022-03-23 23:08:17 +01003728 copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003729 }
3730 }
3731}
3732
Tony-LunarGb61514a2021-11-02 12:36:51 -06003733bool SyncValidator::ValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo,
3734 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04003735 bool skip = false;
3736 const auto *cb_access_context = GetAccessContext(commandBuffer);
3737 assert(cb_access_context);
3738 if (!cb_access_context) return skip;
3739
3740 const auto *context = cb_access_context->GetCurrentAccessContext();
3741 assert(context);
3742 if (!context) return skip;
3743
Tony-LunarGb61514a2021-11-02 12:36:51 -06003744 const char *func_name = CommandTypeString(cmd_type);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07003745 auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3746 auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
Tony-LunarGb61514a2021-11-02 12:36:51 -06003747
Jeff Leger178b1e52020-10-05 12:22:23 -04003748 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3749 const auto &copy_region = pCopyImageInfo->pRegions[region];
3750 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003751 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003752 copy_region.srcOffset, copy_region.extent);
3753 if (hazard.hazard) {
3754 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
sfricke-samsung71f04e32022-03-16 01:21:21 -05003755 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04003756 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003757 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003758 }
3759 }
3760
3761 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003762 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
ziga-lunarg73746512022-03-23 23:08:17 +01003763 copy_region.dstOffset, copy_region.extent);
Jeff Leger178b1e52020-10-05 12:22:23 -04003764 if (hazard.hazard) {
3765 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
sfricke-samsung71f04e32022-03-16 01:21:21 -05003766 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04003767 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003768 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003769 }
3770 if (skip) break;
3771 }
3772 }
3773
3774 return skip;
3775}
3776
Tony-LunarGb61514a2021-11-02 12:36:51 -06003777bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3778 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3779 return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
3780}
3781
3782bool SyncValidator::PreCallValidateCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) const {
3783 return ValidateCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
3784}
3785
3786void SyncValidator::RecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo, CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04003787 auto *cb_access_context = GetAccessContext(commandBuffer);
3788 assert(cb_access_context);
Tony-LunarGb61514a2021-11-02 12:36:51 -06003789 const auto tag = cb_access_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04003790 auto *context = cb_access_context->GetCurrentAccessContext();
3791 assert(context);
3792
Jeremy Gebben9f537102021-10-05 16:37:12 -06003793 auto src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3794 auto dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04003795
3796 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3797 const auto &copy_region = pCopyImageInfo->pRegions[region];
3798 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003799 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003800 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003801 }
3802 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003803 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
ziga-lunarg73746512022-03-23 23:08:17 +01003804 copy_region.dstSubresource, copy_region.dstOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003805 }
3806 }
3807}
3808
Tony-LunarGb61514a2021-11-02 12:36:51 -06003809void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3810 RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2KHR);
3811}
3812
3813void SyncValidator::PreCallRecordCmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo) {
3814 RecordCmdCopyImage2(commandBuffer, pCopyImageInfo, CMD_COPYIMAGE2);
3815}
3816
John Zulauf9cb530d2019-09-30 14:14:10 -06003817bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3818 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3819 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3820 uint32_t bufferMemoryBarrierCount,
3821 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3822 uint32_t imageMemoryBarrierCount,
3823 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3824 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003825 const auto *cb_access_context = GetAccessContext(commandBuffer);
3826 assert(cb_access_context);
3827 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003828
John Zulauf36ef9282021-02-02 11:47:24 -07003829 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3830 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3831 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3832 pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003833 skip = pipeline_barrier.Validate(*cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003834 return skip;
3835}
3836
3837void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3838 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3839 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3840 uint32_t bufferMemoryBarrierCount,
3841 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3842 uint32_t imageMemoryBarrierCount,
3843 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003844 auto *cb_access_context = GetAccessContext(commandBuffer);
3845 assert(cb_access_context);
3846 if (!cb_access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003847
John Zulauf1bf30522021-09-03 15:39:06 -06003848 cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(),
3849 srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
3850 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
3851 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06003852}
3853
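// Continuing the copy/copy sketch from PreCallValidateCmdCopyBuffer above: a barrier like the one below, recorded
// between the two copies, is the kind of dependency SyncOpPipelineBarrier applies to the access context and should
// remove the READ-AFTER-WRITE report (buf_b and cmd are the same assumed handles; offsets/sizes illustrative).
#if 0  // illustrative only; not compiled as part of the layer
    VkBufferMemoryBarrier barrier{VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, nullptr,
                                  VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
                                  VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
                                  buf_b, 0, VK_WHOLE_SIZE};
    vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
                         0, nullptr, 1, &barrier, 0, nullptr);
#endif
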
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003854bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
3855 const VkDependencyInfoKHR *pDependencyInfo) const {
3856 bool skip = false;
3857 const auto *cb_access_context = GetAccessContext(commandBuffer);
3858 assert(cb_access_context);
3859 if (!cb_access_context) return skip;
3860
3861 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3862 skip = pipeline_barrier.Validate(*cb_access_context);
3863 return skip;
3864}
3865
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07003866bool SyncValidator::PreCallValidateCmdPipelineBarrier2(VkCommandBuffer commandBuffer,
3867 const VkDependencyInfo *pDependencyInfo) const {
3868 bool skip = false;
3869 const auto *cb_access_context = GetAccessContext(commandBuffer);
3870 assert(cb_access_context);
3871 if (!cb_access_context) return skip;
3872
3873 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3874 skip = pipeline_barrier.Validate(*cb_access_context);
3875 return skip;
3876}
3877
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003878void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
3879 auto *cb_access_context = GetAccessContext(commandBuffer);
3880 assert(cb_access_context);
3881 if (!cb_access_context) return;
3882
John Zulauf1bf30522021-09-03 15:39:06 -06003883 cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(),
3884 *pDependencyInfo);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003885}
3886
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07003887void SyncValidator::PreCallRecordCmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo) {
3888 auto *cb_access_context = GetAccessContext(commandBuffer);
3889 assert(cb_access_context);
3890 if (!cb_access_context) return;
3891
3892 cb_access_context->RecordSyncOp<SyncOpPipelineBarrier>(CMD_PIPELINEBARRIER2, *this, cb_access_context->GetQueueFlags(),
3893 *pDependencyInfo);
3894}
3895
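// The synchronization2 variants above carry the same information as the legacy call, but packaged per-barrier in
// VkDependencyInfo. The equivalent of the buffer barrier sketched earlier, using the same assumed handles:
#if 0  // illustrative only; not compiled as part of the layer
    VkBufferMemoryBarrier2 barrier2{VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2, nullptr,
                                    VK_PIPELINE_STAGE_2_COPY_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT,
                                    VK_PIPELINE_STAGE_2_COPY_BIT, VK_ACCESS_2_TRANSFER_READ_BIT,
                                    VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
                                    buf_b, 0, VK_WHOLE_SIZE};
    VkDependencyInfo dep_info{VK_STRUCTURE_TYPE_DEPENDENCY_INFO, nullptr, 0 /*dependencyFlags*/,
                              0, nullptr, 1, &barrier2, 0, nullptr};
    vkCmdPipelineBarrier2(cmd, &dep_info);
#endif
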
Jeremy Gebben36a3b832022-03-23 10:54:18 -06003896void SyncValidator::CreateDevice(const VkDeviceCreateInfo *pCreateInfo) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003897 // The state tracker sets up the device state
Jeremy Gebben36a3b832022-03-23 10:54:18 -06003898 StateTracker::CreateDevice(pCreateInfo);
John Zulauf9cb530d2019-09-30 14:14:10 -06003899
John Zulauf5f13a792020-03-10 07:31:21 -06003900 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
3901 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06003902 // TODO: Find a good way to do this hooklessly.
Jeremy Gebben36a3b832022-03-23 10:54:18 -06003903 SetCommandBufferResetCallback([this](VkCommandBuffer command_buffer) -> void { ResetCommandBufferCallback(command_buffer); });
3904 SetCommandBufferFreeCallback([this](VkCommandBuffer command_buffer) -> void { FreeCommandBufferCallback(command_buffer); });
John Zulauf9cb530d2019-09-30 14:14:10 -06003905}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003906
John Zulauf355e49b2020-04-24 15:11:15 -06003907bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07003908 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003909 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06003910 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003911 if (cb_context) {
sfricke-samsung85584a72021-09-30 21:43:38 -07003912 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003913 skip = sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003914 }
John Zulauf355e49b2020-04-24 15:11:15 -06003915 return skip;
3916}
3917
3918bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3919 VkSubpassContents contents) const {
3920 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003921 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003922 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003923 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003924 return skip;
3925}
3926
3927bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003928 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003929 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003930 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003931 return skip;
3932}
3933
3934bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3935 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003936 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003937 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003938 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06003939 return skip;
3940}
3941
John Zulauf3d84f1b2020-03-09 13:33:25 -06003942void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3943 VkResult result) {
3944 // The state tracker sets up the command buffer state
3945 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3946
 3947     // Create/initialize the structure that tracks accesses at the command buffer scope.
3948 auto cb_access_context = GetAccessContext(commandBuffer);
3949 assert(cb_access_context);
3950 cb_access_context->Reset();
3951}
3952
3953void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07003954 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003955 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003956 if (cb_context) {
John Zulaufbb890452021-12-14 11:30:18 -07003957 cb_context->RecordSyncOp<SyncOpBeginRenderPass>(cmd, *this, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003958 }
3959}
3960
3961void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3962 VkSubpassContents contents) {
3963 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003964 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003965 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003966 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003967}
3968
3969void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3970 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3971 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003972 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003973}
3974
3975void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3976 const VkRenderPassBeginInfo *pRenderPassBegin,
3977 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3978 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003979 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06003980}
3981
Mike Schuchardt2df08912020-12-15 16:28:09 -08003982bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07003983 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003984 bool skip = false;
3985
3986 auto cb_context = GetAccessContext(commandBuffer);
3987 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003988 if (!cb_context) return skip;
sfricke-samsung85584a72021-09-30 21:43:38 -07003989 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003990 return sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003991}
3992
3993bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3994 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
John Zulauf64ffe552021-02-06 10:25:07 -07003995 // Convert to a NextSubpass2
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003996 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003997 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003998 auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
3999 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06004000 return skip;
4001}
4002
Mike Schuchardt2df08912020-12-15 16:28:09 -08004003bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
4004 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06004005 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07004006 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06004007 return skip;
4008}
4009
4010bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
4011 const VkSubpassEndInfo *pSubpassEndInfo) const {
4012 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07004013 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06004014 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06004015}
4016
4017void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07004018 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06004019 auto cb_context = GetAccessContext(commandBuffer);
4020 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07004021 if (!cb_context) return;
John Zulauf3d84f1b2020-03-09 13:33:25 -06004022
John Zulaufbb890452021-12-14 11:30:18 -07004023 cb_context->RecordSyncOp<SyncOpNextSubpass>(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004024}
4025
4026void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
4027 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07004028 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06004029 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06004030 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004031}
4032
4033void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
4034 const VkSubpassEndInfo *pSubpassEndInfo) {
4035 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06004036 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004037}
4038
4039void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
4040 const VkSubpassEndInfo *pSubpassEndInfo) {
4041 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07004042 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004043}
4044
sfricke-samsung85584a72021-09-30 21:43:38 -07004045bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
4046 CMD_TYPE cmd) const {
John Zulauf355e49b2020-04-24 15:11:15 -06004047 bool skip = false;
4048
4049 auto cb_context = GetAccessContext(commandBuffer);
4050 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07004051 if (!cb_context) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06004052
sfricke-samsung85584a72021-09-30 21:43:38 -07004053 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07004054 skip |= sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06004055 return skip;
4056}
4057
4058bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
4059 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07004060 skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06004061 return skip;
4062}
4063
Mike Schuchardt2df08912020-12-15 16:28:09 -08004064bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06004065 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07004066 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06004067 return skip;
4068}
4069
4070bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08004071 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06004072 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07004073 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06004074 return skip;
4075}
4076
sfricke-samsung85584a72021-09-30 21:43:38 -07004077void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) {
John Zulaufe5da6e52020-03-18 15:32:18 -06004078     // Resolve all the subpass contexts to the command buffer context
4079 auto cb_context = GetAccessContext(commandBuffer);
4080 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07004081 if (!cb_context) return;
John Zulaufe5da6e52020-03-18 15:32:18 -06004082
John Zulaufbb890452021-12-14 11:30:18 -07004083 cb_context->RecordSyncOp<SyncOpEndRenderPass>(cmd, *this, pSubpassEndInfo);
John Zulaufe5da6e52020-03-18 15:32:18 -06004084}
John Zulauf3d84f1b2020-03-09 13:33:25 -06004085
John Zulauf33fc1d52020-07-17 11:01:10 -06004086// Simple heuristic rule to detect WAW operations representing algorithmically safe or increment
4087// updates to a resource which do not conflict at the byte level.
4088// TODO: Revisit this rule to see if it needs to be tighter or looser
 4089 // TODO: Add programmatic control over suppression heuristics
4090bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
4091 return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
4092}
4093
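// Worked example of the suppression above (illustrative): two back-to-back dispatches that both perform the same
// shader-storage write to the same descriptor-bound buffer produce a WRITE_AFTER_WRITE where
// FlagBit(hazard.usage_index) equals hazard.prior_access, so the report is suppressed. A WAW between different
// access types, say a transfer write followed by a shader write, is still reported.
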
John Zulauf3d84f1b2020-03-09 13:33:25 -06004094void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06004095 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06004096 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004097}
4098
4099void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06004100 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06004101 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004102}
4103
4104void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
sfricke-samsung85584a72021-09-30 21:43:38 -07004105 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
John Zulauf5a1a5382020-06-22 17:23:25 -06004106 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004107}
locke-lunarga19c71d2020-03-02 18:17:04 -07004108
sfricke-samsung71f04e32022-03-16 01:21:21 -05004109template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004110bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004111 VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
4112 CMD_TYPE cmd_type) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004113 bool skip = false;
4114 const auto *cb_access_context = GetAccessContext(commandBuffer);
4115 assert(cb_access_context);
4116 if (!cb_access_context) return skip;
4117
Tony Barbour845d29b2021-11-09 11:43:14 -07004118 const char *func_name = CommandTypeString(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04004119
locke-lunarga19c71d2020-03-02 18:17:04 -07004120 const auto *context = cb_access_context->GetCurrentAccessContext();
4121 assert(context);
4122 if (!context) return skip;
4123
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004124 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
4125 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004126
4127 for (uint32_t region = 0; region < regionCount; region++) {
4128 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07004129 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07004130 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004131 if (src_buffer) {
4132 ResourceAccessRange src_range =
4133 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004134 hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf477700e2021-01-06 11:41:49 -07004135 if (hazard.hazard) {
4136 // PHASE1 TODO -- add tag information to log msg when useful.
4137 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
4138 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4139 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004140 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004141 }
4142 }
4143
Jeremy Gebben40a22942020-12-22 14:22:06 -07004144 hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf477700e2021-01-06 11:41:49 -07004145 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004146 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004147 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004148 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004149 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004150 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004151 }
4152 if (skip) break;
4153 }
4154 if (skip) break;
4155 }
4156 return skip;
4157}
4158
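// As modeled above, each buffer-to-image region touches two ranges: a transfer READ of the source buffer starting
// at bufferOffset (sized via GetBufferSizeFromCopyImage) and a transfer WRITE of the destination image over
// imageSubresource/imageOffset/imageExtent. A minimal sketch of the triggering API call (cmd, staging_buf, and
// dst_image are assumed valid handles, with dst_image already in TRANSFER_DST layout; extents illustrative):
#if 0  // illustrative only; not compiled as part of the layer
    VkBufferImageCopy region{};
    region.bufferOffset = 0;
    region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mipLevel*/, 0 /*baseArrayLayer*/, 1 /*layerCount*/};
    region.imageOffset = {0, 0, 0};
    region.imageExtent = {64, 64, 1};
    vkCmdCopyBufferToImage(cmd, staging_buf, dst_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
#endif
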
Jeff Leger178b1e52020-10-05 12:22:23 -04004159bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4160 VkImageLayout dstImageLayout, uint32_t regionCount,
4161 const VkBufferImageCopy *pRegions) const {
4162 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
Tony Barbour845d29b2021-11-09 11:43:14 -07004163 CMD_COPYBUFFERTOIMAGE);
Jeff Leger178b1e52020-10-05 12:22:23 -04004164}
4165
4166bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4167 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
4168 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4169 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
Tony Barbour845d29b2021-11-09 11:43:14 -07004170 pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
4171}
4172
4173bool SyncValidator::PreCallValidateCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
4174 const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) const {
4175 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4176 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4177 pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004178}
4179
sfricke-samsung71f04e32022-03-16 01:21:21 -05004180template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004181void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004182 VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
4183 CMD_TYPE cmd_type) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004184 auto *cb_access_context = GetAccessContext(commandBuffer);
4185 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004186
Jeff Leger178b1e52020-10-05 12:22:23 -04004187 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004188 auto *context = cb_access_context->GetCurrentAccessContext();
4189 assert(context);
4190
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004191 auto src_buffer = Get<BUFFER_STATE>(srcBuffer);
4192 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004193
4194 for (uint32_t region = 0; region < regionCount; region++) {
4195 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07004196 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004197 if (src_buffer) {
4198 ResourceAccessRange src_range =
4199 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004200 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004201 }
Jeremy Gebben40a22942020-12-22 14:22:06 -07004202 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004203 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004204 }
4205 }
4206}
4207
Jeff Leger178b1e52020-10-05 12:22:23 -04004208void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4209 VkImageLayout dstImageLayout, uint32_t regionCount,
4210 const VkBufferImageCopy *pRegions) {
4211 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
Tony Barbour845d29b2021-11-09 11:43:14 -07004212 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, CMD_COPYBUFFERTOIMAGE);
Jeff Leger178b1e52020-10-05 12:22:23 -04004213}
4214
4215void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4216 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
4217 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
4218 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4219 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
Tony Barbour845d29b2021-11-09 11:43:14 -07004220 pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2KHR);
4221}
4222
4223void SyncValidator::PreCallRecordCmdCopyBufferToImage2(VkCommandBuffer commandBuffer,
4224 const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo) {
4225 StateTracker::PreCallRecordCmdCopyBufferToImage2(commandBuffer, pCopyBufferToImageInfo);
4226 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4227 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4228 pCopyBufferToImageInfo->pRegions, CMD_COPYBUFFERTOIMAGE2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004229}
4230
sfricke-samsung71f04e32022-03-16 01:21:21 -05004231template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004232bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004233 VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
4234 CMD_TYPE cmd_type) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004235 bool skip = false;
4236 const auto *cb_access_context = GetAccessContext(commandBuffer);
4237 assert(cb_access_context);
4238 if (!cb_access_context) return skip;
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004239 const char *func_name = CommandTypeString(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04004240
locke-lunarga19c71d2020-03-02 18:17:04 -07004241 const auto *context = cb_access_context->GetCurrentAccessContext();
4242 assert(context);
4243 if (!context) return skip;
4244
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004245 auto src_image = Get<IMAGE_STATE>(srcImage);
4246 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebben6fbf8242021-06-21 09:14:46 -06004247 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
locke-lunarga19c71d2020-03-02 18:17:04 -07004248 for (uint32_t region = 0; region < regionCount; region++) {
4249 const auto &copy_region = pRegions[region];
4250 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004251 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07004252 copy_region.imageOffset, copy_region.imageExtent);
4253 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004254 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004255 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004256 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004257 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004258 }
John Zulauf477700e2021-01-06 11:41:49 -07004259 if (dst_mem) {
4260 ResourceAccessRange dst_range =
4261 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004262 hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf477700e2021-01-06 11:41:49 -07004263 if (hazard.hazard) {
4264 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4265 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4266 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004267 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004268 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004269 }
4270 }
4271 if (skip) break;
4272 }
4273 return skip;
4274}
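
// The per-region validation pattern used above (and by the other transfer commands): DetectHazard() is
// queried with the intended access, here a COPY_TRANSFER_READ on the source image subresource and a
// COPY_TRANSFER_WRITE on the destination buffer range; any hazard is reported via LogError() with the
// SyncHazard string/VUID and the prior-access description from FormatUsage(), and the region loop
// exits early once skip is set.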
4275
Jeff Leger178b1e52020-10-05 12:22:23 -04004276bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
4277 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
4278 const VkBufferImageCopy *pRegions) const {
4279 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004280 CMD_COPYIMAGETOBUFFER);
Jeff Leger178b1e52020-10-05 12:22:23 -04004281}
4282
4283bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4284 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
4285 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4286 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004287 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
4288}
4289
4290bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
4291 const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) const {
4292 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4293 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4294 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004295}
4296
sfricke-samsung71f04e32022-03-16 01:21:21 -05004297template <typename RegionType>
Jeff Leger178b1e52020-10-05 12:22:23 -04004298void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
sfricke-samsung71f04e32022-03-16 01:21:21 -05004299 VkBuffer dstBuffer, uint32_t regionCount, const RegionType *pRegions,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004300 CMD_TYPE cmd_type) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004301 auto *cb_access_context = GetAccessContext(commandBuffer);
4302 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004303
Jeff Leger178b1e52020-10-05 12:22:23 -04004304 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004305 auto *context = cb_access_context->GetCurrentAccessContext();
4306 assert(context);
4307
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004308 auto src_image = Get<IMAGE_STATE>(srcImage);
Jeremy Gebben9f537102021-10-05 16:37:12 -06004309 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07004312
4313 for (uint32_t region = 0; region < regionCount; region++) {
4314 const auto &copy_region = pRegions[region];
4315 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004316 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004317 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004318 if (dst_buffer) {
4319 ResourceAccessRange dst_range =
4320 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004321 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004322 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004323 }
4324 }
4325}
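
// Unlike the validate path, which gates the buffer-side check on the resolved memory binding, the
// record path above only needs the BUFFER_STATE object to update access state, so it gates on
// dst_buffer alone.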
4326
Jeff Leger178b1e52020-10-05 12:22:23 -04004327void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4328 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
4329 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004330 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, CMD_COPYIMAGETOBUFFER);
Jeff Leger178b1e52020-10-05 12:22:23 -04004331}
4332
4333void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4334 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
4335 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
4336 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4337 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
Tony-LunarGaf3632a2021-11-10 15:51:57 -07004338 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2KHR);
4339}
4340
4341void SyncValidator::PreCallRecordCmdCopyImageToBuffer2(VkCommandBuffer commandBuffer,
4342 const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo) {
4343 StateTracker::PreCallRecordCmdCopyImageToBuffer2(commandBuffer, pCopyImageToBufferInfo);
4344 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4345 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4346 pCopyImageToBufferInfo->pRegions, CMD_COPYIMAGETOBUFFER2);
Jeff Leger178b1e52020-10-05 12:22:23 -04004347}
4348
4349template <typename RegionType>
4350bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4351 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4352 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004353 bool skip = false;
4354 const auto *cb_access_context = GetAccessContext(commandBuffer);
4355 assert(cb_access_context);
4356 if (!cb_access_context) return skip;
4357
4358 const auto *context = cb_access_context->GetCurrentAccessContext();
4359 assert(context);
4360 if (!context) return skip;
4361
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004362 auto src_image = Get<IMAGE_STATE>(srcImage);
4363 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004364
4365 for (uint32_t region = 0; region < regionCount; region++) {
4366 const auto &blit_region = pRegions[region];
4367 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004368 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4369 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4370 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4371 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4372 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4373 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004374 auto hazard = context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004375 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004376 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004377 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004378 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004379 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004380 }
4381 }
4382
4383 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004384 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4385 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4386 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4387 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4388 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4389 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004390 auto hazard = context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004391 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004392 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004393 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004394 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004395 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004396 }
 4397         }
 4398         if (skip) break;
4399 }
4400
4401 return skip;
4402}
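
// Blit regions are specified as two corner offsets that may be "flipped" for a mirrored blit, so the
// code above normalizes each pair into an origin plus extent before hazard detection: offset is the
// component-wise min of the two corners and extent is the component-wise absolute difference.
// Worked example (illustrative values only): srcOffsets[0] = {0, 64, 0} and srcOffsets[1] = {32, 0, 1}
// normalize to offset = {0, 0, 0}, extent = {32, 64, 1}.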
4403
Jeff Leger178b1e52020-10-05 12:22:23 -04004404bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4405 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4406 const VkImageBlit *pRegions, VkFilter filter) const {
4407 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
4408 "vkCmdBlitImage");
4409}
4410
4411bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
4412 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
4413 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4414 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4415 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
4416}
4417
Tony-LunarG542ae912021-11-04 16:06:44 -06004418bool SyncValidator::PreCallValidateCmdBlitImage2(VkCommandBuffer commandBuffer,
4419 const VkBlitImageInfo2 *pBlitImageInfo) const {
4420 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4421 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4422 pBlitImageInfo->filter, "vkCmdBlitImage2");
4423}
4424
Jeff Leger178b1e52020-10-05 12:22:23 -04004425template <typename RegionType>
4426void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4427 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4428 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004429 auto *cb_access_context = GetAccessContext(commandBuffer);
4430 assert(cb_access_context);
4431 auto *context = cb_access_context->GetCurrentAccessContext();
4432 assert(context);
4433
Jeremy Gebben9f537102021-10-05 16:37:12 -06004434 auto src_image = Get<IMAGE_STATE>(srcImage);
4435 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004436
4437 for (uint32_t region = 0; region < regionCount; region++) {
4438 const auto &blit_region = pRegions[region];
4439 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004440 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4441 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4442 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4443 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4444 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4445 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004446 context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004447 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004448 }
4449 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004450 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4451 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4452 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4453 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4454 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4455 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004456 context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004457 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004458 }
4459 }
4460}
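
// The blit record path repeats the same corner normalization as the validate path and then applies
// BLIT_TRANSFER_READ / BLIT_TRANSFER_WRITE with SyncOrdering::kNonAttachment, so the stored state
// matches exactly what validation checked against.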
locke-lunarg36ba2592020-04-03 09:42:04 -06004461
Jeff Leger178b1e52020-10-05 12:22:23 -04004462void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4463 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4464 const VkImageBlit *pRegions, VkFilter filter) {
4465 auto *cb_access_context = GetAccessContext(commandBuffer);
4466 assert(cb_access_context);
4467 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
4468 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4469 pRegions, filter);
4470 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
4471}
4472
4473void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
4474 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
4475 auto *cb_access_context = GetAccessContext(commandBuffer);
4476 assert(cb_access_context);
4477 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
4478 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4479 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4480 pBlitImageInfo->filter, tag);
4481}
4482
Tony-LunarG542ae912021-11-04 16:06:44 -06004483void SyncValidator::PreCallRecordCmdBlitImage2(VkCommandBuffer commandBuffer, const VkBlitImageInfo2 *pBlitImageInfo) {
 4484    StateTracker::PreCallRecordCmdBlitImage2(commandBuffer, pBlitImageInfo);
4485 auto *cb_access_context = GetAccessContext(commandBuffer);
4486 assert(cb_access_context);
4487 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2);
4488 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4489 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4490 pBlitImageInfo->filter, tag);
4491}
4492
John Zulauffaea0ee2021-01-14 14:01:32 -07004493bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4494 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
4495 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
4496 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004497 bool skip = false;
4498 if (drawCount == 0) return skip;
4499
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004500 auto buf_state = Get<BUFFER_STATE>(buffer);
locke-lunargff255f92020-05-13 18:53:52 -06004501 VkDeviceSize size = struct_size;
4502 if (drawCount == 1 || stride == size) {
4503 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004504 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06004505 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4506 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004507 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004508 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004509 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004510 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004511 }
4512 } else {
4513 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004514 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06004515 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4516 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004517 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004518 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
4519 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004520 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004521 break;
4522 }
4523 }
4524 }
4525 return skip;
4526}
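
// ValidateIndirectBuffer distinguishes tightly packed from strided indirect records. A sketch with
// illustrative numbers (sizeof(VkDrawIndirectCommand) is 16 bytes, drawCount = 3):
//   stride == 16  ->  one combined range   [offset, offset + 48)
//   stride == 32  ->  three 16-byte ranges [offset + i * 32, offset + i * 32 + 16) for i = 0, 1, 2
// so when records are padded, only the command structs themselves are checked, not the padding.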
4527
John Zulauf14940722021-04-12 15:19:02 -06004528void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag tag, const VkDeviceSize struct_size,
locke-lunarg61870c22020-06-09 14:51:50 -06004529 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
4530 uint32_t stride) {
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004531 auto buf_state = Get<BUFFER_STATE>(buffer);
locke-lunargff255f92020-05-13 18:53:52 -06004532 VkDeviceSize size = struct_size;
4533 if (drawCount == 1 || stride == size) {
4534 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004535 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004536 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004537 } else {
4538 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004539 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004540 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
4541 tag);
locke-lunargff255f92020-05-13 18:53:52 -06004542 }
4543 }
4544}
4545
John Zulauffaea0ee2021-01-14 14:01:32 -07004546bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4547 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4548 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004549 bool skip = false;
4550
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004551 auto count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004552 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06004553 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4554 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004555 skip |= LogError(count_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004556 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004557 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004558 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004559 }
4560 return skip;
4561}
4562
John Zulauf14940722021-04-12 15:19:02 -06004563void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag tag, VkBuffer buffer, VkDeviceSize offset) {
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004564 auto count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004565 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004566 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004567}
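
// The count-buffer helpers above cover only the 4-byte draw count itself (MakeRange(offset, 4), one
// uint32_t) with an INDIRECT_COMMAND_READ access; vkCmdDraw*IndirectCount reads nothing else from
// that buffer, so no other range is validated or recorded here.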
4568
locke-lunarg36ba2592020-04-03 09:42:04 -06004569bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004570 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004571 const auto *cb_access_context = GetAccessContext(commandBuffer);
4572 assert(cb_access_context);
4573 if (!cb_access_context) return skip;
4574
locke-lunarg61870c22020-06-09 14:51:50 -06004575 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004576 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004577}
4578
4579void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004580 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004581 auto *cb_access_context = GetAccessContext(commandBuffer);
4582 assert(cb_access_context);
4583 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004584
locke-lunarg61870c22020-06-09 14:51:50 -06004585 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004586}
locke-lunarge1a67022020-04-29 00:15:36 -06004587
4588bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004589 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004590 const auto *cb_access_context = GetAccessContext(commandBuffer);
4591 assert(cb_access_context);
4592 if (!cb_access_context) return skip;
4593
4594 const auto *context = cb_access_context->GetCurrentAccessContext();
4595 assert(context);
4596 if (!context) return skip;
4597
locke-lunarg61870c22020-06-09 14:51:50 -06004598 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004599 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4600 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004601 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004602}
4603
4604void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004605 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004606 auto *cb_access_context = GetAccessContext(commandBuffer);
4607 assert(cb_access_context);
4608 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4609 auto *context = cb_access_context->GetCurrentAccessContext();
4610 assert(context);
4611
locke-lunarg61870c22020-06-09 14:51:50 -06004612 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4613 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004614}
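
// vkCmdDispatch touches only descriptor-backed resources, so its validate/record pair is limited to
// ValidateDispatchDrawDescriptorSet / RecordDispatchDrawDescriptorSet. The indirect variant also
// treats the parameter buffer as a single VkDispatchIndirectCommand read: with drawCount = 1 and
// stride = sizeof(VkDispatchIndirectCommand) (three uint32_t, 12 bytes) this collapses to one
// 12-byte range at the given offset.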
4615
4616bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4617 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004618 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004619 const auto *cb_access_context = GetAccessContext(commandBuffer);
4620 assert(cb_access_context);
4621 if (!cb_access_context) return skip;
4622
locke-lunarg61870c22020-06-09 14:51:50 -06004623 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4624 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4625 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004626 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004627}
4628
4629void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4630 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004631 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004632 auto *cb_access_context = GetAccessContext(commandBuffer);
4633 assert(cb_access_context);
4634 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004635
locke-lunarg61870c22020-06-09 14:51:50 -06004636 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4637 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4638 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004639}
4640
4641bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4642 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004643 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004644 const auto *cb_access_context = GetAccessContext(commandBuffer);
4645 assert(cb_access_context);
4646 if (!cb_access_context) return skip;
4647
locke-lunarg61870c22020-06-09 14:51:50 -06004648 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4649 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4650 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004651 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004652}
4653
4654void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4655 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004656 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004657 auto *cb_access_context = GetAccessContext(commandBuffer);
4658 assert(cb_access_context);
4659 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004660
locke-lunarg61870c22020-06-09 14:51:50 -06004661 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4662 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4663 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004664}
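
// The direct draw entry points share one pattern: validate/record the descriptor sets bound at
// VK_PIPELINE_BIND_POINT_GRAPHICS, the vertex (and, for indexed draws, index) buffer ranges consumed,
// and the current subpass attachments. The indirect variants below reuse the same pieces but pass
// UINT32_MAX for the vertex/index counts, since the real counts live in the indirect buffer and are
// not known at record time.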
4665
4666bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4667 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004668 bool skip = false;
4669 if (drawCount == 0) return skip;
4670
locke-lunargff255f92020-05-13 18:53:52 -06004671 const auto *cb_access_context = GetAccessContext(commandBuffer);
4672 assert(cb_access_context);
4673 if (!cb_access_context) return skip;
4674
4675 const auto *context = cb_access_context->GetCurrentAccessContext();
4676 assert(context);
4677 if (!context) return skip;
4678
locke-lunarg61870c22020-06-09 14:51:50 -06004679 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4680 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004681 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4682 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004683
 4684     // TODO: For now, we validate the whole vertex buffer. It might cause some false positives.
 4685     // The VkDrawIndirectCommand buffer could still be changed any time before SubmitQueue.
4686 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004687 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004688 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004689}
4690
4691void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4692 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004693 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004694 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004695 auto *cb_access_context = GetAccessContext(commandBuffer);
4696 assert(cb_access_context);
4697 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4698 auto *context = cb_access_context->GetCurrentAccessContext();
4699 assert(context);
4700
locke-lunarg61870c22020-06-09 14:51:50 -06004701 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4702 cb_access_context->RecordDrawSubpassAttachment(tag);
4703 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004704
 4705     // TODO: For now, we record the whole vertex buffer. It might cause some false positives.
 4706     // The VkDrawIndirectCommand buffer could still be changed any time before SubmitQueue.
4707 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004708 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004709}
4710
4711bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4712 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004713 bool skip = false;
4714 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004715 const auto *cb_access_context = GetAccessContext(commandBuffer);
4716 assert(cb_access_context);
4717 if (!cb_access_context) return skip;
4718
4719 const auto *context = cb_access_context->GetCurrentAccessContext();
4720 assert(context);
4721 if (!context) return skip;
4722
locke-lunarg61870c22020-06-09 14:51:50 -06004723 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4724 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004725 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4726 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004727
 4728     // TODO: For now, we validate the whole index and vertex buffer. It might cause some false positives.
 4729     // The VkDrawIndexedIndirectCommand buffer could still be changed any time before SubmitQueue.
4730 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004731 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004732 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004733}
4734
4735void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4736 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004737 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004738 auto *cb_access_context = GetAccessContext(commandBuffer);
4739 assert(cb_access_context);
4740 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4741 auto *context = cb_access_context->GetCurrentAccessContext();
4742 assert(context);
4743
locke-lunarg61870c22020-06-09 14:51:50 -06004744 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4745 cb_access_context->RecordDrawSubpassAttachment(tag);
4746 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004747
 4748     // TODO: For now, we record the whole index and vertex buffer. It might cause some false positives.
 4749     // The VkDrawIndexedIndirectCommand buffer could still be changed any time before SubmitQueue.
4750 // We will record the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004751 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004752}
4753
4754bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4755 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4756 uint32_t stride, const char *function) const {
4757 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004758 const auto *cb_access_context = GetAccessContext(commandBuffer);
4759 assert(cb_access_context);
4760 if (!cb_access_context) return skip;
4761
4762 const auto *context = cb_access_context->GetCurrentAccessContext();
4763 assert(context);
4764 if (!context) return skip;
4765
locke-lunarg61870c22020-06-09 14:51:50 -06004766 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4767 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004768 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4769 maxDrawCount, stride, function);
4770 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004771
 4772     // TODO: For now, we validate the whole vertex buffer. It might cause some false positives.
 4773     // The VkDrawIndirectCommand buffer could still be changed any time before SubmitQueue.
4774 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004775 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004776 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004777}
4778
4779bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4780 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4781 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004782 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4783 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004784}
4785
sfricke-samsung85584a72021-09-30 21:43:38 -07004786void SyncValidator::RecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4787 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4788 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06004789 auto *cb_access_context = GetAccessContext(commandBuffer);
4790 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07004791 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06004792 auto *context = cb_access_context->GetCurrentAccessContext();
4793 assert(context);
4794
locke-lunarg61870c22020-06-09 14:51:50 -06004795 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4796 cb_access_context->RecordDrawSubpassAttachment(tag);
4797 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4798 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004799
 4800     // TODO: For now, we record the whole vertex buffer. It might cause some false positives.
 4801     // The VkDrawIndirectCommand buffer could still be changed any time before SubmitQueue.
4802 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004803 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004804}
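
// For the *IndirectCount variants the validate path conservatively checks up to maxDrawCount records
// plus the 4-byte count, while the record path above stamps only a single VkDrawIndirectCommand-sized
// range (drawCount 1), presumably because the actual draw count is read from the count buffer on the
// GPU and is unknown while recording.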
4805
sfricke-samsung85584a72021-09-30 21:43:38 -07004806void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4807 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4808 uint32_t stride) {
4809 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4810 stride);
4811 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4812 CMD_DRAWINDIRECTCOUNT);
4813}
locke-lunarge1a67022020-04-29 00:15:36 -06004814bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4815 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4816 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004817 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4818 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004819}
4820
4821void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4822 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4823 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004824 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4825 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004826 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4827 CMD_DRAWINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06004828}
4829
4830bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4831 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4832 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004833 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4834 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004835}
4836
4837void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4838 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4839 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004840 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4841 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004842 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4843 CMD_DRAWINDIRECTCOUNTAMD);
locke-lunargff255f92020-05-13 18:53:52 -06004844}
4845
4846bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4847 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4848 uint32_t stride, const char *function) const {
4849 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004850 const auto *cb_access_context = GetAccessContext(commandBuffer);
4851 assert(cb_access_context);
4852 if (!cb_access_context) return skip;
4853
4854 const auto *context = cb_access_context->GetCurrentAccessContext();
4855 assert(context);
4856 if (!context) return skip;
4857
locke-lunarg61870c22020-06-09 14:51:50 -06004858 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4859 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004860 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4861 offset, maxDrawCount, stride, function);
4862 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004863
 4864     // TODO: For now, we validate the whole index and vertex buffer. It might cause some false positives.
 4865     // The VkDrawIndexedIndirectCommand buffer could still be changed any time before SubmitQueue.
4866 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004867 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004868 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004869}
4870
4871bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4872 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4873 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004874 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4875 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004876}
4877
sfricke-samsung85584a72021-09-30 21:43:38 -07004878void SyncValidator::RecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4879 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4880 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06004881 auto *cb_access_context = GetAccessContext(commandBuffer);
4882 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07004883 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06004884 auto *context = cb_access_context->GetCurrentAccessContext();
4885 assert(context);
4886
locke-lunarg61870c22020-06-09 14:51:50 -06004887 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4888 cb_access_context->RecordDrawSubpassAttachment(tag);
4889 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4890 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004891
 4892     // TODO: For now, we record the whole index and vertex buffer. It might cause some false positives.
 4893     // The VkDrawIndexedIndirectCommand buffer could still be changed any time before SubmitQueue.
locke-lunarg61870c22020-06-09 14:51:50 -06004894     // We will record the index and vertex buffer in SubmitQueue in the future.
4895 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004896}
4897
sfricke-samsung85584a72021-09-30 21:43:38 -07004898void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4899 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4900 uint32_t maxDrawCount, uint32_t stride) {
4901 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4902 maxDrawCount, stride);
4903 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4904 CMD_DRAWINDEXEDINDIRECTCOUNT);
4905}
4906
locke-lunarge1a67022020-04-29 00:15:36 -06004907bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4908 VkDeviceSize offset, VkBuffer countBuffer,
4909 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4910 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004911 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4912 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004913}
4914
4915void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4916 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4917 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004918 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4919 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004920 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4921 CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06004922}
4923
4924bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4925 VkDeviceSize offset, VkBuffer countBuffer,
4926 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4927 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004928 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4929 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004930}
4931
4932void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4933 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4934 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004935 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4936 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004937 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4938 CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
locke-lunarge1a67022020-04-29 00:15:36 -06004939}
4940
4941bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4942 const VkClearColorValue *pColor, uint32_t rangeCount,
4943 const VkImageSubresourceRange *pRanges) const {
4944 bool skip = false;
4945 const auto *cb_access_context = GetAccessContext(commandBuffer);
4946 assert(cb_access_context);
4947 if (!cb_access_context) return skip;
4948
4949 const auto *context = cb_access_context->GetCurrentAccessContext();
4950 assert(context);
4951 if (!context) return skip;
4952
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004953 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06004954
4955 for (uint32_t index = 0; index < rangeCount; index++) {
4956 const auto &range = pRanges[index];
4957 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004958 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004959 if (hazard.hazard) {
4960 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004961 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004962 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004963 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004964 }
4965 }
4966 }
4967 return skip;
4968}
4969
4970void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4971 const VkClearColorValue *pColor, uint32_t rangeCount,
4972 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004973 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004974 auto *cb_access_context = GetAccessContext(commandBuffer);
4975 assert(cb_access_context);
4976 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4977 auto *context = cb_access_context->GetCurrentAccessContext();
4978 assert(context);
4979
Jeremy Gebbenf4449392022-01-28 10:09:10 -07004980 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06004981
4982 for (uint32_t index = 0; index < rangeCount; index++) {
4983 const auto &range = pRanges[index];
4984 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004985 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004986 }
4987 }
4988}
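
// The clear-image paths work directly on each VkImageSubresourceRange (no offset/extent), applying a
// CLEAR_TRANSFER_WRITE per range. For example (illustrative), a range of {VK_IMAGE_ASPECT_COLOR_BIT,
// 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS} marks the entire image as written at this
// command's tag.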
4989
4990bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4991 VkImageLayout imageLayout,
4992 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4993 const VkImageSubresourceRange *pRanges) const {
4994 bool skip = false;
4995 const auto *cb_access_context = GetAccessContext(commandBuffer);
4996 assert(cb_access_context);
4997 if (!cb_access_context) return skip;
4998
4999 const auto *context = cb_access_context->GetCurrentAccessContext();
5000 assert(context);
5001 if (!context) return skip;
5002
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005003 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005004
5005 for (uint32_t index = 0; index < rangeCount; index++) {
5006 const auto &range = pRanges[index];
5007 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06005008 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005009 if (hazard.hazard) {
5010 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005011 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005012 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07005013 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005014 }
5015 }
5016 }
5017 return skip;
5018}
5019
5020void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
5021 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
5022 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005023 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06005024 auto *cb_access_context = GetAccessContext(commandBuffer);
5025 assert(cb_access_context);
5026 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
5027 auto *context = cb_access_context->GetCurrentAccessContext();
5028 assert(context);
5029
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005030 auto image_state = Get<IMAGE_STATE>(image);
locke-lunarge1a67022020-04-29 00:15:36 -06005031
5032 for (uint32_t index = 0; index < rangeCount; index++) {
5033 const auto &range = pRanges[index];
5034 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06005035 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005036 }
5037 }
5038}
5039
5040bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
5041 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
5042 VkDeviceSize dstOffset, VkDeviceSize stride,
5043 VkQueryResultFlags flags) const {
5044 bool skip = false;
5045 const auto *cb_access_context = GetAccessContext(commandBuffer);
5046 assert(cb_access_context);
5047 if (!cb_access_context) return skip;
5048
5049 const auto *context = cb_access_context->GetCurrentAccessContext();
5050 assert(context);
5051 if (!context) return skip;
5052
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005053 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005054
5055 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005056 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005057 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005058 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005059 skip |=
5060 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5061 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005062 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005063 }
5064 }
locke-lunargff255f92020-05-13 18:53:52 -06005065
5066    // TODO: Track VkQueryPool accesses (the query result reads from the pool are not yet validated)
locke-lunarge1a67022020-04-29 00:15:36 -06005067 return skip;
5068}
5069
5070void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
5071 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5072 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005073 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
5074 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06005075 auto *cb_access_context = GetAccessContext(commandBuffer);
5076 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06005077 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06005078 auto *context = cb_access_context->GetCurrentAccessContext();
5079 assert(context);
5080
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005081 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005082
5083 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005084 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005085 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005086 }
locke-lunargff255f92020-05-13 18:53:52 -06005087
5088    // TODO: Track VkQueryPool accesses (the query result reads from the pool are not yet recorded)
locke-lunarge1a67022020-04-29 00:15:36 -06005089}
5090
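// vkCmdFillBuffer is modeled as a plain transfer write (SYNC_COPY_TRANSFER_WRITE) over the filled range.
// Illustrative application-side sketch (not part of this file; `cmd` and `buf` are hypothetical handles) of the
// pattern this check reports:
//   vkCmdFillBuffer(cmd, buf, 0, 256, 0xA);  // first write to [0, 256)
//   vkCmdFillBuffer(cmd, buf, 0, 256, 0xB);  // WRITE_AFTER_WRITE hazard unless a pipeline barrier orders the two writes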
5091bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5092 VkDeviceSize size, uint32_t data) const {
5093 bool skip = false;
5094 const auto *cb_access_context = GetAccessContext(commandBuffer);
5095 assert(cb_access_context);
5096 if (!cb_access_context) return skip;
5097
5098 const auto *context = cb_access_context->GetCurrentAccessContext();
5099 assert(context);
5100 if (!context) return skip;
5101
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005102 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005103
5104 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005105 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005106 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005107 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005108 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005109 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005110 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005111 }
5112 }
5113 return skip;
5114}
5115
5116void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5117 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005118 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06005119 auto *cb_access_context = GetAccessContext(commandBuffer);
5120 assert(cb_access_context);
5121 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
5122 auto *context = cb_access_context->GetCurrentAccessContext();
5123 assert(context);
5124
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005125 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005126
5127 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005128 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005129 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005130 }
5131}
5132
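// vkCmdResolveImage: each region is checked as a SYNC_RESOLVE_TRANSFER_READ of the source subresource and a
// SYNC_RESOLVE_TRANSFER_WRITE of the destination subresource over the region's offset/extent.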
5133bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5134 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5135 const VkImageResolve *pRegions) const {
5136 bool skip = false;
5137 const auto *cb_access_context = GetAccessContext(commandBuffer);
5138 assert(cb_access_context);
5139 if (!cb_access_context) return skip;
5140
5141 const auto *context = cb_access_context->GetCurrentAccessContext();
5142 assert(context);
5143 if (!context) return skip;
5144
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005145 auto src_image = Get<IMAGE_STATE>(srcImage);
5146 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarge1a67022020-04-29 00:15:36 -06005147
5148 for (uint32_t region = 0; region < regionCount; region++) {
5149 const auto &resolve_region = pRegions[region];
5150 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005151 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06005152 resolve_region.srcOffset, resolve_region.extent);
5153 if (hazard.hazard) {
5154 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005155 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005156 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005157 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005158 }
5159 }
5160
5161 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005162 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06005163 resolve_region.dstOffset, resolve_region.extent);
5164 if (hazard.hazard) {
5165 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005166 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005167 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005168 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005169 }
5170 if (skip) break;
5171 }
5172 }
5173
5174 return skip;
5175}
5176
5177void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5178 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5179 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005180 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5181 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06005182 auto *cb_access_context = GetAccessContext(commandBuffer);
5183 assert(cb_access_context);
5184 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
5185 auto *context = cb_access_context->GetCurrentAccessContext();
5186 assert(context);
5187
Jeremy Gebben9f537102021-10-05 16:37:12 -06005188 auto src_image = Get<IMAGE_STATE>(srcImage);
5189 auto dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarge1a67022020-04-29 00:15:36 -06005190
5191 for (uint32_t region = 0; region < regionCount; region++) {
5192 const auto &resolve_region = pRegions[region];
5193 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005194 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005195 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005196 }
5197 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005198 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005199 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005200 }
5201 }
5202}
5203
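// Shared validation for vkCmdResolveImage2 and vkCmdResolveImage2KHR; cmd_type supplies the command name used in
// the error messages.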
Tony-LunarG562fc102021-11-12 13:58:35 -07005204bool SyncValidator::ValidateCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
5205 CMD_TYPE cmd_type) const {
Jeff Leger178b1e52020-10-05 12:22:23 -04005206 bool skip = false;
5207 const auto *cb_access_context = GetAccessContext(commandBuffer);
5208 assert(cb_access_context);
5209 if (!cb_access_context) return skip;
5210
5211 const auto *context = cb_access_context->GetCurrentAccessContext();
5212 assert(context);
5213 if (!context) return skip;
5214
Tony-LunarG562fc102021-11-12 13:58:35 -07005215 const char *func_name = CommandTypeString(cmd_type);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005216 auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5217 auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04005218
5219 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5220 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5221 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005222 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04005223 resolve_region.srcOffset, resolve_region.extent);
5224 if (hazard.hazard) {
5225 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
Tony-LunarG562fc102021-11-12 13:58:35 -07005226 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04005227 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005228 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005229 }
5230 }
5231
5232 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005233 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04005234 resolve_region.dstOffset, resolve_region.extent);
5235 if (hazard.hazard) {
5236 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
Tony-LunarG562fc102021-11-12 13:58:35 -07005237 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
Jeff Leger178b1e52020-10-05 12:22:23 -04005238 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005239 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005240 }
5241 if (skip) break;
5242 }
5243 }
5244
5245 return skip;
5246}
5247
Tony-LunarG562fc102021-11-12 13:58:35 -07005248bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5249 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
5250 return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
5251}
5252
5253bool SyncValidator::PreCallValidateCmdResolveImage2(VkCommandBuffer commandBuffer,
5254 const VkResolveImageInfo2 *pResolveImageInfo) const {
5255 return ValidateCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
5256}
5257
5258void SyncValidator::RecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR *pResolveImageInfo,
5259 CMD_TYPE cmd_type) {
Jeff Leger178b1e52020-10-05 12:22:23 -04005260 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
5261 auto *cb_access_context = GetAccessContext(commandBuffer);
5262 assert(cb_access_context);
Tony-LunarG562fc102021-11-12 13:58:35 -07005263 const auto tag = cb_access_context->NextCommandTag(cmd_type);
Jeff Leger178b1e52020-10-05 12:22:23 -04005264 auto *context = cb_access_context->GetCurrentAccessContext();
5265 assert(context);
5266
Jeremy Gebben9f537102021-10-05 16:37:12 -06005267 auto src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5268 auto dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
Jeff Leger178b1e52020-10-05 12:22:23 -04005269
5270 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5271 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5272 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005273 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005274 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005275 }
5276 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005277 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005278 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005279 }
5280 }
5281}
5282
Tony-LunarG562fc102021-11-12 13:58:35 -07005283void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5284 const VkResolveImageInfo2KHR *pResolveImageInfo) {
5285 RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2KHR);
5286}
5287
5288void SyncValidator::PreCallRecordCmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2 *pResolveImageInfo) {
5289 RecordCmdResolveImage2(commandBuffer, pResolveImageInfo, CMD_RESOLVEIMAGE2);
5290}
5291
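// vkCmdUpdateBuffer: the write covers exactly dataSize bytes at dstOffset (VK_WHOLE_SIZE is not a legal size here)
// and is modeled as SYNC_COPY_TRANSFER_WRITE.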
locke-lunarge1a67022020-04-29 00:15:36 -06005292bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5293 VkDeviceSize dataSize, const void *pData) const {
5294 bool skip = false;
5295 const auto *cb_access_context = GetAccessContext(commandBuffer);
5296 assert(cb_access_context);
5297 if (!cb_access_context) return skip;
5298
5299 const auto *context = cb_access_context->GetCurrentAccessContext();
5300 assert(context);
5301 if (!context) return skip;
5302
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005303 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005304
5305 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005306 // VK_WHOLE_SIZE not allowed
5307 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005308 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005309 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005310 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005311 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005312 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005313 }
5314 }
5315 return skip;
5316}
5317
5318void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5319 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005320 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06005321 auto *cb_access_context = GetAccessContext(commandBuffer);
5322 assert(cb_access_context);
5323 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
5324 auto *context = cb_access_context->GetCurrentAccessContext();
5325 assert(context);
5326
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005327 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarge1a67022020-04-29 00:15:36 -06005328
5329 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005330 // VK_WHOLE_SIZE not allowed
5331 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005332 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005333 }
5334}
locke-lunargff255f92020-05-13 18:53:52 -06005335
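// vkCmdWriteBufferMarkerAMD writes a single 32-bit marker, so a fixed 4-byte range at dstOffset is checked and recorded.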
5336bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5337 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5338 bool skip = false;
5339 const auto *cb_access_context = GetAccessContext(commandBuffer);
5340 assert(cb_access_context);
5341 if (!cb_access_context) return skip;
5342
5343 const auto *context = cb_access_context->GetCurrentAccessContext();
5344 assert(context);
5345 if (!context) return skip;
5346
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005347 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunargff255f92020-05-13 18:53:52 -06005348
5349 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005350 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005351 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunargff255f92020-05-13 18:53:52 -06005352 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005353 skip |=
5354 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5355 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005356 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06005357 }
5358 }
5359 return skip;
5360}
5361
5362void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5363 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005364 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06005365 auto *cb_access_context = GetAccessContext(commandBuffer);
5366 assert(cb_access_context);
5367 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5368 auto *context = cb_access_context->GetCurrentAccessContext();
5369 assert(context);
5370
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005371 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunargff255f92020-05-13 18:53:52 -06005372
5373 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005374 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005375 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005376 }
5377}
John Zulauf49beb112020-11-04 16:06:31 -07005378
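// The event and barrier commands below delegate to SyncOp* objects (SyncOpSetEvent, SyncOpResetEvent, SyncOpWaitEvents,
// SyncOpPipelineBarrier). Validation constructs a temporary op and calls Validate(); recording hands the op to the
// command buffer context via RecordSyncOp so the same operation can be replayed later (see the ReplayRecord/ReplayValidate
// methods further down).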
5379bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
5380 bool skip = false;
5381 const auto *cb_context = GetAccessContext(commandBuffer);
5382 assert(cb_context);
5383 if (!cb_context) return skip;
5384
John Zulauf36ef9282021-02-02 11:47:24 -07005385 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005386 return set_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005387}
5388
5389void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5390 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
5391 auto *cb_context = GetAccessContext(commandBuffer);
5392 assert(cb_context);
5393 if (!cb_context) return;
John Zulauf1bf30522021-09-03 15:39:06 -06005394 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf49beb112020-11-04 16:06:31 -07005395}
5396
John Zulauf4edde622021-02-15 08:54:50 -07005397bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5398 const VkDependencyInfoKHR *pDependencyInfo) const {
5399 bool skip = false;
5400 const auto *cb_context = GetAccessContext(commandBuffer);
5401 assert(cb_context);
5402 if (!cb_context || !pDependencyInfo) return skip;
5403
5404 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5405 return set_event_op.Validate(*cb_context);
5406}
5407
Tony-LunarGc43525f2021-11-15 16:12:38 -07005408bool SyncValidator::PreCallValidateCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5409 const VkDependencyInfo *pDependencyInfo) const {
5410 bool skip = false;
5411 const auto *cb_context = GetAccessContext(commandBuffer);
5412 assert(cb_context);
5413 if (!cb_context || !pDependencyInfo) return skip;
5414
5415 SyncOpSetEvent set_event_op(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5416 return set_event_op.Validate(*cb_context);
5417}
5418
John Zulauf4edde622021-02-15 08:54:50 -07005419void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5420 const VkDependencyInfoKHR *pDependencyInfo) {
5421 StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
5422 auto *cb_context = GetAccessContext(commandBuffer);
5423 assert(cb_context);
5424 if (!cb_context || !pDependencyInfo) return;
5425
John Zulauf1bf30522021-09-03 15:39:06 -06005426 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
John Zulauf4edde622021-02-15 08:54:50 -07005427}
5428
Tony-LunarGc43525f2021-11-15 16:12:38 -07005429void SyncValidator::PostCallRecordCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5430 const VkDependencyInfo *pDependencyInfo) {
5431 StateTracker::PostCallRecordCmdSetEvent2(commandBuffer, event, pDependencyInfo);
5432 auto *cb_context = GetAccessContext(commandBuffer);
5433 assert(cb_context);
5434 if (!cb_context || !pDependencyInfo) return;
5435
5436 cb_context->RecordSyncOp<SyncOpSetEvent>(CMD_SETEVENT2, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5437}
5438
John Zulauf49beb112020-11-04 16:06:31 -07005439bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
5440 VkPipelineStageFlags stageMask) const {
5441 bool skip = false;
5442 const auto *cb_context = GetAccessContext(commandBuffer);
5443 assert(cb_context);
5444 if (!cb_context) return skip;
5445
John Zulauf36ef9282021-02-02 11:47:24 -07005446 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005447 return reset_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005448}
5449
5450void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5451 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
5452 auto *cb_context = GetAccessContext(commandBuffer);
5453 assert(cb_context);
5454 if (!cb_context) return;
5455
John Zulauf1bf30522021-09-03 15:39:06 -06005456 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf49beb112020-11-04 16:06:31 -07005457}
5458
John Zulauf4edde622021-02-15 08:54:50 -07005459bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5460 VkPipelineStageFlags2KHR stageMask) const {
5461 bool skip = false;
5462 const auto *cb_context = GetAccessContext(commandBuffer);
5463 assert(cb_context);
5464 if (!cb_context) return skip;
5465
5466 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
5467 return reset_event_op.Validate(*cb_context);
5468}
5469
Tony-LunarGa2662db2021-11-16 07:26:24 -07005470bool SyncValidator::PreCallValidateCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event,
5471 VkPipelineStageFlags2 stageMask) const {
5472 bool skip = false;
5473 const auto *cb_context = GetAccessContext(commandBuffer);
5474 assert(cb_context);
5475 if (!cb_context) return skip;
5476
5477 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
5478 return reset_event_op.Validate(*cb_context);
5479}
5480
John Zulauf4edde622021-02-15 08:54:50 -07005481void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5482 VkPipelineStageFlags2KHR stageMask) {
5483 StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
5484 auto *cb_context = GetAccessContext(commandBuffer);
5485 assert(cb_context);
5486 if (!cb_context) return;
5487
John Zulauf1bf30522021-09-03 15:39:06 -06005488 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf4edde622021-02-15 08:54:50 -07005489}
5490
Tony-LunarGa2662db2021-11-16 07:26:24 -07005491void SyncValidator::PostCallRecordCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask) {
5492 StateTracker::PostCallRecordCmdResetEvent2(commandBuffer, event, stageMask);
5493 auto *cb_context = GetAccessContext(commandBuffer);
5494 assert(cb_context);
5495 if (!cb_context) return;
5496
5497 cb_context->RecordSyncOp<SyncOpResetEvent>(CMD_RESETEVENT2, *this, cb_context->GetQueueFlags(), event, stageMask);
5498}
5499
John Zulauf49beb112020-11-04 16:06:31 -07005500bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5501 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5502 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5503 uint32_t bufferMemoryBarrierCount,
5504 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5505 uint32_t imageMemoryBarrierCount,
5506 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
5507 bool skip = false;
5508 const auto *cb_context = GetAccessContext(commandBuffer);
5509 assert(cb_context);
5510 if (!cb_context) return skip;
5511
John Zulauf36ef9282021-02-02 11:47:24 -07005512 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
5513 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
5514 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufd5115702021-01-18 12:34:33 -07005515 return wait_events_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005516}
5517
5518void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5519 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5520 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5521 uint32_t bufferMemoryBarrierCount,
5522 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5523 uint32_t imageMemoryBarrierCount,
5524 const VkImageMemoryBarrier *pImageMemoryBarriers) {
5525 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5526 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
5527 imageMemoryBarrierCount, pImageMemoryBarriers);
5528
5529 auto *cb_context = GetAccessContext(commandBuffer);
5530 assert(cb_context);
5531 if (!cb_context) return;
5532
John Zulauf1bf30522021-09-03 15:39:06 -06005533 cb_context->RecordSyncOp<SyncOpWaitEvents>(
John Zulauf610e28c2021-08-03 17:46:23 -06005534 CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
John Zulauf1bf30522021-09-03 15:39:06 -06005535 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf4a6105a2020-11-17 15:11:05 -07005536}
5537
John Zulauf4edde622021-02-15 08:54:50 -07005538bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5539 const VkDependencyInfoKHR *pDependencyInfos) const {
5540 bool skip = false;
5541 const auto *cb_context = GetAccessContext(commandBuffer);
5542 assert(cb_context);
5543 if (!cb_context) return skip;
5544
5545 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
5546 skip |= wait_events_op.Validate(*cb_context);
5547 return skip;
5548}
5549
5550void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5551 const VkDependencyInfoKHR *pDependencyInfos) {
5552 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
5553
5554 auto *cb_context = GetAccessContext(commandBuffer);
5555 assert(cb_context);
5556 if (!cb_context) return;
5557
John Zulauf1bf30522021-09-03 15:39:06 -06005558 cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
5559 pDependencyInfos);
John Zulauf4edde622021-02-15 08:54:50 -07005560}
5561
Tony-LunarG1364cf52021-11-17 16:10:11 -07005562bool SyncValidator::PreCallValidateCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5563 const VkDependencyInfo *pDependencyInfos) const {
5564 bool skip = false;
5565 const auto *cb_context = GetAccessContext(commandBuffer);
5566 assert(cb_context);
5567 if (!cb_context) return skip;
5568
5569 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
5570 skip |= wait_events_op.Validate(*cb_context);
5571 return skip;
5572}
5573
5574void SyncValidator::PostCallRecordCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5575 const VkDependencyInfo *pDependencyInfos) {
5576 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
5577
5578 auto *cb_context = GetAccessContext(commandBuffer);
5579 assert(cb_context);
5580 if (!cb_context) return;
5581
5582 cb_context->RecordSyncOp<SyncOpWaitEvents>(CMD_WAITEVENTS2, *this, cb_context->GetQueueFlags(), eventCount, pEvents,
5583 pDependencyInfos);
5584}
5585
John Zulauf4a6105a2020-11-17 15:11:05 -07005586void SyncEventState::ResetFirstScope() {
5587 for (const auto address_type : kAddressTypes) {
5588 first_scope[static_cast<size_t>(address_type)].clear();
5589 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005590 scope = SyncExecScope();
John Zulauf78b1f892021-09-20 15:02:09 -06005591 first_scope_set = false;
5592 first_scope_tag = 0;
John Zulauf4a6105a2020-11-17 15:11:05 -07005593}
5594
5595// Keep the "ignore this event" logic in the same place for both ValidateWait and RecordWait to use
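// For example (illustrative only): an event set with vkCmdSetEvent but waited on with vkCmdWaitEvents2KHR/vkCmdWaitEvents2
// is reported as ignored (SetVsWait2), and a wait that follows a reset with no intervening execution barrier yields
// ResetWaitRace/Reset2WaitRace.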
John Zulauf4edde622021-02-15 08:54:50 -07005596SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd, VkPipelineStageFlags2KHR srcStageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005597 IgnoreReason reason = NotIgnored;
5598
Tony-LunarG1364cf52021-11-17 16:10:11 -07005599 if ((CMD_WAITEVENTS2KHR == cmd || CMD_WAITEVENTS2 == cmd) && (CMD_SETEVENT == last_command)) {
John Zulauf4edde622021-02-15 08:54:50 -07005600 reason = SetVsWait2;
5601 } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR) && !HasBarrier(0U, 0U)) {
5602 reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
John Zulauf4a6105a2020-11-17 15:11:05 -07005603 } else if (unsynchronized_set) {
5604 reason = SetRace;
John Zulauf78b1f892021-09-20 15:02:09 -06005605 } else if (first_scope_set) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005606 const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07005607 if (missing_bits) reason = MissingStageBits;
5608 }
5609
5610 return reason;
5611}
5612
Jeremy Gebben40a22942020-12-22 14:22:06 -07005613bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005614 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
5615 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
5616 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07005617}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005618
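// SetReplayContext stores the op's subpass index and a shared replay context so that GetReplayTrackback can return the
// trackback-barrier state for that subpass when the recorded op is replayed.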
John Zulaufbb890452021-12-14 11:30:18 -07005619void SyncOpBase::SetReplayContext(uint32_t subpass, ReplayContextPtr &&replay) {
5620 subpass_ = subpass;
5621 replay_context_ = std::move(replay);
5622}
5623
5624const ReplayTrackbackBarriersAction *SyncOpBase::GetReplayTrackback() const {
5625 if (replay_context_) {
5626 assert(subpass_ < replay_context_->subpass_contexts.size());
5627 return &replay_context_->subpass_contexts[subpass_];
5628 }
5629 return nullptr;
5630}
5631
John Zulauf36ef9282021-02-02 11:47:24 -07005632SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5633 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5634 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005635 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5636 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5637 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf4edde622021-02-15 08:54:50 -07005638 : SyncOpBase(cmd), barriers_(1) {
5639 auto &barrier_set = barriers_[0];
5640 barrier_set.dependency_flags = dependencyFlags;
5641 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
5642 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005643 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
John Zulauf4edde622021-02-15 08:54:50 -07005644 barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
5645 pMemoryBarriers);
5646 barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5647 bufferMemoryBarrierCount, pBufferMemoryBarriers);
5648 barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5649 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005650}
5651
John Zulauf4edde622021-02-15 08:54:50 -07005652SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
5653 const VkDependencyInfoKHR *dep_infos)
5654 : SyncOpBase(cmd), barriers_(event_count) {
5655 for (uint32_t i = 0; i < event_count; i++) {
5656 const auto &dep_info = dep_infos[i];
5657 auto &barrier_set = barriers_[i];
5658 barrier_set.dependency_flags = dep_info.dependencyFlags;
5659 auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
5660 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
5661 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
5662 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
5663 barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
5664 dep_info.pMemoryBarriers);
5665 barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
5666 dep_info.pBufferMemoryBarriers);
5667 barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
5668 dep_info.pImageMemoryBarriers);
5669 }
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005670}
5671
John Zulauf36ef9282021-02-02 11:47:24 -07005672SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulaufd5115702021-01-18 12:34:33 -07005673 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5674 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
5675 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5676 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5677 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005678 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
John Zulaufd5115702021-01-18 12:34:33 -07005679 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}
5680
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005681SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5682 const VkDependencyInfoKHR &dep_info)
John Zulauf4edde622021-02-15 08:54:50 -07005683 : SyncOpBarriers(cmd, sync_state, queue_flags, 1, &dep_info) {}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005684
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005685bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
5686 bool skip = false;
5687 const auto *context = cb_context.GetCurrentAccessContext();
5688 assert(context);
5689 if (!context) return skip;
John Zulauf6fdf3d02021-03-05 16:50:47 -07005690 assert(barriers_.size() == 1); // PipelineBarriers only support a single barrier set.
5691
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005692 // Validate Image Layout transitions
John Zulauf6fdf3d02021-03-05 16:50:47 -07005693 const auto &barrier_set = barriers_[0];
5694 for (const auto &image_barrier : barrier_set.image_memory_barriers) {
5695 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
5696 const auto *image_state = image_barrier.image.get();
5697 if (!image_state) continue;
5698 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
5699 if (hazard.hazard) {
5700 // PHASE1 TODO -- add tag information to log msg when useful.
5701 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005702 const auto image_handle = image_state->image();
John Zulauf6fdf3d02021-03-05 16:50:47 -07005703 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
5704 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5705 string_SyncHazard(hazard.hazard), image_barrier.index,
5706 sync_state.report_data->FormatHandle(image_handle).c_str(),
5707 cb_context.FormatUsage(hazard).c_str());
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005708 }
5709 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005710 return skip;
5711}
5712
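// Factory of functors and range generators consumed by the ApplyBarriers/ApplyGlobalBarriers templates below.
// For pipeline barriers the buffer/image ranges are offset by the resource's base address, and the global scope
// covers the full address range.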
John Zulaufd5115702021-01-18 12:34:33 -07005713struct SyncOpPipelineBarrierFunctorFactory {
5714 using BarrierOpFunctor = PipelineBarrierOp;
5715 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5716 using GlobalBarrierOpFunctor = PipelineBarrierOp;
5717 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5718 using BufferRange = ResourceAccessRange;
5719 using ImageRange = subresource_adapter::ImageRangeGenerator;
5720 using GlobalRange = ResourceAccessRange;
5721
5722 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
5723 return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
5724 }
John Zulauf14940722021-04-12 15:19:02 -06005725 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07005726 return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
5727 }
5728 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
5729 return GlobalBarrierOpFunctor(barrier, false);
5730 }
5731
5732 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
5733 if (!SimpleBinding(buffer)) return ResourceAccessRange();
5734 const auto base_address = ResourceBaseAddress(buffer);
5735 return (range + base_address);
5736 }
John Zulauf110413c2021-03-20 05:38:38 -06005737 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulauf264cce02021-02-05 14:40:47 -07005738 if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
John Zulaufd5115702021-01-18 12:34:33 -07005739
5740 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06005741 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07005742 return range_gen;
5743 }
5744 GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
5745};
5746
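// The two helpers below apply per-resource barriers (buffer/image) and global memory barriers respectively: each
// barrier is converted into an update functor by the factory and applied over the generated address range(s) via
// UpdateMemoryAccessState.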
5747template <typename Barriers, typename FunctorFactory>
John Zulauf14940722021-04-12 15:19:02 -06005748void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag tag,
John Zulaufd5115702021-01-18 12:34:33 -07005749 AccessContext *context) {
5750 for (const auto &barrier : barriers) {
5751 const auto *state = barrier.GetState();
5752 if (state) {
5753 auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
5754 auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
5755 auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
5756 UpdateMemoryAccessState(accesses, update_action, &range_gen);
5757 }
5758 }
5759}
5760
5761template <typename Barriers, typename FunctorFactory>
John Zulauf14940722021-04-12 15:19:02 -06005762void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag tag,
John Zulaufd5115702021-01-18 12:34:33 -07005763 AccessContext *access_context) {
5764 auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
5765 for (const auto &barrier : barriers) {
5766 barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
5767 }
5768 for (const auto address_type : kAddressTypes) {
5769 auto range_gen = factory.MakeGlobalRangeGen(address_type);
5770 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
5771 }
5772}
5773
John Zulauf8eda1562021-04-13 17:06:41 -06005774ResourceUsageTag SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005775 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf8eda1562021-04-13 17:06:41 -06005776 auto *events_context = cb_context->GetCurrentEventsContext();
John Zulauf36ef9282021-02-02 11:47:24 -07005777 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufbb890452021-12-14 11:30:18 -07005778 ReplayRecord(tag, access_context, events_context);
John Zulauf4fa68462021-04-26 21:04:22 -06005779 return tag;
5780}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005781
John Zulaufbb890452021-12-14 11:30:18 -07005782void SyncOpPipelineBarrier::ReplayRecord(const ResourceUsageTag tag, AccessContext *access_context,
5783 SyncEventsContext *events_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06005784 SyncOpPipelineBarrierFunctorFactory factory;
John Zulauf4edde622021-02-15 08:54:50 -07005785 // Pipeline barriers only have a single barrier set, unlike WaitEvents2
5786 assert(barriers_.size() == 1);
5787 const auto &barrier_set = barriers_[0];
5788 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5789 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5790 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulauf4edde622021-02-15 08:54:50 -07005791 if (barrier_set.single_exec_scope) {
John Zulauf8eda1562021-04-13 17:06:41 -06005792 events_context->ApplyBarrier(barrier_set.src_exec_scope, barrier_set.dst_exec_scope);
John Zulauf4edde622021-02-15 08:54:50 -07005793 } else {
5794 for (const auto &barrier : barrier_set.memory_barriers) {
John Zulauf8eda1562021-04-13 17:06:41 -06005795 events_context->ApplyBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
John Zulauf4edde622021-02-15 08:54:50 -07005796 }
5797 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005798}
5799
John Zulauf8eda1562021-04-13 17:06:41 -06005800bool SyncOpPipelineBarrier::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07005801 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf4fa68462021-04-26 21:04:22 -06005802 // No Validation for replay, as the layout transition accesses are checked directly, and the src*Mask ordering is captured
5803 // with first access information.
John Zulauf8eda1562021-04-13 17:06:41 -06005804 return false;
5805}
5806
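// The Make*MemoryBarriers helpers convert the application's VkMemoryBarrier/VkBufferMemoryBarrier/VkImageMemoryBarrier
// structures (and their synchronization2 equivalents) into dehandled SyncBarrier records. When no global memory barrier
// is supplied, a pure execution barrier is synthesized so the exec scopes are still applied.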
John Zulauf4edde622021-02-15 08:54:50 -07005807void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
5808 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
5809 const VkMemoryBarrier *barriers) {
5810 memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005811 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005812 const auto &barrier = barriers[barrier_index];
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005813 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005814 memory_barriers.emplace_back(sync_barrier);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005815 }
5816 if (0 == memory_barrier_count) {
5817 // If there are no global memory barriers, force an exec barrier
John Zulauf4edde622021-02-15 08:54:50 -07005818 memory_barriers.emplace_back(SyncBarrier(src, dst));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005819 }
John Zulauf4edde622021-02-15 08:54:50 -07005820 single_exec_scope = true;
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005821}
5822
John Zulauf4edde622021-02-15 08:54:50 -07005823void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5824 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5825 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
5826 buffer_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005827 for (uint32_t index = 0; index < barrier_count; index++) {
5828 const auto &barrier = barriers[index];
Jeremy Gebben9f537102021-10-05 16:37:12 -06005829 auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005830 if (buffer) {
5831 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5832 const auto range = MakeRange(barrier.offset, barrier_size);
5833 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005834 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005835 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005836 buffer_memory_barriers.emplace_back();
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005837 }
5838 }
5839}
5840
John Zulauf4edde622021-02-15 08:54:50 -07005841void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07005842 uint32_t memory_barrier_count, const VkMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07005843 memory_barriers.reserve(memory_barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005844 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005845 const auto &barrier = barriers[barrier_index];
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005846 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5847 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5848 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005849 memory_barriers.emplace_back(sync_barrier);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005850 }
John Zulauf4edde622021-02-15 08:54:50 -07005851 single_exec_scope = false;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005852}
5853
John Zulauf4edde622021-02-15 08:54:50 -07005854void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5855 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07005856 const VkBufferMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07005857 buffer_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005858 for (uint32_t index = 0; index < barrier_count; index++) {
5859 const auto &barrier = barriers[index];
5860 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5861 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9f537102021-10-05 16:37:12 -06005862 auto buffer = sync_state.Get<BUFFER_STATE>(barrier.buffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005863 if (buffer) {
5864 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5865 const auto range = MakeRange(barrier.offset, barrier_size);
5866 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005867 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005868 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005869 buffer_memory_barriers.emplace_back();
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005870 }
5871 }
5872}
5873
John Zulauf4edde622021-02-15 08:54:50 -07005874void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5875 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5876 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
5877 image_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005878 for (uint32_t index = 0; index < barrier_count; index++) {
5879 const auto &barrier = barriers[index];
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005880 auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005881 if (image) {
5882 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5883 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005884 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005885 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005886 image_memory_barriers.emplace_back();
5887 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005888 }
5889 }
5890}
John Zulaufd5115702021-01-18 12:34:33 -07005891
John Zulauf4edde622021-02-15 08:54:50 -07005892void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5893 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
Tony-LunarG3f6eceb2021-11-18 14:34:49 -07005894 const VkImageMemoryBarrier2 *barriers) {
John Zulauf4edde622021-02-15 08:54:50 -07005895 image_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005896 for (uint32_t index = 0; index < barrier_count; index++) {
5897 const auto &barrier = barriers[index];
5898 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5899 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07005900 auto image = sync_state.Get<IMAGE_STATE>(barrier.image);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005901 if (image) {
5902 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5903 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005904 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005905 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005906 image_memory_barriers.emplace_back();
5907 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005908 }
5909 }
5910}
5911
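// SyncOpWaitEvents carries both the event list and the barrier set(s): the original vkCmdWaitEvents form shares a
// single barrier set across all events, while the synchronization2 form records one VkDependencyInfo per event
// (hence the events_.size() == barriers_.size() assertion).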
John Zulauf36ef9282021-02-02 11:47:24 -07005912SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
John Zulaufd5115702021-01-18 12:34:33 -07005913 const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5914 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5915 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5916 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005917 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005918 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
5919 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07005920 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07005921}
5922
John Zulauf4edde622021-02-15 08:54:50 -07005923SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
5924 const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
5925 : SyncOpBarriers(cmd, sync_state, queue_flags, eventCount, pDependencyInfo) {
5926 MakeEventsList(sync_state, eventCount, pEvents);
5927 assert(events_.size() == barriers_.size()); // Just so nobody gets clever and decides to cull the event or barrier arrays
5928}
5929
John Zulauf610e28c2021-08-03 17:46:23 -06005930const char *const SyncOpWaitEvents::kIgnored = "Wait operation is ignored for this event.";
5931
John Zulaufd5115702021-01-18 12:34:33 -07005932bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005933 bool skip = false;
5934 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005935 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer();
John Zulaufd5115702021-01-18 12:34:33 -07005936
John Zulauf610e28c2021-08-03 17:46:23 -06005937 // This is only interesting at record and not replay (Execute/Submit) time.
John Zulauf4edde622021-02-15 08:54:50 -07005938 for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
5939 const auto &barrier_set = barriers_[barrier_set_index];
5940 if (barrier_set.single_exec_scope) {
5941 if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5942 const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5943 skip = sync_state.LogInfo(command_buffer_handle, vuid,
5944 "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
5945 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
5946 } else {
5947 const auto &barriers = barrier_set.memory_barriers;
5948 for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
5949 const auto &barrier = barriers[barrier_index];
5950 if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5951 const std::string vuid =
5952 std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5953 skip =
5954 sync_state.LogInfo(command_buffer_handle, vuid,
5955 "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
5956 CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
5957 "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
5958 }
5959 }
5960 }
5961 }
John Zulaufd5115702021-01-18 12:34:33 -07005962 }
5963
John Zulauf610e28c2021-08-03 17:46:23 -06005964 // The rest is common to record time and replay time.
5965 skip |= DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
5966 return skip;
5967}
5968
John Zulaufbb890452021-12-14 11:30:18 -07005969bool SyncOpWaitEvents::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
John Zulauf610e28c2021-08-03 17:46:23 -06005970 bool skip = false;
John Zulaufbb890452021-12-14 11:30:18 -07005971 const auto &sync_state = exec_context.GetSyncState();
John Zulauf610e28c2021-08-03 17:46:23 -06005972
Jeremy Gebben40a22942020-12-22 14:22:06 -07005973 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulauf4edde622021-02-15 08:54:50 -07005974 VkPipelineStageFlags2KHR barrier_mask_params = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07005975 bool events_not_found = false;
John Zulaufbb890452021-12-14 11:30:18 -07005976 const auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf669dfd52021-01-27 17:15:28 -07005977 assert(events_context);
John Zulauf4edde622021-02-15 08:54:50 -07005978 size_t barrier_set_index = 0;
5979 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
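    // With a single barrier set (the vkCmdWaitEvents path) every event shares barriers_[0], so the index never
    // advances; with per-event dependency infos (vkCmdWaitEvents2) the sets and events are paired one-to-one.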
John Zulauf78394fc2021-07-12 15:41:40 -06005980 for (const auto &event : events_) {
5981 const auto *sync_event = events_context->Get(event.get());
5982 const auto &barrier_set = barriers_[barrier_set_index];
5983 if (!sync_event) {
5984 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
5985 // or solve this with replay creating the SyncEventState in the queue context... also this will be a
5986 // new validation error... wait without previously submitted set event...
5987 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
John Zulauf4edde622021-02-15 08:54:50 -07005988 barrier_set_index += barrier_set_incr;
John Zulauf78394fc2021-07-12 15:41:40 -06005989 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulaufd5115702021-01-18 12:34:33 -07005990 }
John Zulauf610e28c2021-08-03 17:46:23 -06005991
5992 // For replay calls, don't revalidate "same command buffer" events
5993 if (sync_event->last_command_tag > base_tag) continue;
5994
John Zulauf78394fc2021-07-12 15:41:40 -06005995 const auto event_handle = sync_event->event->event();
5996 // TODO add "destroyed" checks
5997
John Zulauf78b1f892021-09-20 15:02:09 -06005998 if (sync_event->first_scope_set) {
5999 // Only accumulate barrier and event stages if there is a pending set in the current context
6000 barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
6001 event_stage_masks |= sync_event->scope.mask_param;
6002 }
6003
John Zulauf78394fc2021-07-12 15:41:40 -06006004 const auto &src_exec_scope = barrier_set.src_exec_scope;
John Zulauf78b1f892021-09-20 15:02:09 -06006005
John Zulauf78394fc2021-07-12 15:41:40 -06006006 const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_, src_exec_scope.mask_param);
6007 if (ignore_reason) {
6008 switch (ignore_reason) {
6009 case SyncEventState::ResetWaitRace:
6010 case SyncEventState::Reset2WaitRace: {
6011                    // Four permutations of Reset and Wait calls...
6012 const char *vuid =
6013 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
6014 if (ignore_reason == SyncEventState::Reset2WaitRace) {
Tony-LunarG279601c2021-11-16 10:50:51 -07006015 vuid = (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2-event-03831"
6016 : "VUID-vkCmdResetEvent2-event-03832";
John Zulauf78394fc2021-07-12 15:41:40 -06006017 }
6018 const char *const message =
6019 "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
6020 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6021 sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
John Zulauf610e28c2021-08-03 17:46:23 -06006022 CommandTypeString(sync_event->last_command), kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006023 break;
6024 }
6025 case SyncEventState::SetRace: {
6026                    // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus
6027                    // ignored for this event
6028 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
6029 const char *const message =
6030                        "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
6031 const char *const reason = "First synchronization scope is undefined.";
6032 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6033 sync_state.report_data->FormatHandle(event_handle).c_str(),
John Zulauf610e28c2021-08-03 17:46:23 -06006034 CommandTypeString(sync_event->last_command), reason, kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006035 break;
6036 }
6037 case SyncEventState::MissingStageBits: {
6038 const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
6039 // Issue error message that event waited for is not in wait events scope
6040 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
6041 const char *const message = "%s: %s stageMask %" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
6042 ". Bits missing from srcStageMask %s. %s";
6043 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
6044 sync_state.report_data->FormatHandle(event_handle).c_str(),
6045 sync_event->scope.mask_param, src_exec_scope.mask_param,
John Zulauf610e28c2021-08-03 17:46:23 -06006046 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), kIgnored);
John Zulauf78394fc2021-07-12 15:41:40 -06006047 break;
6048 }
6049 case SyncEventState::SetVsWait2: {
Tony-LunarG279601c2021-11-16 10:50:51 -07006050 skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2-pEvents-03837",
John Zulauf78394fc2021-07-12 15:41:40 -06006051 "%s: Follows set of %s by %s. Disallowed.", CmdName(),
6052 sync_state.report_data->FormatHandle(event_handle).c_str(),
6053 CommandTypeString(sync_event->last_command));
6054 break;
6055 }
6056 default:
6057 assert(ignore_reason == SyncEventState::NotIgnored);
6058 }
6059 } else if (barrier_set.image_memory_barriers.size()) {
6060 const auto &image_memory_barriers = barrier_set.image_memory_barriers;
John Zulaufbb890452021-12-14 11:30:18 -07006061 const auto *context = exec_context.GetCurrentAccessContext();
John Zulauf78394fc2021-07-12 15:41:40 -06006062 assert(context);
6063 for (const auto &image_memory_barrier : image_memory_barriers) {
6064 if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
6065 const auto *image_state = image_memory_barrier.image.get();
6066 if (!image_state) continue;
6067 const auto &subresource_range = image_memory_barrier.range;
6068 const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
6069 const auto hazard =
6070 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
6071 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
6072 if (hazard.hazard) {
6073 skip |= sync_state.LogError(image_state->image(), string_SyncHazardVUID(hazard.hazard),
6074 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
6075 string_SyncHazard(hazard.hazard), image_memory_barrier.index,
6076 sync_state.report_data->FormatHandle(image_state->image()).c_str(),
John Zulaufbb890452021-12-14 11:30:18 -07006077 exec_context.FormatUsage(hazard).c_str());
John Zulauf78394fc2021-07-12 15:41:40 -06006078 break;
6079 }
6080 }
6081 }
6082        // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2 VUID-vkCmdWaitEvents2KHR-pEvents-03839
6084 barrier_set_index += barrier_set_incr;
6085 }
John Zulaufd5115702021-01-18 12:34:33 -07006086
6087 // Note that we can't check for HOST in pEvents as we don't track that set event type
John Zulauf4edde622021-02-15 08:54:50 -07006088 const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
John Zulaufd5115702021-01-18 12:34:33 -07006089 if (extra_stage_bits) {
6090 // Issue error message that event waited for is not in wait events scope
John Zulauf4edde622021-02-15 08:54:50 -07006091 // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
6092 const char *const vuid =
Tony-LunarG279601c2021-11-16 10:50:51 -07006093 (CMD_WAITEVENTS == cmd_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2-pEvents-03838";
John Zulaufd5115702021-01-18 12:34:33 -07006094 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07006095 "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
John Zulaufbb890452021-12-14 11:30:18 -07006096 const auto handle = exec_context.Handle();
John Zulaufd5115702021-01-18 12:34:33 -07006097 if (events_not_found) {
John Zulaufbb890452021-12-14 11:30:18 -07006098 skip |= sync_state.LogInfo(handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07006099 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
John Zulaufd5115702021-01-18 12:34:33 -07006100 " vkCmdSetEvent may be in previously submitted command buffer.");
6101 } else {
John Zulaufbb890452021-12-14 11:30:18 -07006102 skip |= sync_state.LogError(handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07006103 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
John Zulaufd5115702021-01-18 12:34:33 -07006104 }
6105 }
6106 return skip;
6107}
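
// For reference (illustrative app-side sequence, not validator code): the extra_stage_bits check above fires when
// the wait's srcStageMask names stages that no corresponding set provided, e.g. with hypothetical handles:
//
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
//   vkCmdWaitEvents(cb, 1, &event,
//                   VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,  // FRAGMENT is extra
//                   VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
//
// HOST is excluded from the check because host-side event sets are not tracked here.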
6108
6109struct SyncOpWaitEventsFunctorFactory {
6110 using BarrierOpFunctor = WaitEventBarrierOp;
6111 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
6112 using GlobalBarrierOpFunctor = WaitEventBarrierOp;
6113 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
6114 using BufferRange = EventSimpleRangeGenerator;
6115 using ImageRange = EventImageRangeGenerator;
6116 using GlobalRange = EventSimpleRangeGenerator;
6117
6118 // Need to restrict to only valid exec and access scope for this event
6119 // Pass by value is intentional to get a copy we can change without modifying the passed barrier
6120 SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07006121 barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
John Zulaufd5115702021-01-18 12:34:33 -07006122 barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
6123 return barrier;
6124 }
6125 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
6126 auto barrier = RestrictToEvent(barrier_arg);
6127 return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
6128 }
John Zulauf14940722021-04-12 15:19:02 -06006129 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07006130 return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
6131 }
6132 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
6133 auto barrier = RestrictToEvent(barrier_arg);
6134 return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
6135 }
6136
6137 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
6138 const AccessAddressType address_type = GetAccessAddressType(buffer);
6139 const auto base_address = ResourceBaseAddress(buffer);
6140 ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
6141 EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
6142 return filtered_range_gen;
6143 }
John Zulauf110413c2021-03-20 05:38:38 -06006144 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulaufd5115702021-01-18 12:34:33 -07006145 if (!SimpleBinding(image)) return ImageRange();
6146 const auto address_type = GetAccessAddressType(image);
6147 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06006148 subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07006149 EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
6150
6151 return filtered_range_gen;
6152 }
6153 GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
6154 return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
6155 }
6156 SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
6157 SyncEventState *sync_event;
6158};
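
// The factory above narrows each application barrier to what the event actually captured: RestrictToEvent()
// intersects the barrier's source execution/access scopes with the event's recorded scope, and the range
// generators filter the barrier's buffer/image/global ranges against the event's first_scope maps, so the wait
// only applies barriers to accesses the corresponding set actually covered.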
6159
John Zulauf8eda1562021-04-13 17:06:41 -06006160ResourceUsageTag SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf36ef9282021-02-02 11:47:24 -07006161 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufd5115702021-01-18 12:34:33 -07006162 auto *access_context = cb_context->GetCurrentAccessContext();
6163 assert(access_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006164 if (!access_context) return tag;
John Zulauf669dfd52021-01-27 17:15:28 -07006165 auto *events_context = cb_context->GetCurrentEventsContext();
6166 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006167 if (!events_context) return tag;
John Zulaufd5115702021-01-18 12:34:33 -07006168
John Zulaufbb890452021-12-14 11:30:18 -07006169 ReplayRecord(tag, access_context, events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006170 return tag;
6171}
6172
John Zulaufbb890452021-12-14 11:30:18 -07006173void SyncOpWaitEvents::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07006174 // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
6175    // all accesses. We could instead import only for the first_scopes, or a union of them, if this becomes a performance/memory
6176    // issue, but with no data on the cost of the union, nor on whether it even matters, we take the simplest approach here.
6177 access_context->ResolvePreviousAccesses();
6178
John Zulauf4edde622021-02-15 08:54:50 -07006179 size_t barrier_set_index = 0;
6180 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
6181 assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
John Zulauf669dfd52021-01-27 17:15:28 -07006182 for (auto &event_shared : events_) {
6183 if (!event_shared.get()) continue;
6184 auto *sync_event = events_context->GetFromShared(event_shared);
John Zulaufd5115702021-01-18 12:34:33 -07006185
John Zulauf4edde622021-02-15 08:54:50 -07006186 sync_event->last_command = cmd_;
John Zulauf610e28c2021-08-03 17:46:23 -06006187 sync_event->last_command_tag = tag;
John Zulaufd5115702021-01-18 12:34:33 -07006188
John Zulauf4edde622021-02-15 08:54:50 -07006189 const auto &barrier_set = barriers_[barrier_set_index];
6190 const auto &dst = barrier_set.dst_exec_scope;
6191 if (!sync_event->IsIgnoredByWait(cmd_, barrier_set.src_exec_scope.mask_param)) {
John Zulaufd5115702021-01-18 12:34:33 -07006192            // These apply barriers one at a time as they are restricted to the resource ranges specified for each barrier,
6193            // but do not update the dependency chain information (they only set the "pending" state) so that the order
6194            // independence of the barriers is maintained.
6195 SyncOpWaitEventsFunctorFactory factory(sync_event);
John Zulauf4edde622021-02-15 08:54:50 -07006196 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
6197 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
6198 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulaufd5115702021-01-18 12:34:33 -07006199
6200 // Apply the global barrier to the event itself (for race condition tracking)
6201 // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
6202 sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
6203 sync_event->barriers |= dst.exec_scope;
6204 } else {
6205 // We ignored this wait, so we don't have any effective synchronization barriers for it.
6206 sync_event->barriers = 0U;
6207 }
John Zulauf4edde622021-02-15 08:54:50 -07006208 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07006209 }
6210
6211 // Apply the pending barriers
6212 ResolvePendingBarrierFunctor apply_pending_action(tag);
6213 access_context->ApplyToContext(apply_pending_action);
6214}
6215
John Zulauf8eda1562021-04-13 17:06:41 -06006216bool SyncOpWaitEvents::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006217 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6218 return DoValidate(*exec_context, base_tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006219}
6220
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006221bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
6222 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
6223 bool skip = false;
6224 const auto *cb_access_context = GetAccessContext(commandBuffer);
6225 assert(cb_access_context);
6226 if (!cb_access_context) return skip;
6227
6228 const auto *context = cb_access_context->GetCurrentAccessContext();
6229 assert(context);
6230 if (!context) return skip;
6231
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006232 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006233
6234 if (dst_buffer) {
6235 const ResourceAccessRange range = MakeRange(dstOffset, 4);
6236 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
6237 if (hazard.hazard) {
6238 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
6239 "vkCmdWriteBufferMarkerAMD2: Hazard %s for dstBuffer %s. Access info %s.",
6240 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
John Zulauf14940722021-04-12 15:19:02 -06006241 cb_access_context->FormatUsage(hazard).c_str());
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006242 }
6243 }
6244 return skip;
6245}
6246
John Zulauf669dfd52021-01-27 17:15:28 -07006247void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
John Zulaufd5115702021-01-18 12:34:33 -07006248 events_.reserve(event_count);
6249 for (uint32_t event_index = 0; event_index < event_count; event_index++) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006250 events_.emplace_back(sync_state.Get<EVENT_STATE>(events[event_index]));
John Zulaufd5115702021-01-18 12:34:33 -07006251 }
6252}
John Zulauf6ce24372021-01-30 05:56:25 -07006253
John Zulauf36ef9282021-02-02 11:47:24 -07006254SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07006255 VkPipelineStageFlags2KHR stageMask)
Jeremy Gebben9f537102021-10-05 16:37:12 -06006256 : SyncOpBase(cmd), event_(sync_state.Get<EVENT_STATE>(event)), exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07006257
John Zulauf1bf30522021-09-03 15:39:06 -06006258bool SyncOpResetEvent::Validate(const CommandBufferAccessContext& cb_context) const {
6259 return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6260}
6261
John Zulaufbb890452021-12-14 11:30:18 -07006262bool SyncOpResetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
6263 auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf6ce24372021-01-30 05:56:25 -07006264 assert(events_context);
6265 bool skip = false;
6266 if (!events_context) return skip;
6267
John Zulaufbb890452021-12-14 11:30:18 -07006268 const auto &sync_state = exec_context.GetSyncState();
John Zulauf6ce24372021-01-30 05:56:25 -07006269 const auto *sync_event = events_context->Get(event_);
6270 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6271
John Zulauf1bf30522021-09-03 15:39:06 -06006272 if (sync_event->last_command_tag > base_tag) return skip; // if we validated this in recording of the secondary, don't repeat
6273
John Zulauf6ce24372021-01-30 05:56:25 -07006274 const char *const set_wait =
6275 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
6276 "hazards.";
6277 const char *message = set_wait; // Only one message this call.
6278 if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
6279 const char *vuid = nullptr;
6280 switch (sync_event->last_command) {
6281 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006282 case CMD_SETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006283 case CMD_SETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006284 // Needs a barrier between set and reset
6285 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
6286 break;
John Zulauf4edde622021-02-15 08:54:50 -07006287 case CMD_WAITEVENTS:
Tony-LunarG1364cf52021-11-17 16:10:11 -07006288 case CMD_WAITEVENTS2:
John Zulauf4edde622021-02-15 08:54:50 -07006289 case CMD_WAITEVENTS2KHR: {
John Zulauf6ce24372021-01-30 05:56:25 -07006290 // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask
6291 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
6292 break;
6293 }
6294 default:
6295                // The only other valid cases: no last command at all, or a prior reset.
John Zulauf4edde622021-02-15 08:54:50 -07006296 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
6297 (sync_event->last_command == CMD_RESETEVENT2KHR));
John Zulauf6ce24372021-01-30 05:56:25 -07006298 break;
6299 }
6300 if (vuid) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006301 skip |= sync_state.LogError(event_->event(), vuid, message, CmdName(),
6302 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006303 CommandTypeString(sync_event->last_command));
6304 }
6305 }
6306 return skip;
6307}
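
// For reference (illustrative app-side sequence, not validator code): the "-missingbarrier-set" case above targets
// a reset that chases a set with nothing ordering the two, e.g. with hypothetical handles:
//
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
//   vkCmdResetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);  // racy: no intervening execution barrier or wait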
6308
John Zulauf8eda1562021-04-13 17:06:41 -06006309ResourceUsageTag SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
6310 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07006311 auto *events_context = cb_context->GetCurrentEventsContext();
6312 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006313 if (!events_context) return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006314
6315 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf8eda1562021-04-13 17:06:41 -06006316 if (!sync_event) return tag; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07006317
6318 // Update the event state
John Zulauf36ef9282021-02-02 11:47:24 -07006319 sync_event->last_command = cmd_;
John Zulauf610e28c2021-08-03 17:46:23 -06006320 sync_event->last_command_tag = tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006321 sync_event->unsynchronized_set = CMD_NONE;
6322 sync_event->ResetFirstScope();
6323 sync_event->barriers = 0U;
John Zulauf8eda1562021-04-13 17:06:41 -06006324
6325 return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006326}
6327
John Zulauf8eda1562021-04-13 17:06:41 -06006328bool SyncOpResetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006329 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6330 return DoValidate(*exec_context, base_tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006331}
6332
John Zulaufbb890452021-12-14 11:30:18 -07006333void SyncOpResetEvent::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006334
John Zulauf36ef9282021-02-02 11:47:24 -07006335SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07006336 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07006337 : SyncOpBase(cmd),
Jeremy Gebben9f537102021-10-05 16:37:12 -06006338 event_(sync_state.Get<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07006339 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
6340 dep_info_() {}
6341
6342SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
6343 const VkDependencyInfoKHR &dep_info)
6344 : SyncOpBase(cmd),
Jeremy Gebben9f537102021-10-05 16:37:12 -06006345 event_(sync_state.Get<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07006346 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
Tony-LunarG273f32f2021-09-28 08:56:30 -06006347 dep_info_(new safe_VkDependencyInfo(&dep_info)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07006348
6349bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulauf610e28c2021-08-03 17:46:23 -06006350 return DoValidate(cb_context, ResourceUsageRecord::kMaxIndex);
6351}
6352bool SyncOpSetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006353 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
6354 assert(exec_context);
6355 return DoValidate(*exec_context, base_tag);
John Zulauf610e28c2021-08-03 17:46:23 -06006356}
6357
John Zulaufbb890452021-12-14 11:30:18 -07006358bool SyncOpSetEvent::DoValidate(const CommandExecutionContext &exec_context, const ResourceUsageTag base_tag) const {
John Zulauf6ce24372021-01-30 05:56:25 -07006359 bool skip = false;
6360
John Zulaufbb890452021-12-14 11:30:18 -07006361 const auto &sync_state = exec_context.GetSyncState();
6362 auto *events_context = exec_context.GetCurrentEventsContext();
John Zulauf6ce24372021-01-30 05:56:25 -07006363 assert(events_context);
6364 if (!events_context) return skip;
6365
6366 const auto *sync_event = events_context->Get(event_);
6367 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6368
John Zulauf610e28c2021-08-03 17:46:23 -06006369    if (sync_event->last_command_tag >= base_tag) return skip;  // for replay we don't want to revalidate internal "last command"
6370
John Zulauf6ce24372021-01-30 05:56:25 -07006371 const char *const reset_set =
6372 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
6373 "hazards.";
6374 const char *const wait =
6375 "%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";
6376
6377 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
John Zulauf4edde622021-02-15 08:54:50 -07006378 const char *vuid_stem = nullptr;
John Zulauf6ce24372021-01-30 05:56:25 -07006379 const char *message = nullptr;
6380 switch (sync_event->last_command) {
6381 case CMD_RESETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006382 case CMD_RESETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006383 case CMD_RESETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006384 // Needs a barrier between reset and set
John Zulauf4edde622021-02-15 08:54:50 -07006385 vuid_stem = "-missingbarrier-reset";
John Zulauf6ce24372021-01-30 05:56:25 -07006386 message = reset_set;
6387 break;
6388 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006389 case CMD_SETEVENT2KHR:
Tony-LunarG8d71c4f2022-01-27 15:25:53 -07006390 case CMD_SETEVENT2:
John Zulauf6ce24372021-01-30 05:56:25 -07006391 // Needs a barrier between set and set
John Zulauf4edde622021-02-15 08:54:50 -07006392 vuid_stem = "-missingbarrier-set";
John Zulauf6ce24372021-01-30 05:56:25 -07006393 message = reset_set;
6394 break;
6395 case CMD_WAITEVENTS:
Tony-LunarG1364cf52021-11-17 16:10:11 -07006396 case CMD_WAITEVENTS2:
John Zulauf4edde622021-02-15 08:54:50 -07006397 case CMD_WAITEVENTS2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07006398 // Needs a barrier or is in second execution scope
John Zulauf4edde622021-02-15 08:54:50 -07006399 vuid_stem = "-missingbarrier-wait";
John Zulauf6ce24372021-01-30 05:56:25 -07006400 message = wait;
6401 break;
6402 default:
6403                // The only other valid case is that there was no last command at all.
6404 assert(sync_event->last_command == CMD_NONE);
6405 break;
6406 }
John Zulauf4edde622021-02-15 08:54:50 -07006407 if (vuid_stem) {
John Zulauf6ce24372021-01-30 05:56:25 -07006408 assert(nullptr != message);
John Zulauf4edde622021-02-15 08:54:50 -07006409 std::string vuid("SYNC-");
6410 vuid.append(CmdName()).append(vuid_stem);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006411 skip |= sync_state.LogError(event_->event(), vuid.c_str(), message, CmdName(),
6412 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006413 CommandTypeString(sync_event->last_command));
6414 }
6415 }
6416
6417 return skip;
6418}
6419
John Zulauf8eda1562021-04-13 17:06:41 -06006420ResourceUsageTag SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf36ef9282021-02-02 11:47:24 -07006421 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07006422 auto *events_context = cb_context->GetCurrentEventsContext();
6423 auto *access_context = cb_context->GetCurrentAccessContext();
6424 assert(events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006425 if (access_context && events_context) {
John Zulaufbb890452021-12-14 11:30:18 -07006426 ReplayRecord(tag, access_context, events_context);
John Zulauf610e28c2021-08-03 17:46:23 -06006427 }
6428 return tag;
6429}
John Zulauf6ce24372021-01-30 05:56:25 -07006430
John Zulaufbb890452021-12-14 11:30:18 -07006431void SyncOpSetEvent::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07006432 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf610e28c2021-08-03 17:46:23 -06006433 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07006434
6435 // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined
6436    // and we're issuing errors re: missing barriers between event commands; if the user fixes those, that also fixes
6437    // any issues caused by the naive scope setting here.
6438
6439    // With two SetEvent calls, one cannot know which group of operations will be waited for.
6440 // Given:
6441 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
6442 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
6443
6444 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
6445 sync_event->unsynchronized_set = sync_event->last_command;
6446 sync_event->ResetFirstScope();
John Zulauf78b1f892021-09-20 15:02:09 -06006447 } else if (!sync_event->first_scope_set) {
John Zulauf6ce24372021-01-30 05:56:25 -07006448 // We only set the scope if there isn't one
6449 sync_event->scope = src_exec_scope_;
6450
6451 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
6452 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
6453 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
6454 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
6455 }
6456 };
6457 access_context->ForAll(set_scope);
6458 sync_event->unsynchronized_set = CMD_NONE;
John Zulauf78b1f892021-09-20 15:02:09 -06006459 sync_event->first_scope_set = true;
John Zulauf6ce24372021-01-30 05:56:25 -07006460 sync_event->first_scope_tag = tag;
6461 }
John Zulauf4edde622021-02-15 08:54:50 -07006462 // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
6463 sync_event->last_command = cmd_;
John Zulauf610e28c2021-08-03 17:46:23 -06006464 sync_event->last_command_tag = tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006465 sync_event->barriers = 0U;
6466}
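
// For reference (illustrative app-side sequence, not validator code): the "first scope wins / unsynchronized set
// resets" policy above plays out like this (hypothetical handles):
//
//   vkCmdDispatch(cb, 1, 1, 1);                                        // Stuff1
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);    // first scope captured here
//   vkCmdDispatch(cb, 1, 1, 1);                                        // Stuff2
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);    // unsynchronized: scope reset, set flagged
//   vkCmdWaitEvents(cb, 1, &event, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
//                   VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);  // SetRace: ignored
//
// Because nothing orders the two sets, the wait cannot know whether Stuff1, Stuff2, or both completed, so the
// second set clears the recorded scope and the wait is reported as a race and ignored for this event.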
John Zulauf64ffe552021-02-06 10:25:07 -07006467
6468SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
6469 const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07006470 const VkSubpassBeginInfo *pSubpassBeginInfo)
6471 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006472 if (pRenderPassBegin) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006473 rp_state_ = sync_state.Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
John Zulauf64ffe552021-02-06 10:25:07 -07006474 renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006475 auto fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07006476 if (fb_state) {
Jeremy Gebben9f537102021-10-05 16:37:12 -06006477 shared_attachments_ = sync_state.GetAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
John Zulauf64ffe552021-02-06 10:25:07 -07006478 // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
6479            // Note that this is safe to persist as long as shared_attachments is not cleared
6480 attachments_.reserve(shared_attachments_.size());
sfricke-samsung01c9ae92021-02-09 22:30:52 -08006481 for (const auto &attachment : shared_attachments_) {
John Zulauf64ffe552021-02-06 10:25:07 -07006482 attachments_.emplace_back(attachment.get());
6483 }
6484 }
6485 if (pSubpassBeginInfo) {
6486 subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
6487 }
6488 }
6489}
6490
6491bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
6492    // Check if any of the layout transitions are hazardous... but we don't have the renderpass context to work with, so we build a temporary one below.
6493 bool skip = false;
6494
6495 assert(rp_state_.get());
6496 if (nullptr == rp_state_.get()) return skip;
6497 auto &rp_state = *rp_state_.get();
6498
6499 const uint32_t subpass = 0;
6500
6501 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
6502 // hasn't happened yet)
6503 const std::vector<AccessContext> empty_context_vector;
6504 AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
6505 cb_context.GetCurrentAccessContext());
6506
6507 // Validate attachment operations
6508 if (attachments_.size() == 0) return skip;
6509 const auto &render_area = renderpass_begin_info_.renderArea;
John Zulaufd0ec59f2021-03-13 14:25:08 -07006510
6511    // Since there isn't a valid RenderPassAccessContext until Record, we need to create the view/generator list... we could limit this
6512 // by predicating on whether subpass 0 uses the attachment if it is too expensive to create the full list redundantly here.
6513 // More broadly we could look at thread specific state shared between Validate and Record as is done for other heavyweight
6514 // operations (though it's currently a messy approach)
6515 AttachmentViewGenVector view_gens = RenderPassAccessContext::CreateAttachmentViewGen(render_area, attachments_);
6516 skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07006517
6518 // Validate load operations if there were no layout transition hazards
6519 if (!skip) {
John Zulaufee984022022-04-13 16:39:50 -06006520 temp_context.RecordLayoutTransitions(rp_state, subpass, view_gens, kInvalidTag);
John Zulaufd0ec59f2021-03-13 14:25:08 -07006521 skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07006522 }
6523
6524 return skip;
6525}
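
// Validation above has to run before Record() creates the real RenderPassAccessContext, so it layers a throwaway
// AccessContext for subpass 0 on top of the command buffer's current context, checks the initial layout
// transitions against it, and only if those are clean records the transitions with kInvalidTag so the load
// operations can be checked against post-transition state.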
6526
John Zulauf8eda1562021-04-13 17:06:41 -06006527ResourceUsageTag SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf64ffe552021-02-06 10:25:07 -07006528 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
6529 assert(rp_state_.get());
John Zulauf41a9c7c2021-12-07 15:59:53 -07006530 if (nullptr == rp_state_.get()) return cb_context->NextCommandTag(cmd_);
6531 return cb_context->RecordBeginRenderPass(cmd_, *rp_state_.get(), renderpass_begin_info_.renderArea, attachments_);
John Zulauf64ffe552021-02-06 10:25:07 -07006532}
6533
John Zulauf8eda1562021-04-13 17:06:41 -06006534bool SyncOpBeginRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006535 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006536 return false;
6537}
6538
John Zulaufbb890452021-12-14 11:30:18 -07006539void SyncOpBeginRenderPass::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context,
6540 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006541
John Zulauf64ffe552021-02-06 10:25:07 -07006542SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07006543 const VkSubpassEndInfo *pSubpassEndInfo)
6544 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006545 if (pSubpassBeginInfo) {
6546 subpass_begin_info_.initialize(pSubpassBeginInfo);
6547 }
6548 if (pSubpassEndInfo) {
6549 subpass_end_info_.initialize(pSubpassEndInfo);
6550 }
6551}
6552
6553bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
6554 bool skip = false;
6555 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
6556 if (!renderpass_context) return skip;
6557
6558 skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
6559 return skip;
6560}
6561
John Zulauf8eda1562021-04-13 17:06:41 -06006562ResourceUsageTag SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006563 return cb_context->RecordNextSubpass(cmd_);
John Zulauf8eda1562021-04-13 17:06:41 -06006564}
6565
6566bool SyncOpNextSubpass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006567 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006568 return false;
John Zulauf64ffe552021-02-06 10:25:07 -07006569}
6570
sfricke-samsung85584a72021-09-30 21:43:38 -07006571SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo)
6572 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006573 if (pSubpassEndInfo) {
6574 subpass_end_info_.initialize(pSubpassEndInfo);
6575 }
6576}
6577
John Zulaufbb890452021-12-14 11:30:18 -07006578void SyncOpNextSubpass::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
6579}
John Zulauf8eda1562021-04-13 17:06:41 -06006580
John Zulauf64ffe552021-02-06 10:25:07 -07006581bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
6582 bool skip = false;
6583 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
6584
6585 if (!renderpass_context) return skip;
6586 skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
6587 return skip;
6588}
6589
John Zulauf8eda1562021-04-13 17:06:41 -06006590ResourceUsageTag SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006591 return cb_context->RecordEndRenderPass(cmd_);
John Zulauf64ffe552021-02-06 10:25:07 -07006592}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006593
John Zulauf8eda1562021-04-13 17:06:41 -06006594bool SyncOpEndRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
John Zulaufbb890452021-12-14 11:30:18 -07006595 ResourceUsageTag base_tag, CommandExecutionContext *exec_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06006596 return false;
6597}
6598
John Zulaufbb890452021-12-14 11:30:18 -07006599void SyncOpEndRenderPass::ReplayRecord(ResourceUsageTag tag, AccessContext *access_context,
6600 SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006601
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006602void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
6603 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
6604 StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
6605 auto *cb_access_context = GetAccessContext(commandBuffer);
6606 assert(cb_access_context);
6607 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
6608 auto *context = cb_access_context->GetCurrentAccessContext();
6609 assert(context);
6610
Jeremy Gebbenf4449392022-01-28 10:09:10 -07006611 auto dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006612
6613 if (dst_buffer) {
6614 const ResourceAccessRange range = MakeRange(dstOffset, 4);
6615 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
6616 }
6617}
John Zulaufd05c5842021-03-26 11:32:16 -06006618
John Zulaufae842002021-04-15 18:20:55 -06006619bool SyncValidator::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
6620 const VkCommandBuffer *pCommandBuffers) const {
6621 bool skip = StateTracker::PreCallValidateCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
6622 const char *func_name = "vkCmdExecuteCommands";
6623 const auto *cb_context = GetAccessContext(commandBuffer);
6624 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06006625
6626 // Heavyweight, but we need a proxy copy of the active command buffer access context
6627 CommandBufferAccessContext proxy_cb_context(*cb_context, CommandBufferAccessContext::AsProxyContext());
John Zulaufae842002021-04-15 18:20:55 -06006628
6629 // Make working copies of the access and events contexts
John Zulaufae842002021-04-15 18:20:55 -06006630 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006631 proxy_cb_context.NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
6632
John Zulaufae842002021-04-15 18:20:55 -06006633 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
6634 if (!recorded_cb_context) continue;
John Zulauf4fa68462021-04-26 21:04:22 -06006635
6636 const auto *recorded_context = recorded_cb_context->GetCurrentAccessContext();
6637 assert(recorded_context);
6638 skip |= recorded_cb_context->ValidateFirstUse(&proxy_cb_context, func_name, cb_index);
6639
6640        // The barriers have already been applied in ValidateFirstUse
6641 ResourceUsageRange tag_range = proxy_cb_context.ImportRecordedAccessLog(*recorded_cb_context);
6642 proxy_cb_context.ResolveRecordedContext(*recorded_context, tag_range.begin);
John Zulaufae842002021-04-15 18:20:55 -06006643 }
6644
John Zulaufae842002021-04-15 18:20:55 -06006645 return skip;
6646}
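
// Validation above works on a proxy copy of the primary's access context: each secondary in pCommandBuffers is
// checked against the proxy via ValidateFirstUse, then its access log is imported and its recorded context is
// resolved into the proxy so later secondaries in the same vkCmdExecuteCommands call see its accesses.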
6647
6648void SyncValidator::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
6649 const VkCommandBuffer *pCommandBuffers) {
6650 StateTracker::PreCallRecordCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
John Zulauf4fa68462021-04-26 21:04:22 -06006651 auto *cb_context = GetAccessContext(commandBuffer);
6652 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06006653 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
John Zulauf41a9c7c2021-12-07 15:59:53 -07006654 cb_context->NextIndexedCommandTag(CMD_EXECUTECOMMANDS, cb_index);
John Zulauf4fa68462021-04-26 21:04:22 -06006655 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
6656 if (!recorded_cb_context) continue;
6657 cb_context->RecordExecutedCommandBuffer(*recorded_cb_context, CMD_EXECUTECOMMANDS);
6658 }
John Zulaufae842002021-04-15 18:20:55 -06006659}
6660
John Zulaufd0ec59f2021-03-13 14:25:08 -07006661AttachmentViewGen::AttachmentViewGen(const IMAGE_VIEW_STATE *view, const VkOffset3D &offset, const VkExtent3D &extent)
6662 : view_(view), view_mask_(), gen_store_() {
6663 if (!view_ || !view_->image_state || !SimpleBinding(*view_->image_state)) return;
6664 const IMAGE_STATE &image_state = *view_->image_state.get();
6665 const auto base_address = ResourceBaseAddress(image_state);
6666 const auto *encoder = image_state.fragment_encoder.get();
6667 if (!encoder) return;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06006668 // Get offset and extent for the view, accounting for possible depth slicing
6669 const VkOffset3D zero_offset = view->GetOffset();
6670 const VkExtent3D &image_extent = view->GetExtent();
John Zulaufd0ec59f2021-03-13 14:25:08 -07006671 // Intentional copy
6672 VkImageSubresourceRange subres_range = view_->normalized_subresource_range;
6673 view_mask_ = subres_range.aspectMask;
6674 gen_store_[Gen::kViewSubresource].emplace(*encoder, subres_range, zero_offset, image_extent, base_address);
6675 gen_store_[Gen::kRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6676
6677 const auto depth = view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT;
6678 if (depth && (depth != view_mask_)) {
6679 subres_range.aspectMask = depth;
6680 gen_store_[Gen::kDepthOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6681 }
6682 const auto stencil = view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT;
6683 if (stencil && (stencil != view_mask_)) {
6684 subres_range.aspectMask = stencil;
6685 gen_store_[Gen::kStencilOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6686 }
6687}
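
// The constructor above precomputes ImageRangeGens for the full view subresource and for the render-area-clipped
// portion, plus depth-only and stencil-only render-area variants when the view mixes aspects, so attachment
// validation can pick a generator by Gen type without re-deriving subresource ranges on every use.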
6688
6689const ImageRangeGen *AttachmentViewGen::GetRangeGen(AttachmentViewGen::Gen gen_type) const {
6690 const ImageRangeGen *got = nullptr;
6691 switch (gen_type) {
6692 case kViewSubresource:
6693 got = &gen_store_[kViewSubresource];
6694 break;
6695 case kRenderArea:
6696 got = &gen_store_[kRenderArea];
6697 break;
6698 case kDepthOnlyRenderArea:
6699 got =
6700 (view_mask_ == VK_IMAGE_ASPECT_DEPTH_BIT) ? &gen_store_[Gen::kRenderArea] : &gen_store_[Gen::kDepthOnlyRenderArea];
6701 break;
6702 case kStencilOnlyRenderArea:
6703 got = (view_mask_ == VK_IMAGE_ASPECT_STENCIL_BIT) ? &gen_store_[Gen::kRenderArea]
6704 : &gen_store_[Gen::kStencilOnlyRenderArea];
6705 break;
6706 default:
6707 assert(got);
6708 }
6709 return got;
6710}
6711
6712AttachmentViewGen::Gen AttachmentViewGen::GetDepthStencilRenderAreaGenType(bool depth_op, bool stencil_op) const {
6713 assert(IsValid());
6714 assert(view_mask_ & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
6715 if (depth_op) {
6716 assert(view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT);
6717 if (stencil_op) {
6718 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
6719 return kRenderArea;
6720 }
6721 return kDepthOnlyRenderArea;
6722 }
6723 if (stencil_op) {
6724 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
6725 return kStencilOnlyRenderArea;
6726 }
6727
6728 assert(depth_op || stencil_op);
6729 return kRenderArea;
6730}
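
// Summary of the mapping above, assuming the view contains the aspects the ops require (asserted):
//   (depth_op, stencil_op) == (true,  true ) -> kRenderArea
//   (depth_op, stencil_op) == (true,  false) -> kDepthOnlyRenderArea
//   (depth_op, stencil_op) == (false, true ) -> kStencilOnlyRenderArea
//   (depth_op, stencil_op) == (false, false) -> invalid (asserted)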
6731
6732AccessAddressType AttachmentViewGen::GetAddressType() const { return AccessContext::ImageAddressType(*view_->image_state); }
John Zulauf8eda1562021-04-13 17:06:41 -06006733
6734void SyncEventsContext::ApplyBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
6735 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
6736 for (auto &event_pair : map_) {
6737 assert(event_pair.second); // Shouldn't be storing empty
6738 auto &sync_event = *event_pair.second;
6739 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
6740 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
6741 sync_event.barriers |= dst.exec_scope;
6742 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
6743 }
6744 }
6745}
John Zulaufbb890452021-12-14 11:30:18 -07006746
6747ReplayTrackbackBarriersAction::ReplayTrackbackBarriersAction(VkQueueFlags queue_flags,
6748 const SubpassDependencyGraphNode &subpass_dep,
6749 const std::vector<ReplayTrackbackBarriersAction> &replay_contexts) {
6750 bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
6751 trackback_barriers.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
6752 for (const auto &prev_dep : subpass_dep.prev) {
6753 const auto prev_pass = prev_dep.first->pass;
6754 const auto &prev_barriers = prev_dep.second;
6755 trackback_barriers.emplace_back(&replay_contexts[prev_pass], queue_flags, prev_barriers);
6756 }
6757 if (has_barrier_from_external) {
6758        // Store the barrier from external with the rest, but save a pointer for "by subpass" lookups.
6759 trackback_barriers.emplace_back(nullptr, queue_flags, subpass_dep.barrier_from_external);
6760 }
6761}
6762
6763void ReplayTrackbackBarriersAction::operator()(ResourceAccessState *access) const {
6764 if (trackback_barriers.size() == 1) {
6765 trackback_barriers[0](access);
6766 } else {
6767 ResourceAccessState resolved;
6768 for (const auto &trackback : trackback_barriers) {
6769 ResourceAccessState access_copy = *access;
6770 trackback(&access_copy);
6771 resolved.Resolve(access_copy);
6772 }
6773 *access = resolved;
6774 }
6775}
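
// With a single trackback the barrier is applied to the access state in place; with several, each trackback is
// applied to its own copy of the incoming state and the copies are merged with Resolve() so the result does not
// depend on the order of the subpass dependencies.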
6776
6777ReplayTrackbackBarriersAction::TrackbackBarriers::TrackbackBarriers(
6778 const ReplayTrackbackBarriersAction *source_subpass_, VkQueueFlags queue_flags_,
6779 const std::vector<const VkSubpassDependency2 *> &subpass_dependencies_)
6780 : Base(source_subpass_, queue_flags_, subpass_dependencies_) {}
6781
6782void ReplayTrackbackBarriersAction::TrackbackBarriers::operator()(ResourceAccessState *access) const {
6783 if (source_subpass) {
6784 (*source_subpass)(access);
6785 }
6786 access->ApplyBarriersImmediate(barriers);
6787}