/* Copyright (c) 2019-2021 The Khronos Group Inc.
 * Copyright (c) 2019-2021 Valve Corporation
 * Copyright (c) 2019-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"

Jeremy Gebben6fbf8242021-06-21 09:14:46 -060029static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.Binding(); }
John Zulauf264cce02021-02-05 14:40:47 -070030
John Zulauf29d00532021-03-04 13:28:54 -070031static bool SimpleBinding(const IMAGE_STATE &image_state) {
Jeremy Gebben62c3bf42021-07-21 15:38:24 -060032 bool simple =
Jeremy Gebben82e11d52021-07-26 09:19:37 -060033 SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.IsSwapchainImage() || image_state.bind_swapchain;
John Zulauf29d00532021-03-04 13:28:54 -070034
35 // If it's not simple we must have an encoder.
36 assert(!simple || image_state.fragment_encoder.get());
37 return simple;
38}
39
John Zulauf4fa68462021-04-26 21:04:22 -060040static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
41static const std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
John Zulauf43cc7462020-12-03 12:33:12 -070042 AccessAddressType::kLinear, AccessAddressType::kIdealized};
43
John Zulaufd5115702021-01-18 12:34:33 -070044static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; };
John Zulauf264cce02021-02-05 14:40:47 -070045static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
46 return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
47}
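// Illustrative sketch (editorial note, not part of the validation logic): buffers always resolve to the
// linear address space, while images fall back to the idealized space unless they have a simple binding
// that lets their fragment encoder linearize them. Hypothetical usage:
//   AccessAddressType type = GetAccessAddressType(image_state);  // kLinear or kIdealized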
John Zulaufd5115702021-01-18 12:34:33 -070048
John Zulauf9cb530d2019-09-30 14:14:10 -060049static const char *string_SyncHazardVUID(SyncHazard hazard) {
50 switch (hazard) {
51 case SyncHazard::NONE:
John Zulauf2f952d22020-02-10 11:34:51 -070052 return "SYNC-HAZARD-NONE";
John Zulauf9cb530d2019-09-30 14:14:10 -060053 break;
54 case SyncHazard::READ_AFTER_WRITE:
55 return "SYNC-HAZARD-READ_AFTER_WRITE";
56 break;
57 case SyncHazard::WRITE_AFTER_READ:
58 return "SYNC-HAZARD-WRITE_AFTER_READ";
59 break;
60 case SyncHazard::WRITE_AFTER_WRITE:
61 return "SYNC-HAZARD-WRITE_AFTER_WRITE";
62 break;
John Zulauf2f952d22020-02-10 11:34:51 -070063 case SyncHazard::READ_RACING_WRITE:
64 return "SYNC-HAZARD-READ-RACING-WRITE";
65 break;
66 case SyncHazard::WRITE_RACING_WRITE:
67 return "SYNC-HAZARD-WRITE-RACING-WRITE";
68 break;
69 case SyncHazard::WRITE_RACING_READ:
70 return "SYNC-HAZARD-WRITE-RACING-READ";
71 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060072 default:
73 assert(0);
74 }
75 return "SYNC-HAZARD-INVALID";
76}
77
John Zulauf59e25072020-07-17 10:55:21 -060078static bool IsHazardVsRead(SyncHazard hazard) {
79 switch (hazard) {
80 case SyncHazard::NONE:
81 return false;
82 break;
83 case SyncHazard::READ_AFTER_WRITE:
84 return false;
85 break;
86 case SyncHazard::WRITE_AFTER_READ:
87 return true;
88 break;
89 case SyncHazard::WRITE_AFTER_WRITE:
90 return false;
91 break;
92 case SyncHazard::READ_RACING_WRITE:
93 return false;
94 break;
95 case SyncHazard::WRITE_RACING_WRITE:
96 return false;
97 break;
98 case SyncHazard::WRITE_RACING_READ:
99 return true;
100 break;
101 default:
102 assert(0);
103 }
104 return false;
105}
106
John Zulauf9cb530d2019-09-30 14:14:10 -0600107static const char *string_SyncHazard(SyncHazard hazard) {
108 switch (hazard) {
109 case SyncHazard::NONE:
110 return "NONR";
111 break;
112 case SyncHazard::READ_AFTER_WRITE:
113 return "READ_AFTER_WRITE";
114 break;
115 case SyncHazard::WRITE_AFTER_READ:
116 return "WRITE_AFTER_READ";
117 break;
118 case SyncHazard::WRITE_AFTER_WRITE:
119 return "WRITE_AFTER_WRITE";
120 break;
John Zulauf2f952d22020-02-10 11:34:51 -0700121 case SyncHazard::READ_RACING_WRITE:
122 return "READ_RACING_WRITE";
123 break;
124 case SyncHazard::WRITE_RACING_WRITE:
125 return "WRITE_RACING_WRITE";
126 break;
127 case SyncHazard::WRITE_RACING_READ:
128 return "WRITE_RACING_READ";
129 break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600130 default:
131 assert(0);
132 }
133 return "INVALID HAZARD";
134}
135
John Zulauf37ceaed2020-07-03 16:18:15 -0600136static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
137 // Return the info for the first bit found
138 const SyncStageAccessInfoType *info = nullptr;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700139 for (size_t i = 0; i < flags.size(); i++) {
140 if (flags.test(i)) {
141 info = &syncStageAccessInfoByStageAccessIndex[i];
142 break;
John Zulauf37ceaed2020-07-03 16:18:15 -0600143 }
144 }
145 return info;
146}
147
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700148static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
John Zulauf59e25072020-07-17 10:55:21 -0600149 std::string out_str;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700150 if (flags.none()) {
John Zulauf389c34b2020-07-28 11:19:35 -0600151 out_str = "0";
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700152 } else {
153 for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
154 const auto &info = syncStageAccessInfoByStageAccessIndex[i];
155 if ((flags & info.stage_access_bit).any()) {
156 if (!out_str.empty()) {
157 out_str.append(sep);
158 }
159 out_str.append(info.name);
John Zulauf59e25072020-07-17 10:55:21 -0600160 }
John Zulauf59e25072020-07-17 10:55:21 -0600161 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700162 if (out_str.length() == 0) {
163 out_str.append("Unhandled SyncStageAccess");
164 }
John Zulauf59e25072020-07-17 10:55:21 -0600165 }
166 return out_str;
167}
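// Illustrative sketch (hypothetical flags, assuming SyncStageAccessFlags supports bitset-style set()):
// the formatter joins the name of every set stage/access bit with the separator:
//   SyncStageAccessFlags flags;
//   flags.set(SYNC_VERTEX_SHADER_UNIFORM_READ);
//   std::string text = string_SyncStageAccessFlags(flags, ", ");  // e.g. "SYNC_VERTEX_SHADER_UNIFORM_READ"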
168
John Zulauf14940722021-04-12 15:19:02 -0600169static std::string string_UsageTag(const ResourceUsageRecord &tag) {
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700170 std::stringstream out;
171
John Zulauffaea0ee2021-01-14 14:01:32 -0700172 out << "command: " << CommandTypeString(tag.command);
173 out << ", seq_no: " << tag.seq_num;
174 if (tag.sub_command != 0) {
175 out << ", subcmd: " << tag.sub_command;
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700176 }
177 return out.str();
178}
John Zulauf4fa68462021-04-26 21:04:22 -0600179static std::string string_UsageIndex(SyncStageAccessIndex usage_index) {
180 const char *stage_access_name = "INVALID_STAGE_ACCESS";
181 if (usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size())) {
182 stage_access_name = syncStageAccessInfoByStageAccessIndex[usage_index].name;
183 }
184 return std::string(stage_access_name);
185}
186
187struct NoopBarrierAction {
188 explicit NoopBarrierAction() {}
189 void operator()(ResourceAccessState *access) const {}
190};
191
192// NOTE: Make sure the proxy doesn't outlive from, as the proxy is pointing directly to access contexts owned by from.
193CommandBufferAccessContext::CommandBufferAccessContext(const CommandBufferAccessContext &from, AsProxyContext dummy)
194 : CommandBufferAccessContext(from.sync_state_) {
195 // Copy only the needed fields out of from for a temporary, proxy command buffer context
196 cb_state_ = from.cb_state_;
197 queue_flags_ = from.queue_flags_;
198 destroyed_ = from.destroyed_;
199 access_log_ = from.access_log_; // potentially large, but no choice given tagging lookup.
200 command_number_ = from.command_number_;
201 subcommand_number_ = from.subcommand_number_;
202 reset_count_ = from.reset_count_;
203
204 const auto *from_context = from.GetCurrentAccessContext();
205 assert(from_context);
206
207 // Construct a fully resolved single access context out of from
208 const NoopBarrierAction noop_barrier;
209 for (AccessAddressType address_type : kAddressTypes) {
210 from_context->ResolveAccessRange(address_type, kFullRange, noop_barrier,
211 &cb_access_context_.GetAccessStateMap(address_type), nullptr);
212 }
    // The proxy has flattened the current render pass context (if any), but the async contexts are needed for hazard detection
214 cb_access_context_.ImportAsyncContexts(*from_context);
215
216 events_context_ = from.events_context_;
217
218 // We don't want to copy the full render_pass_context_ history just for the proxy.
219}
220
221std::string CommandBufferAccessContext::FormatUsage(const ResourceUsageTag tag) const {
222 std::stringstream out;
223 assert(tag < access_log_.size());
224 const auto &record = access_log_[tag];
225 out << string_UsageTag(record);
226 if (record.cb_state != cb_state_.get()) {
227 out << ", command_buffer: " << sync_state_->report_data->FormatHandle(record.cb_state->commandBuffer()).c_str();
228 if (record.cb_state->Destroyed()) {
229 out << " (destroyed)";
230 }
231
232 const auto found_it = cb_execution_reference_.find(record.cb_state);
233 assert(found_it != cb_execution_reference_.end());
234 if (found_it != cb_execution_reference_.end()) {
235 out << ", reset_no: " << std::to_string(found_it->second.reset_count);
236 }
237 } else {
238 out << ", reset_no: " << std::to_string(reset_count_);
239 }
240 return out.str();
241}
242std::string CommandBufferAccessContext::FormatUsage(const ResourceFirstAccess &access) const {
243 std::stringstream out;
244 out << "(recorded_usage: " << string_UsageIndex(access.usage_index);
245 out << ", " << FormatUsage(access.tag) << ")";
246 return out.str();
247}
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700248
John Zulauffaea0ee2021-01-14 14:01:32 -0700249std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
John Zulauf37ceaed2020-07-03 16:18:15 -0600250 const auto &tag = hazard.tag;
John Zulauf59e25072020-07-17 10:55:21 -0600251 assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
252 const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
John Zulauf1dae9192020-06-16 15:46:44 -0600253 std::stringstream out;
John Zulauf37ceaed2020-07-03 16:18:15 -0600254 const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
255 const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
John Zulauf4fa68462021-04-26 21:04:22 -0600256 out << "(";
257 if (!hazard.recorded_access.get()) {
        // If we have a recorded usage, the usage is reported from the recorded context's point of view
259 out << "usage: " << usage_info.name << ", ";
260 }
261 out << "prior_usage: " << stage_access_name;
John Zulauf59e25072020-07-17 10:55:21 -0600262 if (IsHazardVsRead(hazard.hazard)) {
263 const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
Jeremy Gebben40a22942020-12-22 14:22:06 -0700264 out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
John Zulauf59e25072020-07-17 10:55:21 -0600265 } else {
266 SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
267 out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
268 }
269
John Zulauf14940722021-04-12 15:19:02 -0600270 assert(tag < access_log_.size());
John Zulauf4fa68462021-04-26 21:04:22 -0600271 out << ", " << FormatUsage(tag) << ")";
John Zulauf1dae9192020-06-16 15:46:44 -0600272 return out.str();
273}
274
// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
Jeremy Gebben40a22942020-12-22 14:22:06 -0700278static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700279static const SyncStageAccessFlags kColorAttachmentAccessScope =
280 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
281 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
282 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
283 SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
Jeremy Gebben40a22942020-12-22 14:22:06 -0700284static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
285 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700286static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
287 SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
288 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
289 SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -0700290static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
John Zulauf8e3c3e92021-01-06 11:19:36 -0700291static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;
John Zulaufb027cdb2020-05-21 14:25:22 -0600292
John Zulauf8e3c3e92021-01-06 11:19:36 -0700293ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
Jeremy Gebben40a22942020-12-22 14:22:06 -0700294 {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
John Zulauf8e3c3e92021-01-06 11:19:36 -0700295 {kColorAttachmentExecScope, kColorAttachmentAccessScope},
296 {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
297 {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};
298
// Sometimes we have an internal access conflict, and we use the kCurrentCommandTag to set and detect it in temporary/proxy contexts
John Zulauf14940722021-04-12 15:19:02 -0600300static const ResourceUsageTag kCurrentCommandTag(ResourceUsageRecord::kMaxIndex);
John Zulaufb027cdb2020-05-21 14:25:22 -0600301
Jeremy Gebben62c3bf42021-07-21 15:38:24 -0600302static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) { return bindable.GetFakeBaseAddress(); }
John Zulaufb02c1eb2020-10-06 16:33:36 -0600303
locke-lunarg3c038002020-04-30 23:08:08 -0600304inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
305 if (size == VK_WHOLE_SIZE) {
306 return (whole_size - offset);
307 }
308 return size;
309}
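// Illustrative sketch (hypothetical values): VK_WHOLE_SIZE resolves to the remainder of the resource
// past the offset, while an explicit size is passed through unchanged:
//   GetRealWholeSize(256, VK_WHOLE_SIZE, 1024);  // -> 768
//   GetRealWholeSize(256, 128, 1024);            // -> 128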
310
John Zulauf3e86bf02020-09-12 10:47:57 -0600311static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
312 return GetRealWholeSize(offset, size, buf_state.createInfo.size);
313}
314
John Zulauf16adfc92020-04-08 10:28:33 -0600315template <typename T>
John Zulauf355e49b2020-04-24 15:11:15 -0600316static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
John Zulauf16adfc92020-04-08 10:28:33 -0600317 return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
318}
319
John Zulauf355e49b2020-04-24 15:11:15 -0600320static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }
John Zulauf16adfc92020-04-08 10:28:33 -0600321
John Zulauf3e86bf02020-09-12 10:47:57 -0600322static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
323 return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
324}
325
326static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
327 return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
328}
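// Illustrative sketch (hypothetical values): all MakeRange() overloads produce half-open [begin, end)
// address ranges, e.g. a binding at offset 64 with size 192:
//   ResourceAccessRange range = MakeRange(64, 192);  // range.begin == 64, range.end == 256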
329
// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
// Constructor() -- initializes the generator to point to the beginning of the declared space.
// *  -- returns the current range of the generator; an empty range signifies the end
// ++ -- advances to the next non-empty range (or the end)
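// Illustrative sketch of this protocol, using the single-range wrapper defined below (hypothetical range values):
//   SingleRangeGenerator<ResourceAccessRange> gen(ResourceAccessRange(0, 100));
//   for (; gen->non_empty(); ++gen) {
//       const ResourceAccessRange &current = *gen;  // exactly one non-empty range, then end
//   }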
338
339// A wrapper for a single range with the same semantics as the actual generators below
340template <typename KeyType>
341class SingleRangeGenerator {
342 public:
343 SingleRangeGenerator(const KeyType &range) : current_(range) {}
John Zulaufd5115702021-01-18 12:34:33 -0700344 const KeyType &operator*() const { return current_; }
345 const KeyType *operator->() const { return &current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700346 SingleRangeGenerator &operator++() {
347 current_ = KeyType(); // just one real range
348 return *this;
349 }
350
351 bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }
352
353 private:
354 SingleRangeGenerator() = default;
355 const KeyType range_;
356 KeyType current_;
357};
358
John Zulaufae842002021-04-15 18:20:55 -0600359// Generate the ranges that are the intersection of range and the entries in the RangeMap
360template <typename RangeMap, typename KeyType = typename RangeMap::key_type>
361class MapRangesRangeGenerator {
John Zulauf4a6105a2020-11-17 15:11:05 -0700362 public:
John Zulaufd5115702021-01-18 12:34:33 -0700363 // Default constructed is safe to dereference for "empty" test, but for no other operation.
John Zulaufae842002021-04-15 18:20:55 -0600364 MapRangesRangeGenerator() : range_(), map_(nullptr), map_pos_(), current_() {
John Zulaufd5115702021-01-18 12:34:33 -0700365 // Default construction for KeyType *must* be empty range
366 assert(current_.empty());
367 }
John Zulaufae842002021-04-15 18:20:55 -0600368 MapRangesRangeGenerator(const RangeMap &filter, const KeyType &range) : range_(range), map_(&filter), map_pos_(), current_() {
John Zulauf4a6105a2020-11-17 15:11:05 -0700369 SeekBegin();
370 }
John Zulaufae842002021-04-15 18:20:55 -0600371 MapRangesRangeGenerator(const MapRangesRangeGenerator &from) = default;
John Zulaufd5115702021-01-18 12:34:33 -0700372
John Zulauf4a6105a2020-11-17 15:11:05 -0700373 const KeyType &operator*() const { return current_; }
374 const KeyType *operator->() const { return &current_; }
John Zulaufae842002021-04-15 18:20:55 -0600375 MapRangesRangeGenerator &operator++() {
376 ++map_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700377 UpdateCurrent();
378 return *this;
379 }
380
John Zulaufae842002021-04-15 18:20:55 -0600381 bool operator==(const MapRangesRangeGenerator &other) const { return current_ == other.current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700382
John Zulaufae842002021-04-15 18:20:55 -0600383 protected:
John Zulauf4a6105a2020-11-17 15:11:05 -0700384 void UpdateCurrent() {
John Zulaufae842002021-04-15 18:20:55 -0600385 if (map_pos_ != map_->cend()) {
386 current_ = range_ & map_pos_->first;
John Zulauf4a6105a2020-11-17 15:11:05 -0700387 } else {
388 current_ = KeyType();
389 }
390 }
391 void SeekBegin() {
John Zulaufae842002021-04-15 18:20:55 -0600392 map_pos_ = map_->lower_bound(range_);
John Zulauf4a6105a2020-11-17 15:11:05 -0700393 UpdateCurrent();
394 }
John Zulaufae842002021-04-15 18:20:55 -0600395
396 // Adding this functionality here, to avoid gratuitous Base:: qualifiers in the derived class
    // Note: Not exposed in this class's public interface to encourage using a consistent ++/empty generator semantic
398 template <typename Pred>
399 MapRangesRangeGenerator &PredicatedIncrement(Pred &pred) {
400 do {
401 ++map_pos_;
402 } while (map_pos_ != map_->cend() && map_pos_->first.intersects(range_) && !pred(map_pos_));
403 UpdateCurrent();
404 return *this;
405 }
406
John Zulauf4a6105a2020-11-17 15:11:05 -0700407 const KeyType range_;
John Zulaufae842002021-04-15 18:20:55 -0600408 const RangeMap *map_;
409 typename RangeMap::const_iterator map_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700410 KeyType current_;
411};
John Zulaufd5115702021-01-18 12:34:33 -0700412using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
John Zulaufae842002021-04-15 18:20:55 -0600413using EventSimpleRangeGenerator = MapRangesRangeGenerator<SyncEventState::ScopeMap>;
John Zulauf4a6105a2020-11-17 15:11:05 -0700414
John Zulaufae842002021-04-15 18:20:55 -0600415// Generate the ranges for entries meeting the predicate that are the intersection of range and the entries in the RangeMap
416template <typename RangeMap, typename Predicate, typename KeyType = typename RangeMap::key_type>
417class PredicatedMapRangesRangeGenerator : public MapRangesRangeGenerator<RangeMap, KeyType> {
418 public:
419 using Base = MapRangesRangeGenerator<RangeMap, KeyType>;
420 // Default constructed is safe to dereference for "empty" test, but for no other operation.
421 PredicatedMapRangesRangeGenerator() : Base(), pred_() {}
422 PredicatedMapRangesRangeGenerator(const RangeMap &filter, const KeyType &range, Predicate pred)
423 : Base(filter, range), pred_(pred) {}
424 PredicatedMapRangesRangeGenerator(const PredicatedMapRangesRangeGenerator &from) = default;
425
426 PredicatedMapRangesRangeGenerator &operator++() {
427 Base::PredicatedIncrement(pred_);
428 return *this;
429 }
430
431 protected:
432 Predicate pred_;
433};
John Zulauf4a6105a2020-11-17 15:11:05 -0700434
435// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
John Zulaufae842002021-04-15 18:20:55 -0600436// Templated to allow for different Range generators or map sources...
437template <typename RangeMap, typename RangeGen, typename KeyType = typename RangeMap::key_type>
John Zulauf4a6105a2020-11-17 15:11:05 -0700438class FilteredGeneratorGenerator {
439 public:
John Zulaufd5115702021-01-18 12:34:33 -0700440 // Default constructed is safe to dereference for "empty" test, but for no other operation.
441 FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
442 // Default construction for KeyType *must* be empty range
443 assert(current_.empty());
444 }
John Zulaufae842002021-04-15 18:20:55 -0600445 FilteredGeneratorGenerator(const RangeMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
John Zulauf4a6105a2020-11-17 15:11:05 -0700446 SeekBegin();
447 }
John Zulaufd5115702021-01-18 12:34:33 -0700448 FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
John Zulauf4a6105a2020-11-17 15:11:05 -0700449 const KeyType &operator*() const { return current_; }
450 const KeyType *operator->() const { return &current_; }
451 FilteredGeneratorGenerator &operator++() {
452 KeyType gen_range = GenRange();
453 KeyType filter_range = FilterRange();
454 current_ = KeyType();
455 while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
456 if (gen_range.end > filter_range.end) {
457 // if the generated range is beyond the filter_range, advance the filter range
458 filter_range = AdvanceFilter();
459 } else {
460 gen_range = AdvanceGen();
461 }
462 current_ = gen_range & filter_range;
463 }
464 return *this;
465 }
466
467 bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }
468
469 private:
470 KeyType AdvanceFilter() {
471 ++filter_pos_;
472 auto filter_range = FilterRange();
473 if (filter_range.valid()) {
474 FastForwardGen(filter_range);
475 }
476 return filter_range;
477 }
478 KeyType AdvanceGen() {
John Zulaufd5115702021-01-18 12:34:33 -0700479 ++gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700480 auto gen_range = GenRange();
481 if (gen_range.valid()) {
482 FastForwardFilter(gen_range);
483 }
484 return gen_range;
485 }
486
487 KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
John Zulaufd5115702021-01-18 12:34:33 -0700488 KeyType GenRange() const { return *gen_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700489
490 KeyType FastForwardFilter(const KeyType &range) {
491 auto filter_range = FilterRange();
492 int retry_count = 0;
John Zulauf10f1f522020-12-18 12:00:35 -0700493 const static int kRetryLimit = 2; // TODO -- determine whether this limit is optimal
John Zulauf4a6105a2020-11-17 15:11:05 -0700494 while (!filter_range.empty() && (filter_range.end <= range.begin)) {
495 if (retry_count < kRetryLimit) {
496 ++filter_pos_;
497 filter_range = FilterRange();
498 retry_count++;
499 } else {
500 // Okay we've tried walking, do a seek.
501 filter_pos_ = filter_->lower_bound(range);
502 break;
503 }
504 }
505 return FilterRange();
506 }
507
508 // TODO: Consider adding "seek" (or an absolute bound "get" to range generators to make this walk
509 // faster.
510 KeyType FastForwardGen(const KeyType &range) {
511 auto gen_range = GenRange();
512 while (!gen_range.empty() && (gen_range.end <= range.begin)) {
John Zulaufd5115702021-01-18 12:34:33 -0700513 ++gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700514 gen_range = GenRange();
515 }
516 return gen_range;
517 }
518
519 void SeekBegin() {
520 auto gen_range = GenRange();
521 if (gen_range.empty()) {
522 current_ = KeyType();
523 filter_pos_ = filter_->cend();
524 } else {
525 filter_pos_ = filter_->lower_bound(gen_range);
526 current_ = gen_range & FilterRange();
527 }
528 }
529
John Zulaufae842002021-04-15 18:20:55 -0600530 const RangeMap *filter_;
John Zulaufd5115702021-01-18 12:34:33 -0700531 RangeGen gen_;
John Zulaufae842002021-04-15 18:20:55 -0600532 typename RangeMap::const_iterator filter_pos_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700533 KeyType current_;
534};
535
536using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;
537
John Zulauf5c5e88d2019-12-26 11:22:02 -0700538
John Zulauf3e86bf02020-09-12 10:47:57 -0600539ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
540 VkDeviceSize stride) {
541 VkDeviceSize range_start = offset + first_index * stride;
542 VkDeviceSize range_size = 0;
locke-lunargff255f92020-05-13 18:53:52 -0600543 if (count == UINT32_MAX) {
544 range_size = buf_whole_size - range_start;
545 } else {
546 range_size = count * stride;
547 }
John Zulauf3e86bf02020-09-12 10:47:57 -0600548 return MakeRange(range_start, range_size);
locke-lunargff255f92020-05-13 18:53:52 -0600549}
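// Illustrative sketch (hypothetical values): an indexed access of 4 elements with stride 4, starting at
// first_index 2 from a binding at offset 0, touches bytes [8, 24); with count == UINT32_MAX the range
// extends to the end of the bound buffer:
//   ResourceAccessRange range = GetBufferRange(0, whole_size, 2, 4, 4);  // -> [8, 24)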
550
locke-lunarg654e3692020-06-04 17:19:15 -0600551SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
552 VkShaderStageFlagBits stage_flag) {
553 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
554 assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
555 return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
556 }
557 auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
558 if (stage_access == syncStageAccessMaskByShaderStage.end()) {
559 assert(0);
560 }
561 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
562 return stage_access->second.uniform_read;
563 }
564
    // If the descriptorSet is writable, we don't need to care about SHADER_READ; SHADER_WRITE is enough.
    // If a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard is impossible.
568 if (descriptor_data.is_writable) {
Jeremy Gebben40a22942020-12-22 14:22:06 -0700569 return stage_access->second.storage_write;
locke-lunarg654e3692020-06-04 17:19:15 -0600570 }
Jeremy Gebben40a22942020-12-22 14:22:06 -0700571 // TODO: sampled_read
572 return stage_access->second.storage_read;
locke-lunarg654e3692020-06-04 17:19:15 -0600573}
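// Illustrative sketch (hypothetical descriptor metadata): a writable storage buffer bound to the fragment
// stage maps to that stage's storage_write index, a uniform buffer maps to uniform_read, and an input
// attachment always maps to SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ:
//   const auto index = GetSyncStageAccessIndexsByDescriptorSet(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
//                                                              descriptor_data, VK_SHADER_STAGE_FRAGMENT_BIT);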
574
locke-lunarg37047832020-06-12 13:44:45 -0600575bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
576 return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
577 image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
578 image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
579 ? true
580 : false;
581}
582
583bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
584 return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
585 image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
586 image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
587 ? true
588 : false;
589}
590
John Zulauf355e49b2020-04-24 15:11:15 -0600591// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
John Zulaufb02c1eb2020-10-06 16:33:36 -0600592template <typename Action>
593static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
594 Action &action) {
595 // At this point the "apply over range" logic only supports a single memory binding
596 if (!SimpleBinding(image_state)) return;
597 auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600598 const auto base_address = ResourceBaseAddress(image_state);
John Zulauf150e5332020-12-03 08:52:52 -0700599 subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
600 image_state.createInfo.extent, base_address);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600601 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -0700602 action(*range_gen);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600603 }
604}
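// Illustrative sketch (hypothetical action): any callable taking the generated range can be applied across
// an image's bound address space, one linearized range at a time:
//   auto action = [](const auto &range) { /* validate or update the range */ };
//   ApplyOverImageRange(image_state, subresource_range, action);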
605
// Traverse the attachment resolves for a specific subpass, and apply action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
610template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -0700611void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
612 uint32_t subpass) {
John Zulauf7635de32020-05-29 17:14:15 -0600613 const auto &rp_ci = rp_state.createInfo;
614 const auto *attachment_ci = rp_ci.pAttachments;
615 const auto &subpass_ci = rp_ci.pSubpasses[subpass];
616
    // Color resolves -- require an in-use color attachment and a matching in-use resolve attachment
618 const auto *color_attachments = subpass_ci.pColorAttachments;
619 const auto *color_resolve = subpass_ci.pResolveAttachments;
620 if (color_resolve && color_attachments) {
621 for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
622 const auto &color_attach = color_attachments[i].attachment;
623 const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
624 if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
625 action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
John Zulaufd0ec59f2021-03-13 14:25:08 -0700626 AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ,
627 SyncOrdering::kColorAttachment);
John Zulauf7635de32020-05-29 17:14:15 -0600628 action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
John Zulaufd0ec59f2021-03-13 14:25:08 -0700629 AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
630 SyncOrdering::kColorAttachment);
John Zulauf7635de32020-05-29 17:14:15 -0600631 }
632 }
633 }
634
635 // Depth stencil resolve only if the extension is present
Mark Lobodzinski1f887d32020-12-30 15:31:33 -0700636 const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
John Zulauf7635de32020-05-29 17:14:15 -0600637 if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
638 (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
639 (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
640 const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
641 const auto src_ci = attachment_ci[src_at];
642 // The formats are required to match so we can pick either
643 const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
644 const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
645 const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
John Zulauf7635de32020-05-29 17:14:15 -0600646
647 // Figure out which aspects are actually touched during resolve operations
648 const char *aspect_string = nullptr;
John Zulaufd0ec59f2021-03-13 14:25:08 -0700649 AttachmentViewGen::Gen gen_type = AttachmentViewGen::Gen::kRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600650 if (resolve_depth && resolve_stencil) {
John Zulauf7635de32020-05-29 17:14:15 -0600651 aspect_string = "depth/stencil";
652 } else if (resolve_depth) {
653 // Validate depth only
John Zulaufd0ec59f2021-03-13 14:25:08 -0700654 gen_type = AttachmentViewGen::Gen::kDepthOnlyRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600655 aspect_string = "depth";
656 } else if (resolve_stencil) {
657 // Validate all stencil only
John Zulaufd0ec59f2021-03-13 14:25:08 -0700658 gen_type = AttachmentViewGen::Gen::kStencilOnlyRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600659 aspect_string = "stencil";
660 }
661
John Zulaufd0ec59f2021-03-13 14:25:08 -0700662 if (aspect_string) {
663 action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at], gen_type,
664 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster);
665 action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at], gen_type,
666 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulauf7635de32020-05-29 17:14:15 -0600667 }
668 }
669}
670
671// Action for validating resolve operations
672class ValidateResolveAction {
673 public:
John Zulauffaea0ee2021-01-14 14:01:32 -0700674 ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
John Zulauf64ffe552021-02-06 10:25:07 -0700675 const CommandExecutionContext &ex_context, const char *func_name)
John Zulauf7635de32020-05-29 17:14:15 -0600676 : render_pass_(render_pass),
677 subpass_(subpass),
678 context_(context),
John Zulauf64ffe552021-02-06 10:25:07 -0700679 ex_context_(ex_context),
John Zulauf7635de32020-05-29 17:14:15 -0600680 func_name_(func_name),
681 skip_(false) {}
682 void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
John Zulaufd0ec59f2021-03-13 14:25:08 -0700683 const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage,
684 SyncOrdering ordering_rule) {
John Zulauf7635de32020-05-29 17:14:15 -0600685 HazardResult hazard;
John Zulaufd0ec59f2021-03-13 14:25:08 -0700686 hazard = context_.DetectHazard(view_gen, gen_type, current_usage, ordering_rule);
John Zulauf7635de32020-05-29 17:14:15 -0600687 if (hazard.hazard) {
John Zulauffaea0ee2021-01-14 14:01:32 -0700688 skip_ |=
John Zulauf64ffe552021-02-06 10:25:07 -0700689 ex_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -0700690 "%s: Hazard %s in subpass %" PRIu32 "during %s %s, from attachment %" PRIu32
691 " to resolve attachment %" PRIu32 ". Access info %s.",
692 func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
John Zulauf64ffe552021-02-06 10:25:07 -0700693 attachment_name, src_at, dst_at, ex_context_.FormatUsage(hazard).c_str());
John Zulauf7635de32020-05-29 17:14:15 -0600694 }
695 }
696 // Providing a mechanism for the constructing caller to get the result of the validation
697 bool GetSkip() const { return skip_; }
698
699 private:
700 VkRenderPass render_pass_;
701 const uint32_t subpass_;
702 const AccessContext &context_;
John Zulauf64ffe552021-02-06 10:25:07 -0700703 const CommandExecutionContext &ex_context_;
John Zulauf7635de32020-05-29 17:14:15 -0600704 const char *func_name_;
705 bool skip_;
706};
707
708// Update action for resolve operations
709class UpdateStateResolveAction {
710 public:
John Zulauf14940722021-04-12 15:19:02 -0600711 UpdateStateResolveAction(AccessContext &context, ResourceUsageTag tag) : context_(context), tag_(tag) {}
John Zulaufd0ec59f2021-03-13 14:25:08 -0700712 void operator()(const char *, const char *, uint32_t, uint32_t, const AttachmentViewGen &view_gen,
713 AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) {
John Zulauf7635de32020-05-29 17:14:15 -0600714 // Ignores validation only arguments...
John Zulaufd0ec59f2021-03-13 14:25:08 -0700715 context_.UpdateAccessState(view_gen, gen_type, current_usage, ordering_rule, tag_);
John Zulauf7635de32020-05-29 17:14:15 -0600716 }
717
718 private:
719 AccessContext &context_;
John Zulauf14940722021-04-12 15:19:02 -0600720 const ResourceUsageTag tag_;
John Zulauf7635de32020-05-29 17:14:15 -0600721};
722
John Zulauf59e25072020-07-17 10:55:21 -0600723void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
John Zulauf14940722021-04-12 15:19:02 -0600724 const SyncStageAccessFlags &prior_, const ResourceUsageTag tag_) {
John Zulauf4fa68462021-04-26 21:04:22 -0600725 access_state = layer_data::make_unique<const ResourceAccessState>(*access_state_);
John Zulauf59e25072020-07-17 10:55:21 -0600726 usage_index = usage_index_;
727 hazard = hazard_;
728 prior_access = prior_;
729 tag = tag_;
730}
731
John Zulauf4fa68462021-04-26 21:04:22 -0600732void HazardResult::AddRecordedAccess(const ResourceFirstAccess &first_access) {
733 recorded_access = layer_data::make_unique<const ResourceFirstAccess>(first_access);
734}
735
John Zulauf540266b2020-04-06 18:54:53 -0600736AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
737 const std::vector<SubpassDependencyGraphNode> &dependencies,
John Zulauf1a224292020-06-30 14:52:13 -0600738 const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600739 Reset();
740 const auto &subpass_dep = dependencies[subpass];
John Zulauf22aefed2021-03-11 18:14:35 -0700741 bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
742 prev_.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
John Zulauf355e49b2020-04-24 15:11:15 -0600743 prev_by_subpass_.resize(subpass, nullptr); // Can't be more prevs than the subpass we're on
John Zulauf3d84f1b2020-03-09 13:33:25 -0600744 for (const auto &prev_dep : subpass_dep.prev) {
John Zulaufbaea94f2020-09-15 17:55:16 -0600745 const auto prev_pass = prev_dep.first->pass;
746 const auto &prev_barriers = prev_dep.second;
747 assert(prev_dep.second.size());
748 prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
749 prev_by_subpass_[prev_pass] = &prev_.back();
John Zulauf5c5e88d2019-12-26 11:22:02 -0700750 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600751
752 async_.reserve(subpass_dep.async.size());
753 for (const auto async_subpass : subpass_dep.async) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -0700754 async_.emplace_back(&contexts[async_subpass]);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600755 }
John Zulauf22aefed2021-03-11 18:14:35 -0700756 if (has_barrier_from_external) {
        // Store the barrier from external with the rest, but save a pointer for "by subpass" lookups.
758 prev_.emplace_back(external_context, queue_flags, subpass_dep.barrier_from_external);
759 src_external_ = &prev_.back();
John Zulaufe5da6e52020-03-18 15:32:18 -0600760 }
John Zulaufbaea94f2020-09-15 17:55:16 -0600761 if (subpass_dep.barrier_to_external.size()) {
762 dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600763 }
John Zulauf5c5e88d2019-12-26 11:22:02 -0700764}
765
John Zulauf5f13a792020-03-10 07:31:21 -0600766template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700767HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
John Zulauf540266b2020-04-06 18:54:53 -0600768 const ResourceAccessRange &range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600769 ResourceAccessRangeMap descent_map;
John Zulauf69133422020-05-20 14:55:53 -0600770 ResolvePreviousAccess(type, range, &descent_map, nullptr);
John Zulauf5f13a792020-03-10 07:31:21 -0600771
772 HazardResult hazard;
773 for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
774 hazard = detector.Detect(prev);
775 }
776 return hazard;
777}
778
John Zulauf4a6105a2020-11-17 15:11:05 -0700779template <typename Action>
780void AccessContext::ForAll(Action &&action) {
781 for (const auto address_type : kAddressTypes) {
782 auto &accesses = GetAccessStateMap(address_type);
783 for (const auto &access : accesses) {
784 action(address_type, access);
785 }
786 }
787}
788
// A recursive range walker for hazard detection, first for the current context and then recurring (DetectPreviousHazard)
// to walk the DAG of the contexts (for example subpasses)
791template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700792HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
John Zulauf355e49b2020-04-24 15:11:15 -0600793 DetectOptions options) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600794 HazardResult hazard;
John Zulauf5f13a792020-03-10 07:31:21 -0600795
John Zulauf1a224292020-06-30 14:52:13 -0600796 if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
John Zulauf355e49b2020-04-24 15:11:15 -0600797 // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
798 // so we'll check these first
799 for (const auto &async_context : async_) {
800 hazard = async_context->DetectAsyncHazard(type, detector, range);
801 if (hazard.hazard) return hazard;
802 }
John Zulauf5f13a792020-03-10 07:31:21 -0600803 }
804
John Zulauf1a224292020-06-30 14:52:13 -0600805 const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600806
John Zulauf69133422020-05-20 14:55:53 -0600807 const auto &accesses = GetAccessStateMap(type);
John Zulauf3cafbf72021-03-26 16:55:19 -0600808 const auto the_end = accesses.cend(); // End is not invalidated
809 auto pos = accesses.lower_bound(range);
John Zulauf69133422020-05-20 14:55:53 -0600810 ResourceAccessRange gap = {range.begin, range.begin};
John Zulauf5f13a792020-03-10 07:31:21 -0600811
John Zulauf3cafbf72021-03-26 16:55:19 -0600812 while (pos != the_end && pos->first.begin < range.end) {
John Zulauf69133422020-05-20 14:55:53 -0600813 // Cover any leading gap, or gap between entries
814 if (detect_prev) {
815 // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
816 // Cover any leading gap, or gap between entries
817 gap.end = pos->first.begin; // We know this begin is < range.end
John Zulauf355e49b2020-04-24 15:11:15 -0600818 if (gap.non_empty()) {
John Zulauf69133422020-05-20 14:55:53 -0600819 // Recur on all gaps
John Zulauf16adfc92020-04-08 10:28:33 -0600820 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf5f13a792020-03-10 07:31:21 -0600821 if (hazard.hazard) return hazard;
822 }
John Zulauf69133422020-05-20 14:55:53 -0600823 // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
824 gap.begin = pos->first.end;
825 }
826
827 hazard = detector.Detect(pos);
828 if (hazard.hazard) return hazard;
John Zulauf3cafbf72021-03-26 16:55:19 -0600829 ++pos;
John Zulauf69133422020-05-20 14:55:53 -0600830 }
831
832 if (detect_prev) {
833 // Detect in the trailing empty as needed
834 gap.end = range.end;
835 if (gap.non_empty()) {
836 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf16adfc92020-04-08 10:28:33 -0600837 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600838 }
839
840 return hazard;
841}
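// Illustrative walk of DetectHazard above (hypothetical ranges): for a queried range [0, 100) with existing
// entries at [10, 20) and [40, 50), the detector runs against both entries, while the gaps [0, 10), [20, 40)
// and [50, 100) are resolved against previous contexts (when kDetectPrevious is set) via DetectPreviousHazard().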
842
843// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
844template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700845HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
846 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -0600847 auto &accesses = GetAccessStateMap(type);
John Zulauf3cafbf72021-03-26 16:55:19 -0600848 auto pos = accesses.lower_bound(range);
849 const auto the_end = accesses.end();
John Zulauf16adfc92020-04-08 10:28:33 -0600850
John Zulauf3d84f1b2020-03-09 13:33:25 -0600851 HazardResult hazard;
John Zulauf3cafbf72021-03-26 16:55:19 -0600852 while (pos != the_end && pos->first.begin < range.end) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -0700853 hazard = detector.DetectAsync(pos, start_tag_);
John Zulauf3cafbf72021-03-26 16:55:19 -0600854 if (hazard.hazard) break;
855 ++pos;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600856 }
John Zulauf16adfc92020-04-08 10:28:33 -0600857
John Zulauf3d84f1b2020-03-09 13:33:25 -0600858 return hazard;
859}
860
John Zulaufb02c1eb2020-10-06 16:33:36 -0600861struct ApplySubpassTransitionBarriersAction {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -0700862 explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
John Zulaufb02c1eb2020-10-06 16:33:36 -0600863 void operator()(ResourceAccessState *access) const {
864 assert(access);
865 access->ApplyBarriers(barriers, true);
866 }
867 const std::vector<SyncBarrier> &barriers;
868};
869
John Zulauf22aefed2021-03-11 18:14:35 -0700870struct ApplyTrackbackStackAction {
871 explicit ApplyTrackbackStackAction(const std::vector<SyncBarrier> &barriers_,
872 const ResourceAccessStateFunction *previous_barrier_ = nullptr)
873 : barriers(barriers_), previous_barrier(previous_barrier_) {}
John Zulaufb02c1eb2020-10-06 16:33:36 -0600874 void operator()(ResourceAccessState *access) const {
875 assert(access);
876 assert(!access->HasPendingState());
877 access->ApplyBarriers(barriers, false);
878 access->ApplyPendingBarriers(kCurrentCommandTag);
John Zulauf22aefed2021-03-11 18:14:35 -0700879 if (previous_barrier) {
880 assert(bool(*previous_barrier));
881 (*previous_barrier)(access);
882 }
John Zulaufb02c1eb2020-10-06 16:33:36 -0600883 }
884 const std::vector<SyncBarrier> &barriers;
John Zulauf22aefed2021-03-11 18:14:35 -0700885 const ResourceAccessStateFunction *previous_barrier;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600886};
887
// Splits a single map entry into pieces matching the entries in [first, last). The total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing to dest; first and last must be iterators pointing to a
// *different* map from dest.
// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
// range [first, last)
893template <typename BarrierAction>
John Zulauf355e49b2020-04-24 15:11:15 -0600894static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
895 ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
John Zulaufb02c1eb2020-10-06 16:33:36 -0600896 BarrierAction &barrier_action) {
John Zulauf355e49b2020-04-24 15:11:15 -0600897 auto at = entry;
898 for (auto pos = first; pos != last; ++pos) {
899 // Every member of the input iterator range must fit within the remaining portion of entry
900 assert(at->first.includes(pos->first));
901 assert(at != dest->end());
902 // Trim up at to the same size as the entry to resolve
903 at = sparse_container::split(at, *dest, pos->first);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600904 auto access = pos->second; // intentional copy
905 barrier_action(&access);
John Zulauf355e49b2020-04-24 15:11:15 -0600906 at->second.Resolve(access);
907 ++at; // Go to the remaining unused section of entry
908 }
909}
910
John Zulaufa0a98292020-09-18 09:30:10 -0600911static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
912 SyncBarrier merged = {};
913 for (const auto &barrier : barriers) {
914 merged.Merge(barrier);
915 }
916 return merged;
917}
918
John Zulaufb02c1eb2020-10-06 16:33:36 -0600919template <typename BarrierAction>
John Zulauf43cc7462020-12-03 12:33:12 -0700920void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
John Zulauf355e49b2020-04-24 15:11:15 -0600921 ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
922 bool recur_to_infill) const {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600923 if (!range.non_empty()) return;
924
John Zulauf355e49b2020-04-24 15:11:15 -0600925 ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
926 while (current->range.non_empty() && range.includes(current->range.begin)) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600927 const auto current_range = current->range & range;
John Zulauf16adfc92020-04-08 10:28:33 -0600928 if (current->pos_B->valid) {
929 const auto &src_pos = current->pos_B->lower_bound;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600930 auto access = src_pos->second; // intentional copy
931 barrier_action(&access);
932
John Zulauf16adfc92020-04-08 10:28:33 -0600933 if (current->pos_A->valid) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600934 const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
935 trimmed->second.Resolve(access);
936 current.invalidate_A(trimmed);
John Zulauf5f13a792020-03-10 07:31:21 -0600937 } else {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600938 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
John Zulauf355e49b2020-04-24 15:11:15 -0600939 current.invalidate_A(inserted); // Update the parallel iterator to point at the insert segment
John Zulauf5f13a792020-03-10 07:31:21 -0600940 }
John Zulauf16adfc92020-04-08 10:28:33 -0600941 } else {
942 // we have to descend to fill this gap
943 if (recur_to_infill) {
John Zulauf22aefed2021-03-11 18:14:35 -0700944 ResourceAccessRange recurrence_range = current_range;
945 // The current context is empty for the current range, so recur to fill the gap.
946 // Since we will be recurring back up the DAG, expand the gap descent to cover the full range for which B
947 // is not valid, to minimize that recurrence
948 if (current->pos_B.at_end()) {
949 // Do the remainder here....
950 recurrence_range.end = range.end;
John Zulauf355e49b2020-04-24 15:11:15 -0600951 } else {
John Zulauf22aefed2021-03-11 18:14:35 -0700952 // Recur only over the range until B becomes valid (within the limits of range).
953 recurrence_range.end = std::min(range.end, current->pos_B->lower_bound->first.begin);
John Zulauf355e49b2020-04-24 15:11:15 -0600954 }
John Zulauf22aefed2021-03-11 18:14:35 -0700955 ResolvePreviousAccessStack(type, recurrence_range, resolve_map, infill_state, barrier_action);
956
                // Given that there could be gaps, we need to seek carefully to avoid repeatedly searching the same gaps in the
                // next iteration of the outer while.
959
960 // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
961 // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
962 // we stepped on the dest map
John Zulauf22aefed2021-03-11 18:14:35 -0700963 const auto seek_to = recurrence_range.end - 1; // The subtraction is safe as range can't be empty (loop condition)
locke-lunarg88dbb542020-06-23 22:05:42 -0600964 current.invalidate_A(); // Changes current->range
John Zulauf355e49b2020-04-24 15:11:15 -0600965 current.seek(seek_to);
966 } else if (!current->pos_A->valid && infill_state) {
967 // If we didn't find anything in the current range, and we aren't reccuring... we infill if required
968 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
969 current.invalidate_A(inserted); // Update the parallel iterator to point at the correct segment after insert
John Zulauf16adfc92020-04-08 10:28:33 -0600970 }
John Zulauf5f13a792020-03-10 07:31:21 -0600971 }
John Zulauf16adfc92020-04-08 10:28:33 -0600972 ++current;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600973 }
John Zulauf1a224292020-06-30 14:52:13 -0600974
975 // Infill if range goes passed both the current and resolve map prior contents
976 if (recur_to_infill && (current->range.end < range.end)) {
977 ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
John Zulauf22aefed2021-03-11 18:14:35 -0700978 ResolvePreviousAccessStack<BarrierAction>(type, trailing_fill_range, resolve_map, infill_state, barrier_action);
John Zulauf1a224292020-06-30 14:52:13 -0600979 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600980}
981
John Zulauf22aefed2021-03-11 18:14:35 -0700982template <typename BarrierAction>
983void AccessContext::ResolvePreviousAccessStack(AccessAddressType type, const ResourceAccessRange &range,
984 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
985 const BarrierAction &previous_barrier) const {
986 ResourceAccessStateFunction stacked_barrier(std::ref(previous_barrier));
987 ResolvePreviousAccess(type, range, descent_map, infill_state, &stacked_barrier);
988}
989
John Zulauf43cc7462020-12-03 12:33:12 -0700990void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
John Zulauf22aefed2021-03-11 18:14:35 -0700991 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
992 const ResourceAccessStateFunction *previous_barrier) const {
993 if (prev_.size() == 0) {
John Zulauf5f13a792020-03-10 07:31:21 -0600994 if (range.non_empty() && infill_state) {
            // Fill the empty portions of descent_map with the default_state, with the barrier function applied (iff present)
996 ResourceAccessState state_copy;
997 if (previous_barrier) {
998 assert(bool(*previous_barrier));
999 state_copy = *infill_state;
1000 (*previous_barrier)(&state_copy);
1001 infill_state = &state_copy;
1002 }
1003 sparse_container::update_range_value(*descent_map, range, *infill_state,
1004 sparse_container::value_precedence::prefer_dest);
John Zulauf5f13a792020-03-10 07:31:21 -06001005 }
1006 } else {
1007 // Look for something to fill the gap further along.
1008 for (const auto &prev_dep : prev_) {
John Zulauf22aefed2021-03-11 18:14:35 -07001009 const ApplyTrackbackStackAction barrier_action(prev_dep.barriers, previous_barrier);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001010 prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001011 }
John Zulauf5f13a792020-03-10 07:31:21 -06001012 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001013}
1014
John Zulauf4a6105a2020-11-17 15:11:05 -07001015// Non-lazy import of all accesses, WaitEvents needs this.
1016void AccessContext::ResolvePreviousAccesses() {
1017 ResourceAccessState default_state;
John Zulauf22aefed2021-03-11 18:14:35 -07001018 if (!prev_.size()) return; // If no previous contexts, nothing to do
1019
John Zulauf4a6105a2020-11-17 15:11:05 -07001020 for (const auto address_type : kAddressTypes) {
1021 ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
1022 }
1023}
1024
John Zulauf43cc7462020-12-03 12:33:12 -07001025AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
1026 return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
John Zulauf16adfc92020-04-08 10:28:33 -06001027}
1028
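// Map attachment load ops to the stage/access index used for hazard detection: LOAD_OP_NONE_EXT performs no access,
// LOAD_OP_LOAD is an attachment read, and (on the assumption that CLEAR/DONT_CARE may write the attachment contents)
// any other load op is treated as an attachment write.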
John Zulauf1507ee42020-05-18 11:33:09 -06001029static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001030 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1031 ? SYNC_ACCESS_INDEX_NONE
1032 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
1033 : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001034 return stage_access;
1035}
1036static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
John Zulauf57261402021-08-13 11:32:06 -06001037 const auto stage_access =
1038 (load_op == VK_ATTACHMENT_LOAD_OP_NONE_EXT)
1039 ? SYNC_ACCESS_INDEX_NONE
1040 : ((load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
1041 : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE);
John Zulauf1507ee42020-05-18 11:33:09 -06001042 return stage_access;
1043}
1044
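// Build a copy of "context" with the resolve and store accesses for "subpass" already applied, so validation of the
// next subpass can account for effects that have not yet been recorded into the real context.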
John Zulauf7635de32020-05-29 17:14:15 -06001045// Caller must manage returned pointer
1046static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001047 uint32_t subpass, const AttachmentViewGenVector &attachment_views) {
John Zulauf7635de32020-05-29 17:14:15 -06001048 auto *proxy = new AccessContext(context);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001049 proxy->UpdateAttachmentResolveAccess(rp_state, attachment_views, subpass, kCurrentCommandTag);
1050 proxy->UpdateAttachmentStoreAccess(rp_state, attachment_views, subpass, kCurrentCommandTag);
John Zulauf7635de32020-05-29 17:14:15 -06001051 return proxy;
1052}
1053
John Zulaufb02c1eb2020-10-06 16:33:36 -06001054template <typename BarrierAction>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001055void AccessContext::ResolveAccessRange(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1056 BarrierAction &barrier_action, ResourceAccessRangeMap *descent_map,
1057 const ResourceAccessState *infill_state) const {
1058 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1059 if (!attachment_gen) return;
1060
1061 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1062 const AccessAddressType address_type = view_gen.GetAddressType();
1063 for (; range_gen->non_empty(); ++range_gen) {
1064 ResolveAccessRange(address_type, *range_gen, barrier_action, descent_map, infill_state);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001065 }
John Zulauf62f10592020-04-03 12:20:02 -06001066}
1067
John Zulauf7635de32020-05-29 17:14:15 -06001068// Layout transitions are handled as if they were occurring at the beginning of the next subpass
John Zulauf64ffe552021-02-06 10:25:07 -07001069bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001070 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001071 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001072 bool skip = false;
John Zulauf7635de32020-05-29 17:14:15 -06001073    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
1074 // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
1075    // those effects have not been recorded yet.
1076 //
1077 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
1078 // to apply and only copy then, if this proves a hot spot.
1079 std::unique_ptr<AccessContext> proxy_for_prev;
1080 TrackBack proxy_track_back;
1081
John Zulauf355e49b2020-04-24 15:11:15 -06001082 const auto &transitions = rp_state.subpass_transitions[subpass];
1083 for (const auto &transition : transitions) {
John Zulauf7635de32020-05-29 17:14:15 -06001084 const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
1085
1086 const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
John Zulauf22aefed2021-03-11 18:14:35 -07001087 assert(track_back);
John Zulauf7635de32020-05-29 17:14:15 -06001088 if (prev_needs_proxy) {
1089 if (!proxy_for_prev) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001090 proxy_for_prev.reset(
1091 CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass, attachment_views));
John Zulauf7635de32020-05-29 17:14:15 -06001092 proxy_track_back = *track_back;
1093 proxy_track_back.context = proxy_for_prev.get();
1094 }
1095 track_back = &proxy_track_back;
1096 }
1097 auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
John Zulauf355e49b2020-04-24 15:11:15 -06001098 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001099 skip |= ex_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07001100 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1101 " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
1102 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
1103 string_VkImageLayout(transition.old_layout),
1104 string_VkImageLayout(transition.new_layout),
John Zulauf64ffe552021-02-06 10:25:07 -07001105 ex_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06001106 }
1107 }
1108 return skip;
1109}
1110
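// Validate the load operations of each attachment first used in this subpass: color aspects against the color
// attachment load usage, depth/stencil aspects against the early-fragment-tests usage, each over the render area
// portion of the attachment view.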
John Zulauf64ffe552021-02-06 10:25:07 -07001111bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001112 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001113 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001114 bool skip = false;
1115 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufa0a98292020-09-18 09:30:10 -06001116
John Zulauf1507ee42020-05-18 11:33:09 -06001117 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1118 if (subpass == rp_state.attachment_first_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001119 const auto &view_gen = attachment_views[i];
1120 if (!view_gen.IsValid()) continue;
John Zulauf1507ee42020-05-18 11:33:09 -06001121 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001122
1123            // Need to check in the following way
1124            // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1125 // vs. transition
1126 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1127 // for each aspect loaded.
1128
1129 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001130 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001131 const bool is_color = !(has_depth || has_stencil);
1132
1133 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001134 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001135
John Zulaufaff20662020-06-01 14:07:58 -06001136 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001137 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001138
John Zulaufb02c1eb2020-10-06 16:33:36 -06001139 bool checked_stencil = false;
John Zulauf57261402021-08-13 11:32:06 -06001140 if (is_color && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001141 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea, load_index, SyncOrdering::kColorAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001142 aspect = "color";
1143 } else {
John Zulauf57261402021-08-13 11:32:06 -06001144 if (has_depth && (load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001145 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_index,
1146 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001147 aspect = "depth";
1148 }
John Zulauf57261402021-08-13 11:32:06 -06001149 if (!hazard.hazard && has_stencil && (stencil_load_index != SYNC_ACCESS_INDEX_NONE)) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001150 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, stencil_load_index,
1151 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001152 aspect = "stencil";
1153 checked_stencil = true;
1154 }
1155 }
1156
1157 if (hazard.hazard) {
1158 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulauf64ffe552021-02-06 10:25:07 -07001159 const auto &sync_state = ex_context.GetSyncState();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001160 if (hazard.tag == kCurrentCommandTag) {
1161 // Hazard vs. ILT
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001162 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulaufb02c1eb2020-10-06 16:33:36 -06001163 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1164 " aspect %s during load with loadOp %s.",
1165 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1166 } else {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001167 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauf1507ee42020-05-18 11:33:09 -06001168 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001169 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001170 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauf64ffe552021-02-06 10:25:07 -07001171 ex_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001172 }
1173 }
1174 }
1175 }
1176 return skip;
1177}
1178
John Zulaufaff20662020-06-01 14:07:58 -06001179// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1180// because of the ordering guarantees w.r.t. sample access and that the resolve validation hasn't altered the state, because
1181// store is part of the same Next/End operation.
1182// The latter is handled in layout transition validation directly
John Zulauf64ffe552021-02-06 10:25:07 -07001183bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001184 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001185 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06001186 bool skip = false;
1187 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001188
1189 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1190 if (subpass == rp_state.attachment_last_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001191 const AttachmentViewGen &view_gen = attachment_views[i];
1192 if (!view_gen.IsValid()) continue;
John Zulaufaff20662020-06-01 14:07:58 -06001193 const auto &ci = attachment_ci[i];
1194
1195 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1196 // so we assume that an implementation is *free* to write in that case, meaning that for correctness
1197 // sake, we treat DONT_CARE as writing.
1198 const bool has_depth = FormatHasDepth(ci.format);
1199 const bool has_stencil = FormatHasStencil(ci.format);
1200 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001201 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001202 if (!has_stencil && !store_op_stores) continue;
1203
1204 HazardResult hazard;
1205 const char *aspect = nullptr;
1206 bool checked_stencil = false;
1207 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001208 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
1209 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001210 aspect = "color";
1211 } else {
John Zulauf57261402021-08-13 11:32:06 -06001212 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001213 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001214 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1215 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001216 aspect = "depth";
1217 }
1218 if (!hazard.hazard && has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001219 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1220 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001221 aspect = "stencil";
1222 checked_stencil = true;
1223 }
1224 }
1225
1226 if (hazard.hazard) {
1227 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1228 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001229 skip |= ex_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07001230 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1231 " %s aspect during store with %s %s. Access info %s",
1232 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
John Zulauf64ffe552021-02-06 10:25:07 -07001233 op_type_string, store_op_string, ex_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001234 }
1235 }
1236 }
1237 return skip;
1238}
1239
John Zulauf64ffe552021-02-06 10:25:07 -07001240bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001241 const VkRect2D &render_area, const AttachmentViewGenVector &attachment_views,
1242 const char *func_name, uint32_t subpass) const {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001243 ValidateResolveAction validate_action(rp_state.renderPass(), subpass, *this, ex_context, func_name);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001244 ResolveOperation(validate_action, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001245 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001246}
1247
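// The "Detector" objects below supply the per-map-entry checks used by the templated AccessContext::DetectHazard
// functions: Detect() for ordered (same queue/context) checks, DetectAsync() for checks against asynchronous contexts.
//
// Illustrative sketch only (not a quote of a specific call site; the enumerant is one already used in this file):
//     HazardDetector detector(SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE);
//     HazardResult hazard = DetectHazard(AccessAddressType::kLinear, detector, range, DetectOptions::kDetectAll);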
John Zulauf3d84f1b2020-03-09 13:33:25 -06001248class HazardDetector {
1249 SyncStageAccessIndex usage_index_;
1250
1251 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001252 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf14940722021-04-12 15:19:02 -06001253 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001254 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001255 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001256 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001257};
1258
John Zulauf69133422020-05-20 14:55:53 -06001259class HazardDetectorWithOrdering {
1260 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001261 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001262
1263 public:
1264 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001265 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001266 }
John Zulauf14940722021-04-12 15:19:02 -06001267 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001268 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001269 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001270 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001271};
1272
John Zulauf16adfc92020-04-08 10:28:33 -06001273HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001274 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001275 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001276 const auto base_address = ResourceBaseAddress(buffer);
1277 HazardDetector detector(usage_index);
1278 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001279}
1280
John Zulauf69133422020-05-20 14:55:53 -06001281template <typename Detector>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001282HazardResult AccessContext::DetectHazard(Detector &detector, const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1283 DetectOptions options) const {
1284 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1285 if (!attachment_gen) return HazardResult();
1286
1287 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1288 const auto address_type = view_gen.GetAddressType();
1289 for (; range_gen->non_empty(); ++range_gen) {
1290 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1291 if (hazard.hazard) return hazard;
1292 }
1293
1294 return HazardResult();
1295}
1296
1297template <typename Detector>
John Zulauf69133422020-05-20 14:55:53 -06001298HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1299 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1300 const VkExtent3D &extent, DetectOptions options) const {
1301 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001302 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001303 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1304 base_address);
1305 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001306 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001307 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001308 if (hazard.hazard) return hazard;
1309 }
1310 return HazardResult();
1311}
John Zulauf110413c2021-03-20 05:38:38 -06001312template <typename Detector>
1313HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1314 const VkImageSubresourceRange &subresource_range, DetectOptions options) const {
1315 if (!SimpleBinding(image)) return HazardResult();
1316 const auto base_address = ResourceBaseAddress(image);
1317 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1318 const auto address_type = ImageAddressType(image);
1319 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf110413c2021-03-20 05:38:38 -06001320 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1321 if (hazard.hazard) return hazard;
1322 }
1323 return HazardResult();
1324}
John Zulauf69133422020-05-20 14:55:53 -06001325
John Zulauf540266b2020-04-06 18:54:53 -06001326HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1327 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1328 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001329 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1330 subresource.layerCount};
John Zulauf110413c2021-03-20 05:38:38 -06001331 HazardDetector detector(current_usage);
1332 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf1507ee42020-05-18 11:33:09 -06001333}
1334
1335HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf110413c2021-03-20 05:38:38 -06001336 const VkImageSubresourceRange &subresource_range) const {
John Zulauf69133422020-05-20 14:55:53 -06001337 HazardDetector detector(current_usage);
John Zulauf110413c2021-03-20 05:38:38 -06001338 return DetectHazard(detector, image, subresource_range, DetectOptions::kDetectAll);
John Zulauf69133422020-05-20 14:55:53 -06001339}
1340
John Zulaufd0ec59f2021-03-13 14:25:08 -07001341HazardResult AccessContext::DetectHazard(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1342 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) const {
1343 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
1344 return DetectHazard(detector, view_gen, gen_type, DetectOptions::kDetectAll);
1345}
1346
John Zulauf69133422020-05-20 14:55:53 -06001347HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001348 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001349 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001350 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001351 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001352}
1353
John Zulauf3d84f1b2020-03-09 13:33:25 -06001354class BarrierHazardDetector {
1355 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001356 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001357 SyncStageAccessFlags src_access_scope)
1358 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1359
John Zulauf5f13a792020-03-10 07:31:21 -06001360 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1361 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001362 }
John Zulauf14940722021-04-12 15:19:02 -06001363 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001364 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001365 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001366 }
1367
1368 private:
1369 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001370 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001371 SyncStageAccessFlags src_access_scope_;
1372};
1373
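// As BarrierHazardDetector, but scoped to vkCmdWaitEvents: only map ranges intersecting the event's first scope
// (captured at the corresponding SetEvent) are checked for barrier hazards; ranges outside that scope are ignored.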
John Zulauf4a6105a2020-11-17 15:11:05 -07001374class EventBarrierHazardDetector {
1375 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001376 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001377 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
John Zulauf14940722021-04-12 15:19:02 -06001378 ResourceUsageTag scope_tag)
John Zulauf4a6105a2020-11-17 15:11:05 -07001379 : usage_index_(usage_index),
1380 src_exec_scope_(src_exec_scope),
1381 src_access_scope_(src_access_scope),
1382 event_scope_(event_scope),
1383 scope_pos_(event_scope.cbegin()),
1384 scope_end_(event_scope.cend()),
1385 scope_tag_(scope_tag) {}
1386
1387 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1388 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1389        // Need to find a more efficient way to re-sync, since we know pos->first is strictly increasing from call to call
1390 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1391 if (scope_pos_ == scope_end_) return HazardResult();
1392 if (!scope_pos_->first.intersects(pos->first)) {
1393            scope_pos_ = event_scope_.lower_bound(pos->first);  // Advance the cached scope position to the first entry not below pos->first
1394 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1395 }
1396
1397 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1398 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1399 }
John Zulauf14940722021-04-12 15:19:02 -06001400 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07001401 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
1402 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1403 }
1404
1405 private:
1406 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001407 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001408 SyncStageAccessFlags src_access_scope_;
1409 const SyncEventState::ScopeMap &event_scope_;
1410    mutable SyncEventState::ScopeMap::const_iterator scope_pos_;  // mutable: cached position advanced from within const Detect()
1411 SyncEventState::ScopeMap::const_iterator scope_end_;
John Zulauf14940722021-04-12 15:19:02 -06001412 const ResourceUsageTag scope_tag_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001413};
1414
Jeremy Gebben40a22942020-12-22 14:22:06 -07001415HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001416 const SyncStageAccessFlags &src_access_scope,
1417 const VkImageSubresourceRange &subresource_range,
1418 const SyncEventState &sync_event, DetectOptions options) const {
1419 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1420 // first access scope map to use, and there's no easy way to plumb it in below.
1421 const auto address_type = ImageAddressType(image);
1422 const auto &event_scope = sync_event.FirstScope(address_type);
1423
1424 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1425 event_scope, sync_event.first_scope_tag);
John Zulauf110413c2021-03-20 05:38:38 -06001426 return DetectHazard(detector, image, subresource_range, options);
John Zulauf4a6105a2020-11-17 15:11:05 -07001427}
1428
John Zulaufd0ec59f2021-03-13 14:25:08 -07001429HazardResult AccessContext::DetectImageBarrierHazard(const AttachmentViewGen &view_gen, const SyncBarrier &barrier,
1430 DetectOptions options) const {
1431 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, barrier.src_exec_scope.exec_scope,
1432 barrier.src_access_scope);
1433 return DetectHazard(detector, view_gen, AttachmentViewGen::Gen::kViewSubresource, options);
1434}
1435
Jeremy Gebben40a22942020-12-22 14:22:06 -07001436HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001437 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001438 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001439 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001440 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
John Zulauf110413c2021-03-20 05:38:38 -06001441 return DetectHazard(detector, image, subresource_range, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001442}
1443
Jeremy Gebben40a22942020-12-22 14:22:06 -07001444HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001445 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001446 const VkImageMemoryBarrier &barrier) const {
1447 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1448 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1449 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1450}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001451HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001452 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulauf110413c2021-03-20 05:38:38 -06001453 image_barrier.barrier.src_access_scope, image_barrier.range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001454}
John Zulauf355e49b2020-04-24 15:11:15 -06001455
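// Accumulate the union of the per-bit scopes for every flag bit set in flag_mask. The maps are ordered by bit value,
// so iteration can stop as soon as the remaining keys exceed the mask.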
John Zulauf9cb530d2019-09-30 14:14:10 -06001456template <typename Flags, typename Map>
1457SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1458 SyncStageAccessFlags scope = 0;
1459 for (const auto &bit_scope : map) {
1460 if (flag_mask < bit_scope.first) break;
1461
1462 if (flag_mask & bit_scope.first) {
1463 scope |= bit_scope.second;
1464 }
1465 }
1466 return scope;
1467}
1468
Jeremy Gebben40a22942020-12-22 14:22:06 -07001469SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001470 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1471}
1472
Jeremy Gebben40a22942020-12-22 14:22:06 -07001473SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1474 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001475}
1476
Jeremy Gebben40a22942020-12-22 14:22:06 -07001477// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1478SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001479    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1480    // accesses. (After factoring out common terms, the union of the per-stage/per-access intersections is the
1481    // intersection of the union of all stage/access types for all the stages with the same union for the access mask.)
John Zulauf9cb530d2019-09-30 14:14:10 -06001482 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1483}
1484
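// Apply "action" to every map entry overlapping "range", infilling gaps (via action.Infill) and splitting entries
// that straddle the range boundaries so the update covers exactly [range.begin, range.end). For example (sketch):
//     existing entries:  [0,10)=A              [20,30)=B
//     update of [5,25):  A is split at 5, the gap [10,20) is infilled, B is split at 25, and the action is applied
//                        to [5,10), [10,20), and [20,25).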
1485template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001486void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001487 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
1488    // that do incremental updates
John Zulauf4a6105a2020-11-17 15:11:05 -07001489 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001490 auto pos = accesses->lower_bound(range);
1491 if (pos == accesses->end() || !pos->first.intersects(range)) {
1492 // The range is empty, fill it with a default value.
1493 pos = action.Infill(accesses, pos, range);
1494 } else if (range.begin < pos->first.begin) {
1495 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001496 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001497 } else if (pos->first.begin < range.begin) {
1498 // Trim the beginning if needed
1499 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1500 ++pos;
1501 }
1502
1503 const auto the_end = accesses->end();
1504 while ((pos != the_end) && pos->first.intersects(range)) {
1505 if (pos->first.end > range.end) {
1506 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1507 }
1508
1509 pos = action(accesses, pos);
1510 if (pos == the_end) break;
1511
1512 auto next = pos;
1513 ++next;
1514 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1515 // Need to infill if next is disjoint
1516 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001517 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001518 next = action.Infill(accesses, next, new_range);
1519 }
1520 pos = next;
1521 }
1522}
John Zulaufd5115702021-01-18 12:34:33 -07001523
1524// Give a comparable interface for range generators and ranges
1525template <typename Action>
1526inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
1527 assert(range);
1528 UpdateMemoryAccessState(accesses, *range, action);
1529}
1530
John Zulauf4a6105a2020-11-17 15:11:05 -07001531template <typename Action, typename RangeGen>
1532void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1533 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001534    RangeGen &range_gen = *range_gen_arg;  // Style requires non-const arguments be passed by * not &, but deref-ing a * iterator at each use is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001535 for (; range_gen->non_empty(); ++range_gen) {
1536 UpdateMemoryAccessState(accesses, *range_gen, action);
1537 }
1538}
John Zulauf9cb530d2019-09-30 14:14:10 -06001539
John Zulaufd0ec59f2021-03-13 14:25:08 -07001540template <typename Action, typename RangeGen>
1541void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, const RangeGen &range_gen_prebuilt) {
1542 RangeGen range_gen(range_gen_prebuilt); // RangeGenerators can be expensive to create from scratch... initialize from built
1543 for (; range_gen->non_empty(); ++range_gen) {
1544 UpdateMemoryAccessState(accesses, *range_gen, action);
1545 }
1546}
John Zulauf9cb530d2019-09-30 14:14:10 -06001547struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001548 using Iterator = ResourceAccessRangeMap::iterator;
1549 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001550 // this is only called on gaps, and never returns a gap.
1551 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001552 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001553 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001554 }
John Zulauf5f13a792020-03-10 07:31:21 -06001555
John Zulauf5c5e88d2019-12-26 11:22:02 -07001556 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001557 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001558 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001559 return pos;
1560 }
1561
John Zulauf43cc7462020-12-03 12:33:12 -07001562 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf14940722021-04-12 15:19:02 -06001563 SyncOrdering ordering_rule_, ResourceUsageTag tag_)
John Zulauf8e3c3e92021-01-06 11:19:36 -07001564 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001565 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001566 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001567 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001568 const SyncOrdering ordering_rule;
John Zulauf14940722021-04-12 15:19:02 -06001569 const ResourceUsageTag tag;
John Zulauf9cb530d2019-09-30 14:14:10 -06001570};
1571
John Zulauf4a6105a2020-11-17 15:11:05 -07001572// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001573struct PipelineBarrierOp {
1574 SyncBarrier barrier;
1575 bool layout_transition;
1576 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1577 : barrier(barrier_), layout_transition(layout_transition_) {}
1578 PipelineBarrierOp() = default;
John Zulaufd5115702021-01-18 12:34:33 -07001579 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf1e331ec2020-12-04 18:29:38 -07001580 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1581};
John Zulauf4a6105a2020-11-17 15:11:05 -07001582// The barrier operation for wait events
1583struct WaitEventBarrierOp {
John Zulauf14940722021-04-12 15:19:02 -06001584 ResourceUsageTag scope_tag;
John Zulauf4a6105a2020-11-17 15:11:05 -07001585 SyncBarrier barrier;
1586 bool layout_transition;
John Zulauf14940722021-04-12 15:19:02 -06001587 WaitEventBarrierOp(const ResourceUsageTag scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1588 : scope_tag(scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
John Zulauf4a6105a2020-11-17 15:11:05 -07001589 WaitEventBarrierOp() = default;
John Zulauf14940722021-04-12 15:19:02 -06001590 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(scope_tag, barrier, layout_transition); }
John Zulauf4a6105a2020-11-17 15:11:05 -07001591};
John Zulauf1e331ec2020-12-04 18:29:38 -07001592
John Zulauf4a6105a2020-11-17 15:11:05 -07001593// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1594// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1595// of a collection is known/present.
John Zulauf1e331ec2020-12-04 18:29:38 -07001596template <typename BarrierOp>
John Zulauf89311b42020-09-29 16:28:47 -06001597class ApplyBarrierOpsFunctor {
1598 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001599 using Iterator = ResourceAccessRangeMap::iterator;
1600 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -06001601
John Zulauf5c5e88d2019-12-26 11:22:02 -07001602 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001603 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001604 for (const auto &op : barrier_ops_) {
1605 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001606 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001607
John Zulauf89311b42020-09-29 16:28:47 -06001608 if (resolve_) {
1609 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1610 // another walk
1611 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001612 }
1613 return pos;
1614 }
1615
John Zulauf89311b42020-09-29 16:28:47 -06001616 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf14940722021-04-12 15:19:02 -06001617 ApplyBarrierOpsFunctor(bool resolve, size_t size_hint, ResourceUsageTag tag) : resolve_(resolve), barrier_ops_(), tag_(tag) {
John Zulaufd5115702021-01-18 12:34:33 -07001618 barrier_ops_.reserve(size_hint);
1619 }
1620 void EmplaceBack(const BarrierOp &op) { barrier_ops_.emplace_back(op); }
John Zulauf89311b42020-09-29 16:28:47 -06001621
1622 private:
1623 bool resolve_;
John Zulaufd5115702021-01-18 12:34:33 -07001624 std::vector<BarrierOp> barrier_ops_;
John Zulauf14940722021-04-12 15:19:02 -06001625 const ResourceUsageTag tag_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001626};
1627
John Zulauf4a6105a2020-11-17 15:11:05 -07001628// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1629// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1630template <typename BarrierOp>
1631class ApplyBarrierFunctor {
1632 public:
1633 using Iterator = ResourceAccessRangeMap::iterator;
1634 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1635
1636 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1637 auto &access_state = pos->second;
1638 barrier_op_(&access_state);
1639 return pos;
1640 }
1641
1642 ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
1643
1644 private:
John Zulaufd5115702021-01-18 12:34:33 -07001645 BarrierOp barrier_op_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001646};
1647
John Zulauf1e331ec2020-12-04 18:29:38 -07001648// This functor resolves the pending state.
1649class ResolvePendingBarrierFunctor {
1650 public:
1651 using Iterator = ResourceAccessRangeMap::iterator;
1652 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1653
1654 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1655 auto &access_state = pos->second;
1656 access_state.ApplyPendingBarriers(tag_);
1657 return pos;
1658 }
1659
John Zulauf14940722021-04-12 15:19:02 -06001660 ResolvePendingBarrierFunctor(ResourceUsageTag tag) : tag_(tag) {}
John Zulauf1e331ec2020-12-04 18:29:38 -07001661
1662 private:
John Zulauf14940722021-04-12 15:19:02 -06001663 const ResourceUsageTag tag_;
John Zulauf9cb530d2019-09-30 14:14:10 -06001664};
1665
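// The UpdateAccessState overloads record a usage (current_usage with ordering_rule) at "tag" for a raw address range,
// a buffer, an image subresource range (optionally restricted by offset/extent), or a prebuilt attachment view
// generator. All of them funnel into UpdateMemoryAccessStateFunctor over the appropriate access state map.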
John Zulauf8e3c3e92021-01-06 11:19:36 -07001666void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001667 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001668 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001669 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001670}
1671
John Zulauf8e3c3e92021-01-06 11:19:36 -07001672void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf14940722021-04-12 15:19:02 -06001673 const ResourceAccessRange &range, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001674 if (!SimpleBinding(buffer)) return;
1675 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001676 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001677}
John Zulauf355e49b2020-04-24 15:11:15 -06001678
John Zulauf8e3c3e92021-01-06 11:19:36 -07001679void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf110413c2021-03-20 05:38:38 -06001680 const VkImageSubresourceRange &subresource_range, const ResourceUsageTag &tag) {
1681 if (!SimpleBinding(image)) return;
1682 const auto base_address = ResourceBaseAddress(image);
1683 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1684 const auto address_type = ImageAddressType(image);
1685 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1686 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
1687}
1688void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001689 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf14940722021-04-12 15:19:02 -06001690 const VkExtent3D &extent, const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001691 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001692 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001693 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1694 base_address);
1695 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001696 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf110413c2021-03-20 05:38:38 -06001697 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001698}
John Zulaufd0ec59f2021-03-13 14:25:08 -07001699
1700void AccessContext::UpdateAccessState(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
John Zulauf14940722021-04-12 15:19:02 -06001701 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001702 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1703 if (!gen) return;
1704 subresource_adapter::ImageRangeGenerator range_gen(*gen);
1705 const auto address_type = view_gen.GetAddressType();
1706 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1707 ApplyUpdateAction(address_type, action, &range_gen);
John Zulauf7635de32020-05-29 17:14:15 -06001708}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001709
John Zulauf8e3c3e92021-01-06 11:19:36 -07001710void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001711 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
John Zulauf14940722021-04-12 15:19:02 -06001712 const VkExtent3D &extent, const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001713 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1714 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001715 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001716}
1717
John Zulaufd0ec59f2021-03-13 14:25:08 -07001718template <typename Action, typename RangeGen>
1719void AccessContext::ApplyUpdateAction(AccessAddressType address_type, const Action &action, RangeGen *range_gen_arg) {
1720    assert(range_gen_arg);  // The old Google C++ style guide requires non-const objects be passed by * not &, but this isn't an optional arg.
1721 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, range_gen_arg);
John Zulauf540266b2020-04-06 18:54:53 -06001722}
1723
1724template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001725void AccessContext::ApplyUpdateAction(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, const Action &action) {
1726 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1727 if (!gen) return;
1728 UpdateMemoryAccessState(&GetAccessStateMap(view_gen.GetAddressType()), action, *gen);
John Zulauf540266b2020-04-06 18:54:53 -06001729}
1730
John Zulaufd0ec59f2021-03-13 14:25:08 -07001731void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state,
1732 const AttachmentViewGenVector &attachment_views, uint32_t subpass,
John Zulauf14940722021-04-12 15:19:02 -06001733 const ResourceUsageTag tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001734 UpdateStateResolveAction update(*this, tag);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001735 ResolveOperation(update, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001736}
1737
John Zulaufd0ec59f2021-03-13 14:25:08 -07001738void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
John Zulauf14940722021-04-12 15:19:02 -06001739 uint32_t subpass, const ResourceUsageTag tag) {
John Zulaufaff20662020-06-01 14:07:58 -06001740 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001741
1742 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1743 if (rp_state.attachment_last_subpass[i] == subpass) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001744 const auto &view_gen = attachment_views[i];
1745 if (!view_gen.IsValid()) continue; // UNUSED
John Zulaufaff20662020-06-01 14:07:58 -06001746
1747 const auto &ci = attachment_ci[i];
1748 const bool has_depth = FormatHasDepth(ci.format);
1749 const bool has_stencil = FormatHasStencil(ci.format);
1750 const bool is_color = !(has_depth || has_stencil);
John Zulauf57261402021-08-13 11:32:06 -06001751 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001752
1753 if (is_color && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001754 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
1755 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001756 } else {
John Zulaufaff20662020-06-01 14:07:58 -06001757 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001758 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1759 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001760 }
John Zulauf57261402021-08-13 11:32:06 -06001761 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_EXT;
John Zulaufaff20662020-06-01 14:07:58 -06001762 if (has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001763 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1764 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001765 }
1766 }
1767 }
1768 }
1769}
1770
John Zulauf540266b2020-04-06 18:54:53 -06001771template <typename Action>
John Zulaufd5115702021-01-18 12:34:33 -07001772void AccessContext::ApplyToContext(const Action &barrier_action) {
John Zulauf540266b2020-04-06 18:54:53 -06001773    // Note: Barriers do *not* cross context boundaries, applying to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001774 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001775 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001776 }
1777}
1778
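// Import each subpass context into this (the enclosing) context, applying the subpass's dst external trackback
// barriers as the accesses are resolved.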
1779void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001780 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1781 auto &context = contexts[subpass_index];
John Zulauf22aefed2021-03-11 18:14:35 -07001782 ApplyTrackbackStackAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001783 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001784 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001785 }
1786 }
1787}
1788
John Zulauf4fa68462021-04-26 21:04:22 -06001789// Caller must ensure that the lifespan of this context is shorter than that of "from"
1790void AccessContext::ImportAsyncContexts(const AccessContext &from) { async_ = from.async_; }
1791
John Zulauf355e49b2020-04-24 15:11:15 -06001792// Suitable only for *subpass* access contexts
John Zulaufd0ec59f2021-03-13 14:25:08 -07001793HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const AttachmentViewGen &attach_view) const {
1794 if (!attach_view.IsValid()) return HazardResult();
John Zulauf355e49b2020-04-24 15:11:15 -06001795
John Zulauf355e49b2020-04-24 15:11:15 -06001796 // We should never ask for a transition from a context we don't have
John Zulauf7635de32020-05-29 17:14:15 -06001797 assert(track_back.context);
John Zulauf355e49b2020-04-24 15:11:15 -06001798
1799 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001800 // Hazard detection for the transition can be against the merged of the barriers (it only uses src_...)
1801 const auto merged_barrier = MergeBarriers(track_back.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001802 HazardResult hazard = track_back.context->DetectImageBarrierHazard(attach_view, merged_barrier, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001803 if (!hazard.hazard) {
1804 // The Async hazard check is against the current context's async set.
John Zulaufd0ec59f2021-03-13 14:25:08 -07001805 hazard = DetectImageBarrierHazard(attach_view, merged_barrier, kDetectAsync);
John Zulauf355e49b2020-04-24 15:11:15 -06001806 }
John Zulaufa0a98292020-09-18 09:30:10 -06001807
John Zulauf355e49b2020-04-24 15:11:15 -06001808 return hazard;
1809}
1810
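// Record the layout transitions for "subpass": import attachment state from each previous subpass context with the
// transition barriers left pending, then resolve all pending barriers at "tag" in a single pass over the maps.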
John Zulaufb02c1eb2020-10-06 16:33:36 -06001811void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
John Zulauf14940722021-04-12 15:19:02 -06001812 const AttachmentViewGenVector &attachment_views, const ResourceUsageTag tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001813 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001814 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001815 for (const auto &transition : transitions) {
1816 const auto prev_pass = transition.prev_pass;
John Zulaufd0ec59f2021-03-13 14:25:08 -07001817 const auto &view_gen = attachment_views[transition.attachment];
1818 if (!view_gen.IsValid()) continue;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001819
1820 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1821 assert(trackback);
1822
1823 // Import the attachments into the current context
1824 const auto *prev_context = trackback->context;
1825 assert(prev_context);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001826 const auto address_type = view_gen.GetAddressType();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001827 auto &target_map = GetAccessStateMap(address_type);
1828 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001829 prev_context->ResolveAccessRange(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action, &target_map,
1830 &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001831 }
1832
John Zulauf86356ca2020-10-19 11:46:41 -06001833 // If there were no transitions skip this global map walk
1834 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001835 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulaufd5115702021-01-18 12:34:33 -07001836 ApplyToContext(apply_pending_action);
John Zulauf86356ca2020-10-19 11:46:41 -06001837 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001838}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001839
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001840void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulauf669dfd52021-01-27 17:15:28 -07001841 auto *events_context = GetCurrentEventsContext();
1842 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06001843 events_context->ApplyBarrier(src, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001844}
1845
locke-lunarg61870c22020-06-09 14:51:50 -06001846bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1847 const char *func_name) const {
1848 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001849 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001850 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001851 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001852 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001853 return skip;
1854 }
1855
1856 using DescriptorClass = cvdescriptorset::DescriptorClass;
1857 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1858 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1859 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1860 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1861
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001862 for (const auto &stage_state : pipe->stage_state) {
Jeremy Gebben11af9792021-08-20 10:20:09 -06001863 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->create_info.graphics.pRasterizationState &&
1864 pipe->create_info.graphics.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001865 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001866 }
locke-lunarg61870c22020-06-09 14:51:50 -06001867 for (const auto &set_binding : stage_state.descriptor_uses) {
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001868 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set;
locke-lunarg61870c22020-06-09 14:51:50 -06001869 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001870 set_binding.first.binding);
locke-lunarg61870c22020-06-09 14:51:50 -06001871 const auto descriptor_type = binding_it.GetType();
1872 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1873 auto array_idx = 0;
1874
1875 if (binding_it.IsVariableDescriptorCount()) {
1876 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1877 }
1878 SyncStageAccessIndex sync_index =
1879 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1880
1881 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1882 uint32_t index = i - index_range.start;
1883 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1884 switch (descriptor->GetClass()) {
1885 case DescriptorClass::ImageSampler:
1886 case DescriptorClass::Image: {
1887 const IMAGE_VIEW_STATE *img_view_state = nullptr;
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001888 VkImageLayout image_layout;
locke-lunarg61870c22020-06-09 14:51:50 -06001889 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001890 const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
1891 img_view_state = image_sampler_descriptor->GetImageViewState();
1892 image_layout = image_sampler_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001893 } else {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001894 const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1895 img_view_state = image_descriptor->GetImageViewState();
1896 image_layout = image_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001897 }
1898 if (!img_view_state) continue;
John Zulauf361fb532020-07-22 10:45:39 -06001899 HazardResult hazard;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06001900 // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
1901 // Descriptors, so we do not have to worry about depth slicing here.
1902 // See: VUID 00343
1903 assert(!img_view_state->IsDepthSliced());
John Zulauf110413c2021-03-20 05:38:38 -06001904 const IMAGE_STATE *img_state = img_view_state->image_state.get();
John Zulauf361fb532020-07-22 10:45:39 -06001905 const auto &subresource_range = img_view_state->normalized_subresource_range;
John Zulauf110413c2021-03-20 05:38:38 -06001906
1907 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1908 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1909 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
John Zulauf361fb532020-07-22 10:45:39 -06001910 // Input attachments are subject to raster ordering rules
1911 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001912 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001913 } else {
John Zulauf110413c2021-03-20 05:38:38 -06001914 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range);
John Zulauf361fb532020-07-22 10:45:39 -06001915 }
John Zulauf110413c2021-03-20 05:38:38 -06001916
John Zulauf33fc1d52020-07-17 11:01:10 -06001917 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001918 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001919 img_view_state->image_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001920 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1921 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001922 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001923 sync_state_->report_data->FormatHandle(img_view_state->image_view()).c_str(),
1924 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1925 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001926 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1927 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001928 set_binding.first.binding, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001929 }
1930 break;
1931 }
1932 case DescriptorClass::TexelBuffer: {
1933 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1934 if (!buf_view_state) continue;
1935 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001936 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001937 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001938 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001939 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001940 buf_view_state->buffer_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001941 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1942 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001943 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view()).c_str(),
1944 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1945 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001946 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001947 string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001948 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001949 }
1950 break;
1951 }
1952 case DescriptorClass::GeneralBuffer: {
1953 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1954 auto buf_state = buffer_descriptor->GetBufferState();
1955 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001956 const ResourceAccessRange range =
1957 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001958 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001959 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001960 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001961 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001962 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1963 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001964 sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
1965 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1966 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001967 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06001968 string_VkDescriptorType(descriptor_type), set_binding.first.binding, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001969 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001970 }
1971 break;
1972 }
1973 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1974 default:
1975 break;
1976 }
1977 }
1978 }
1979 }
1980 return skip;
1981}
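// In outline, the traversal above is (a restatement of the code, with the descriptor classes simplified):
//
//   for each shader stage in the bound pipeline:
//       for each (set, binding) the stage statically uses:
//           sync_index = GetSyncStageAccessIndexsByDescriptorSet(type, usage, stage)
//           for each array element of the binding:
//               Image/ImageSampler -> DetectHazard over the view's normalized subresource range
//               TexelBuffer        -> DetectHazard over the buffer view's range
//               GeneralBuffer      -> DetectHazard over [offset, offset + range)
//
// Input attachments additionally use SyncOrdering::kRaster, since they obey rasterization-order guarantees.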
1982
1983void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
John Zulauf14940722021-04-12 15:19:02 -06001984 const ResourceUsageTag tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001985 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001986 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001987 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001988 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001989 return;
1990 }
1991
1992 using DescriptorClass = cvdescriptorset::DescriptorClass;
1993 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1994 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1995 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1996 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1997
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001998 for (const auto &stage_state : pipe->stage_state) {
Jeremy Gebben11af9792021-08-20 10:20:09 -06001999 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->create_info.graphics.pRasterizationState &&
2000 pipe->create_info.graphics.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06002001 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002002 }
locke-lunarg61870c22020-06-09 14:51:50 -06002003 for (const auto &set_binding : stage_state.descriptor_uses) {
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06002004 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.set].bound_descriptor_set;
locke-lunarg61870c22020-06-09 14:51:50 -06002005 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
Jeremy Gebben7fc88a22021-08-25 13:30:45 -06002006 set_binding.first.binding);
locke-lunarg61870c22020-06-09 14:51:50 -06002007 const auto descriptor_type = binding_it.GetType();
2008 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
2009 auto array_idx = 0;
2010
2011 if (binding_it.IsVariableDescriptorCount()) {
2012 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
2013 }
2014 SyncStageAccessIndex sync_index =
2015 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
2016
2017 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
2018 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
2019 switch (descriptor->GetClass()) {
2020 case DescriptorClass::ImageSampler:
2021 case DescriptorClass::Image: {
2022 const IMAGE_VIEW_STATE *img_view_state = nullptr;
2023 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
2024 img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
2025 } else {
2026 img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
2027 }
2028 if (!img_view_state) continue;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06002029 // NOTE: 2D ImageViews of VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT Images are not allowed in
2030 // Descriptors, so we do not have to worry about depth slicing here.
2031 // See: VUID 00343
2032 assert(!img_view_state->IsDepthSliced());
locke-lunarg61870c22020-06-09 14:51:50 -06002033 const IMAGE_STATE *img_state = img_view_state->image_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002034 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
John Zulauf110413c2021-03-20 05:38:38 -06002035 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
2036 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
2037 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kRaster,
2038 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002039 } else {
John Zulauf110413c2021-03-20 05:38:38 -06002040 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kNonAttachment,
2041 img_view_state->normalized_subresource_range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002042 }
locke-lunarg61870c22020-06-09 14:51:50 -06002043 break;
2044 }
2045 case DescriptorClass::TexelBuffer: {
2046 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
2047 if (!buf_view_state) continue;
2048 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002049 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002050 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002051 break;
2052 }
2053 case DescriptorClass::GeneralBuffer: {
2054 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
2055 auto buf_state = buffer_descriptor->GetBufferState();
2056 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06002057 const ResourceAccessRange range =
2058 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07002059 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002060 break;
2061 }
2062 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2063 default:
2064 break;
2065 }
2066 }
2067 }
2068 }
2069}
2070
2071bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
2072 bool skip = false;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002073 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002074 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002075 return skip;
2076 }
2077
2078 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2079 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002080 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002081
2082 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002083 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002084 if (binding_description.binding < binding_buffers_size) {
2085 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002086 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002087
locke-lunarg1ae57d62020-11-18 10:49:19 -07002088 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002089 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2090 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002091 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002092 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002093 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002094 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
2095 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
2096 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002097 }
2098 }
2099 }
2100 return skip;
2101}
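// Illustrative example of the vertex range computation above (hypothetical values; assumes GetBufferRange
// clamps to the buffer size and treats a UINT32_MAX count as "through the end of the buffer"):
//
//   binding offset = 256, stride = 16, firstVertex = 2, vertexCount = 3
//   range = [256 + 2 * 16, 256 + 2 * 16 + 3 * 16) = [288, 336)
//
// Only prior accesses overlapping those 48 bytes are considered for vertex attribute read hazards.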
2102
John Zulauf14940722021-04-12 15:19:02 -06002103void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002104 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002105 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002106 return;
2107 }
2108 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2109 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002110 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002111
2112 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002113 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002114 if (binding_description.binding < binding_buffers_size) {
2115 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002116 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002117
locke-lunarg1ae57d62020-11-18 10:49:19 -07002118 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002119 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2120 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002121 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
2122 SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002123 }
2124 }
2125}
2126
2127bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2128 bool skip = false;
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002129 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002130 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002131 }
locke-lunarg61870c22020-06-09 14:51:50 -06002132
locke-lunarg1ae57d62020-11-18 10:49:19 -07002133 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002134 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002135 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2136 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002137 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002138 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002139 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002140 index_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
2141 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer()).c_str(),
2142 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002143 }
2144
2145 // TODO: For now, we validate the whole vertex buffer, since the index buffer contents (and thus the vertices read) can change up until queue submit.
2146 // We will detect a more accurate range in the future.
2147 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2148 return skip;
2149}
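// Illustrative example of the index range computation above (hypothetical values; GetIndexAlignment is assumed
// to return the index size in bytes, e.g. 2 for VK_INDEX_TYPE_UINT16):
//
//   index_buffer_binding.offset = 0, indexType = VK_INDEX_TYPE_UINT16, firstIndex = 4, indexCount = 6
//   range = [0 + 4 * 2, 0 + 4 * 2 + 6 * 2) = [8, 20)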
2150
John Zulauf14940722021-04-12 15:19:02 -06002151void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag tag) {
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002152 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002153
locke-lunarg1ae57d62020-11-18 10:49:19 -07002154 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002155 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002156 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2157 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002158 current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002159
2160 // TODO: For now, we record access to the whole vertex buffer, since the index buffer contents (and thus the vertices read) can change up until queue submit.
2161 // We will detect a more accurate range in the future.
2162 RecordDrawVertex(UINT32_MAX, 0, tag);
2163}
2164
2165bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002166 bool skip = false;
2167 if (!current_renderpass_context_) return skip;
John Zulauf64ffe552021-02-06 10:25:07 -07002168 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
locke-lunarg7077d502020-06-18 21:37:26 -06002169 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002170}
2171
John Zulauf14940722021-04-12 15:19:02 -06002172void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002173 if (current_renderpass_context_) {
John Zulauf64ffe552021-02-06 10:25:07 -07002174 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002175 }
locke-lunarg61870c22020-06-09 14:51:50 -06002176}
2177
John Zulauf64ffe552021-02-06 10:25:07 -07002178void CommandBufferAccessContext::RecordBeginRenderPass(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2179 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
John Zulauf14940722021-04-12 15:19:02 -06002180 const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002181 // Create an access context for the current renderpass.
John Zulauf64ffe552021-02-06 10:25:07 -07002182 render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
John Zulauf16adfc92020-04-08 10:28:33 -06002183 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf64ffe552021-02-06 10:25:07 -07002184 current_renderpass_context_->RecordBeginRenderPass(tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002185 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf16adfc92020-04-08 10:28:33 -06002186}
2187
John Zulauf8eda1562021-04-13 17:06:41 -06002188void CommandBufferAccessContext::RecordNextSubpass(ResourceUsageTag prev_tag, ResourceUsageTag next_tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06002189 assert(current_renderpass_context_);
John Zulauf64ffe552021-02-06 10:25:07 -07002190 current_renderpass_context_->RecordNextSubpass(prev_tag, next_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002191 current_context_ = &current_renderpass_context_->CurrentContext();
2192}
2193
John Zulauf8eda1562021-04-13 17:06:41 -06002194void CommandBufferAccessContext::RecordEndRenderPass(const ResourceUsageTag tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06002195 assert(current_renderpass_context_);
2196 if (!current_renderpass_context_) return;
2197
John Zulauf8eda1562021-04-13 17:06:41 -06002198 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002199 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002200 current_renderpass_context_ = nullptr;
2201}
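// Illustrative call sequence for the render pass context lifecycle above (the vkCmd* names show where the
// state tracker typically invokes these Record* methods; a simplification, not the dispatch code itself):
//
//   vkCmdBeginRenderPass -> RecordBeginRenderPass(...)  // push a RenderPassAccessContext, current is subpass 0
//   vkCmdNextSubpass     -> RecordNextSubpass(...)      // resolve/store for the previous subpass, then advance
//   vkCmdEndRenderPass   -> RecordEndRenderPass(...)    // import final state into cb_access_context_, pop the RP context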
2202
John Zulauf4a6105a2020-11-17 15:11:05 -07002203void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
2204 // Erase is okay with the key not being present
John Zulauf669dfd52021-01-27 17:15:28 -07002205 const auto *event_state = sync_state_->Get<EVENT_STATE>(event);
2206 if (event_state) {
2207 GetCurrentEventsContext()->Destroy(event_state);
John Zulaufd5115702021-01-18 12:34:33 -07002208 }
2209}
2210
John Zulaufae842002021-04-15 18:20:55 -06002211// Note: 'this' is the recorded cb context; hazards are validated against the passed-in proxy (executing) context
John Zulauf4fa68462021-04-26 21:04:22 -06002212bool CommandBufferAccessContext::ValidateFirstUse(CommandBufferAccessContext *proxy_context, const char *func_name,
2213 uint32_t index) const {
2214 assert(proxy_context);
2215 auto *events_context = proxy_context->GetCurrentEventsContext();
2216 auto *access_context = proxy_context->GetCurrentAccessContext();
2217 const ResourceUsageTag base_tag = proxy_context->GetTagLimit();
John Zulaufae842002021-04-15 18:20:55 -06002218 bool skip = false;
2219 ResourceUsageRange tag_range = {0, 0};
2220 const AccessContext *recorded_context = GetCurrentAccessContext();
2221 assert(recorded_context);
2222 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06002223 auto log_msg = [this](const HazardResult &hazard, const CommandBufferAccessContext &active_context, const char *func_name,
John Zulaufae842002021-04-15 18:20:55 -06002224 uint32_t index) {
2225 const auto cb_handle = active_context.cb_state_->commandBuffer();
2226 const auto recorded_handle = cb_state_->commandBuffer();
John Zulauf4fa68462021-04-26 21:04:22 -06002227 const auto *report_data = sync_state_->report_data;
John Zulaufae842002021-04-15 18:20:55 -06002228 return sync_state_->LogError(cb_handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf4fa68462021-04-26 21:04:22 -06002229 "%s: Hazard %s for entry %" PRIu32 ", %s, Recorded access info %s. Access info %s.", func_name,
2230 string_SyncHazard(hazard.hazard), index, report_data->FormatHandle(recorded_handle).c_str(),
2231 FormatUsage(*hazard.recorded_access).c_str(), active_context.FormatUsage(hazard).c_str());
John Zulaufae842002021-04-15 18:20:55 -06002232 };
2233 for (const auto &sync_op : sync_ops_) {
John Zulauf4fa68462021-04-26 21:04:22 -06002234 // We update the range to include any layout transition first-use writes,
2235 // as they are stored along with the source scope (as an effective barrier) when recorded
2236 tag_range.end = sync_op.tag + 1;
2237
John Zulaufae842002021-04-15 18:20:55 -06002238 hazard = recorded_context->DetectFirstUseHazard(tag_range, *access_context);
2239 if (hazard.hazard) {
John Zulauf4fa68462021-04-26 21:04:22 -06002240 skip |= log_msg(hazard, *proxy_context, func_name, index);
John Zulaufae842002021-04-15 18:20:55 -06002241 }
2242 // NOTE: Add call to replay validate here when we add support for syncop with non-trivial replay
John Zulauf4fa68462021-04-26 21:04:22 -06002243 // Record the barrier into the proxy context.
2244 sync_op.sync_op->DoRecord(base_tag + sync_op.tag, access_context, events_context);
2245 tag_range.begin = tag_range.end;
John Zulaufae842002021-04-15 18:20:55 -06002246 }
2247
2248 // and anything after the last syncop
John Zulaufae842002021-04-15 18:20:55 -06002249 tag_range.end = ResourceUsageRecord::kMaxIndex;
2250 hazard = recorded_context->DetectFirstUseHazard(tag_range, *access_context);
2251 if (hazard.hazard) {
John Zulauf4fa68462021-04-26 21:04:22 -06002252 skip |= log_msg(hazard, *proxy_context, func_name, index);
John Zulaufae842002021-04-15 18:20:55 -06002253 }
2254
2255 return skip;
2256}
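// In outline, the replay loop above is (a restatement of the code):
//
//   tag_range = [0, sync_op.tag + 1)                  // recorded accesses up to and including this sync op
//   DetectFirstUseHazard(tag_range, proxy)            // check that slice against the executing context
//   sync_op.sync_op->DoRecord(base_tag + tag, ...)    // replay the barrier/event into the proxy
//   tag_range.begin = tag_range.end                   // advance to the next slice
//
// followed by one final check for the accesses recorded after the last sync op.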
2257
John Zulauf4fa68462021-04-26 21:04:22 -06002258void CommandBufferAccessContext::RecordExecutedCommandBuffer(const CommandBufferAccessContext &recorded_cb_context, CMD_TYPE cmd) {
2259 auto *events_context = GetCurrentEventsContext();
2260 auto *access_context = GetCurrentAccessContext();
2261 const AccessContext *recorded_context = recorded_cb_context.GetCurrentAccessContext();
2262 assert(recorded_context);
2263
2264 // Just run through the barriers ignoring the usage from the recorded context, as Resolve will overwrite outdated state
2265 const ResourceUsageTag base_tag = GetTagLimit();
2266 for (const auto &sync_op : recorded_cb_context.sync_ops_) {
2267 // We update the range to include any layout transition first-use writes,
2268 // as they are stored along with the source scope (as an effective barrier) when recorded
2269 sync_op.sync_op->DoRecord(base_tag + sync_op.tag, access_context, events_context);
2270 }
2271
2272 ResourceUsageRange tag_range = ImportRecordedAccessLog(recorded_cb_context);
2273 assert(base_tag == tag_range.begin); // to ensure the to offset calculation agree
2274 ResolveRecordedContext(*recorded_context, tag_range.begin);
2275}
2276
2277void CommandBufferAccessContext::ResolveRecordedContext(const AccessContext &recorded_context, ResourceUsageTag offset) {
2278 auto tag_offset = [offset](ResourceAccessState *access) { access->OffsetTag(offset); };
2279
2280 auto *access_context = GetCurrentAccessContext();
2281 for (auto address_type : kAddressTypes) {
2282 recorded_context.ResolveAccessRange(address_type, kFullRange, tag_offset, &access_context->GetAccessStateMap(address_type),
2283 nullptr, false);
2284 }
2285}
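// Illustrative example of the tag re-basing above (hypothetical values): if the primary command buffer has
// already logged 100 usages (GetTagLimit() == 100) and a recorded access in the secondary carries tag 5,
// OffsetTag(100) rewrites it to 105 so it indexes the combined access_log_ built by ImportRecordedAccessLog.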
2286
2287ResourceUsageRange CommandBufferAccessContext::ImportRecordedAccessLog(const CommandBufferAccessContext &recorded_context) {
2288 // The execution references ensure the lifespan of the referenced child CBs...
2289 ResourceUsageRange tag_range(GetTagLimit(), 0);
2290 const auto &rec_cb = recorded_context.cb_state_;
2291 const CMD_BUFFER_STATE *const_rec_cb_plain = rec_cb.get();
2292 cb_execution_reference_.emplace(const_rec_cb_plain, CmdBufReference(rec_cb, recorded_context.reset_count_));
2293 access_log_.insert(access_log_.end(), recorded_context.access_log_.cbegin(), recorded_context.access_log_.end());
2294 tag_range.end = access_log_.size();
2295 return tag_range;
2296}
2297
John Zulaufae842002021-04-15 18:20:55 -06002298class HazardDetectFirstUse {
2299 public:
2300 HazardDetectFirstUse(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range)
2301 : recorded_use_(recorded_use), tag_range_(tag_range) {}
2302 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
2303 return pos->second.DetectHazard(recorded_use_, tag_range_);
2304 }
2305 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, ResourceUsageTag start_tag) const {
2306 return pos->second.DetectAsyncHazard(recorded_use_, tag_range_, start_tag);
2307 }
2308
2309 private:
2310 const ResourceAccessState &recorded_use_;
2311 const ResourceUsageRange &tag_range_;
2312};
2313
2314// This is called on the *recorded* command buffer's access context, with the *active* access context passed in, against which
2315// hazards will be detected
2316HazardResult AccessContext::DetectFirstUseHazard(const ResourceUsageRange &tag_range, const AccessContext &access_context) const {
2317 HazardResult hazard;
2318 for (const auto address_type : kAddressTypes) {
2319 const auto &recorded_access_map = GetAccessStateMap(address_type);
2320 for (const auto &recorded_access : recorded_access_map) {
2321 // Cull any entries not in the current tag range
2322 if (!recorded_access.second.FirstAccessInTagRange(tag_range)) continue;
2323 HazardDetectFirstUse detector(recorded_access.second, tag_range);
2324 hazard = access_context.DetectHazard(address_type, detector, recorded_access.first, DetectOptions::kDetectAll);
2325 if (hazard.hazard) break;
2326 }
2327 }
2328
2329 return hazard;
2330}
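// Note: entries whose first accesses fall outside tag_range are culled above, so each slice of the recorded
// command buffer (between sync ops) is compared against the active context only once, with later slices seeing
// the barriers the caller has already replayed into that context.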
2331
John Zulauf64ffe552021-02-06 10:25:07 -07002332bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &ex_context, const CMD_BUFFER_STATE &cmd,
John Zulauffaea0ee2021-01-14 14:01:32 -07002333 const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002334 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002335 const auto &sync_state = ex_context.GetSyncState();
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002336 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002337 if (!pipe) {
2338 return skip;
2339 }
2340
2341 const auto &create_info = pipe->create_info.graphics;
2342 if (create_info.pRasterizationState && create_info.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002343 return skip;
2344 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002345 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002346 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg37047832020-06-12 13:44:45 -06002347
John Zulauf1a224292020-06-30 14:52:13 -06002348 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002349 // The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002350 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2351 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002352 if (location >= subpass.colorAttachmentCount ||
2353 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002354 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002355 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002356 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2357 if (!view_gen.IsValid()) continue;
2358 HazardResult hazard =
2359 current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
2360 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment);
locke-lunarg96dc9632020-06-10 17:22:18 -06002361 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002362 const VkImageView view_handle = view_gen.GetViewState()->image_view();
John Zulaufd0ec59f2021-03-13 14:25:08 -07002363 skip |= sync_state.LogError(view_handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002364 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002365 func_name, string_SyncHazard(hazard.hazard),
John Zulaufd0ec59f2021-03-13 14:25:08 -07002366 sync_state.report_data->FormatHandle(view_handle).c_str(),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002367 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002368 location, ex_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002369 }
2370 }
2371 }
locke-lunarg37047832020-06-12 13:44:45 -06002372
2373 // PHASE1 TODO: Add layout-based read vs. write selection.
2374 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002375 const uint32_t depth_stencil_attachment =
Jeremy Gebben11af9792021-08-20 10:20:09 -06002376 GetSubpassDepthStencilAttachmentIndex(pipe->create_info.graphics.pDepthStencilState, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002377
2378 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2379 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2380 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002381 bool depth_write = false, stencil_write = false;
2382
2383 // PHASE1 TODO: These validations should be in core_checks.
Jeremy Gebben11af9792021-08-20 10:20:09 -06002384 if (!FormatIsStencilOnly(view_state.create_info.format) && create_info.pDepthStencilState->depthTestEnable &&
2385 create_info.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002386 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2387 depth_write = true;
2388 }
2389 // PHASE1 TODO: This needs to check whether the stencil is writable.
2390 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2391 // If the depth test is disabled, it is treated as if the depth test passes, so depthFailOp doesn't run.
2392 // PHASE1 TODO: These validations should be in core_checks.
Jeremy Gebben11af9792021-08-20 10:20:09 -06002393 if (!FormatIsDepthOnly(view_state.create_info.format) && create_info.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002394 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2395 stencil_write = true;
2396 }
2397
2398 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2399 if (depth_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002400 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
2401 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2402 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002403 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002404 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002405 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002406 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002407 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002408 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2409 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002410 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002411 }
2412 }
2413 if (stencil_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002414 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
2415 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2416 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002417 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002418 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002419 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002420 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002421 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002422 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2423 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002424 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002425 }
locke-lunarg61870c22020-06-09 14:51:50 -06002426 }
2427 }
2428 return skip;
2429}
2430
John Zulauf14940722021-04-12 15:19:02 -06002431void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002432 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Jeremy Gebben11af9792021-08-20 10:20:09 -06002433 if (!pipe) {
2434 return;
2435 }
2436
2437 const auto &create_info = pipe->create_info.graphics;
2438 if (create_info.pRasterizationState && create_info.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002439 return;
2440 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002441 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002442 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg61870c22020-06-09 14:51:50 -06002443
John Zulauf1a224292020-06-30 14:52:13 -06002444 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002445 // The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002446 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2447 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002448 if (location >= subpass.colorAttachmentCount ||
2449 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002450 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002451 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002452 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2453 current_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
2454 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment,
2455 tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002456 }
2457 }
locke-lunarg37047832020-06-12 13:44:45 -06002458
2459 // PHASE1 TODO: Add layout-based read vs. write selection.
2460 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002461 const uint32_t depth_stencil_attachment =
Jeremy Gebben11af9792021-08-20 10:20:09 -06002462 GetSubpassDepthStencilAttachmentIndex(create_info.pDepthStencilState, subpass.pDepthStencilAttachment);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002463 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2464 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2465 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002466 bool depth_write = false, stencil_write = false;
John Zulaufd0ec59f2021-03-13 14:25:08 -07002467 const bool has_depth = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT);
2468 const bool has_stencil = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002469
2470 // PHASE1 TODO: These validations should be in core_checks.
Jeremy Gebben11af9792021-08-20 10:20:09 -06002471 if (has_depth && !FormatIsStencilOnly(view_state.create_info.format) && create_info.pDepthStencilState->depthTestEnable &&
2472 create_info.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002473 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2474 depth_write = true;
2475 }
2476 // PHASE1 TODO: This needs to check whether the stencil is writable.
2477 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2478 // If the depth test is disabled, it is treated as if the depth test passes, so depthFailOp doesn't run.
2479 // PHASE1 TODO: These validations should be in core_checks.
Jeremy Gebben11af9792021-08-20 10:20:09 -06002480 if (has_stencil && !FormatIsDepthOnly(view_state.create_info.format) && create_info.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002481 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2482 stencil_write = true;
2483 }
2484
John Zulaufd0ec59f2021-03-13 14:25:08 -07002485 if (depth_write || stencil_write) {
2486 const auto ds_gentype = view_gen.GetDepthStencilRenderAreaGenType(depth_write, stencil_write);
2487 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2488 current_context.UpdateAccessState(view_gen, ds_gentype, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2489 SyncOrdering::kDepthStencilAttachment, tag);
locke-lunarg37047832020-06-12 13:44:45 -06002490 }
locke-lunarg61870c22020-06-09 14:51:50 -06002491 }
2492}
2493
John Zulauf64ffe552021-02-06 10:25:07 -07002494bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002495 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002496 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002497 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002498 current_subpass_);
John Zulauf64ffe552021-02-06 10:25:07 -07002499 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002500 func_name);
2501
John Zulauf355e49b2020-04-24 15:11:15 -06002502 const auto next_subpass = current_subpass_ + 1;
John Zulauf1507ee42020-05-18 11:33:09 -06002503 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002504 skip |=
2505 next_context.ValidateLayoutTransitions(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002506 if (!skip) {
2507 // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2508 // on a copy of the (empty) next context.
2509 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2510 AccessContext temp_context(next_context);
2511 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002512 skip |=
2513 temp_context.ValidateLoadOperation(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002514 }
John Zulauf7635de32020-05-29 17:14:15 -06002515 return skip;
2516}
John Zulauf64ffe552021-02-06 10:25:07 -07002517bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002518 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002519 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002520 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002521 current_subpass_);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002522 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_,
2523 attachment_views_, func_name);
John Zulauf64ffe552021-02-06 10:25:07 -07002525 skip |= ValidateFinalSubpassLayoutTransitions(ex_context, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002526 return skip;
2527}
2528
John Zulauf64ffe552021-02-06 10:25:07 -07002529AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002530 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002531}
2532
John Zulauf64ffe552021-02-06 10:25:07 -07002533bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &ex_context,
2534 const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002535 bool skip = false;
2536
John Zulauf7635de32020-05-29 17:14:15 -06002537 // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2538 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2539 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2540 // to apply and only copy then, if this proves a hot spot.
2541 std::unique_ptr<AccessContext> proxy_for_current;
2542
John Zulauf355e49b2020-04-24 15:11:15 -06002543 // Validate the "finalLayout" transitions to external
2544 // Get them from where they're hiding in the extra entry.
2545 const auto &final_transitions = rp_state_->subpass_transitions.back();
2546 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002547 const auto &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002548 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
2549 assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
John Zulauf7635de32020-05-29 17:14:15 -06002550 auto *context = trackback.context;
2551
2552 if (transition.prev_pass == current_subpass_) {
2553 if (!proxy_for_current) {
2554 // We haven't recorded the resolve/store for the current_subpass, so we need to copy current and update it *as if* those operations had been recorded
John Zulauf64ffe552021-02-06 10:25:07 -07002555 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002556 }
2557 context = proxy_for_current.get();
2558 }
2559
John Zulaufa0a98292020-09-18 09:30:10 -06002560 // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2561 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002562 auto hazard = context->DetectImageBarrierHazard(view_gen, merged_barrier, AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002563 if (hazard.hazard) {
John Zulauf64ffe552021-02-06 10:25:07 -07002564 skip |= ex_context.GetSyncState().LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002565 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07002566 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2567 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2568 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2569 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf64ffe552021-02-06 10:25:07 -07002570 ex_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06002571 }
2572 }
2573 return skip;
2574}
2575
John Zulauf14940722021-04-12 15:19:02 -06002576void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002577 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002578 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002579}
2580
John Zulauf14940722021-04-12 15:19:02 -06002581void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002582 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2583 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf1507ee42020-05-18 11:33:09 -06002584
2585 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2586 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002587 const AttachmentViewGen &view_gen = attachment_views_[i];
2588 if (!view_gen.IsValid()) continue; // UNUSED
John Zulauf1507ee42020-05-18 11:33:09 -06002589
2590 const auto &ci = attachment_ci[i];
2591 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002592 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002593 const bool is_color = !(has_depth || has_stencil);
2594
2595 if (is_color) {
John Zulauf57261402021-08-13 11:32:06 -06002596 const SyncStageAccessIndex load_op = ColorLoadUsage(ci.loadOp);
2597 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2598 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea, load_op,
2599 SyncOrdering::kColorAttachment, tag);
2600 }
John Zulauf1507ee42020-05-18 11:33:09 -06002601 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06002602 if (has_depth) {
John Zulauf57261402021-08-13 11:32:06 -06002603 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.loadOp);
2604 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2605 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_op,
2606 SyncOrdering::kDepthStencilAttachment, tag);
2607 }
John Zulauf1507ee42020-05-18 11:33:09 -06002608 }
2609 if (has_stencil) {
John Zulauf57261402021-08-13 11:32:06 -06002610 const SyncStageAccessIndex load_op = DepthStencilLoadUsage(ci.stencilLoadOp);
2611 if (load_op != SYNC_ACCESS_INDEX_NONE) {
2612 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, load_op,
2613 SyncOrdering::kDepthStencilAttachment, tag);
2614 }
John Zulauf1507ee42020-05-18 11:33:09 -06002615 }
2616 }
2617 }
2618 }
2619}
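// Note on the load-op mapping above (assumed behavior of ColorLoadUsage / DepthStencilLoadUsage):
// VK_ATTACHMENT_LOAD_OP_LOAD is treated as an attachment read, CLEAR and DONT_CARE as attachment writes,
// and a "none" load op maps to SYNC_ACCESS_INDEX_NONE so no access is recorded for that aspect.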
John Zulaufd0ec59f2021-03-13 14:25:08 -07002620AttachmentViewGenVector RenderPassAccessContext::CreateAttachmentViewGen(
2621 const VkRect2D &render_area, const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
2622 AttachmentViewGenVector view_gens;
2623 VkExtent3D extent = CastTo3D(render_area.extent);
2624 VkOffset3D offset = CastTo3D(render_area.offset);
2625 view_gens.reserve(attachment_views.size());
2626 for (const auto *view : attachment_views) {
2627 view_gens.emplace_back(view, offset, extent);
2628 }
2629 return view_gens;
2630}
John Zulauf64ffe552021-02-06 10:25:07 -07002631RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2632 VkQueueFlags queue_flags,
2633 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2634 const AccessContext *external_context)
John Zulaufd0ec59f2021-03-13 14:25:08 -07002635 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_() {
John Zulauf355e49b2020-04-24 15:11:15 -06002636    // Add this for all subpasses here so that they exist during the next subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002637 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002638 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002639 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulauf355e49b2020-04-24 15:11:15 -06002640 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002641 attachment_views_ = CreateAttachmentViewGen(render_area, attachment_views);
John Zulauf64ffe552021-02-06 10:25:07 -07002642}
John Zulauf14940722021-04-12 15:19:02 -06002643void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag tag) {
John Zulauf64ffe552021-02-06 10:25:07 -07002644 assert(0 == current_subpass_);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002645 subpass_contexts_[current_subpass_].SetStartTag(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002646 RecordLayoutTransitions(tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002647 RecordLoadOperations(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002648}
John Zulauf1507ee42020-05-18 11:33:09 -06002649
John Zulauf14940722021-04-12 15:19:02 -06002650void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag prev_subpass_tag, const ResourceUsageTag next_subpass_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002651 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulaufd0ec59f2021-03-13 14:25:08 -07002652 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, prev_subpass_tag);
2653 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, prev_subpass_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002654
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002655 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2656 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002657 current_subpass_++;
2658 assert(current_subpass_ < subpass_contexts_.size());
John Zulauffaea0ee2021-01-14 14:01:32 -07002659 subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
2660 RecordLayoutTransitions(next_subpass_tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002661 RecordLoadOperations(next_subpass_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002662}
2663
John Zulauf14940722021-04-12 15:19:02 -06002664void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002665 // Add the resolve and store accesses
John Zulaufd0ec59f2021-03-13 14:25:08 -07002666 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, tag);
2667 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, tag);
John Zulauf7635de32020-05-29 17:14:15 -06002668
John Zulauf355e49b2020-04-24 15:11:15 -06002669 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002670 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002671
2672 // Add the "finalLayout" transitions to external
 2673    // Get them from where they're hiding in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002674    // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
 2675    // TODO: For aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
 2676    // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002677 const auto &final_transitions = rp_state_->subpass_transitions.back();
2678 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002679 const AttachmentViewGen &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002680 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufaa97d8b2020-07-14 10:58:13 -06002681 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
John Zulaufd5115702021-01-18 12:34:33 -07002682 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002683 for (const auto &barrier : last_trackback.barriers) {
John Zulaufd5115702021-01-18 12:34:33 -07002684 barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002685 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002686 external_context->ApplyUpdateAction(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002687 }
2688}
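// Expected driving sequence for RenderPassAccessContext (a sketch of assumed usage by the command buffer context;
// the tag values are hypothetical and would come from NextCommandTag or similar):
//   RenderPassAccessContext rp_context(rp_state, render_area, queue_flags, views, external_context);
//   rp_context.RecordBeginRenderPass(begin_tag);                // subpass 0: layout transitions + load ops
//   rp_context.RecordNextSubpass(prev_tag, next_tag);           // per vkCmdNextSubpass: resolve/store, then transitions + loads
//   rp_context.RecordEndRenderPass(external_context, end_tag);  // last resolve/store, export accesses, final transitions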
2689
Jeremy Gebben40a22942020-12-22 14:22:06 -07002690SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002691 SyncExecScope result;
2692 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002693 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2694 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002695 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2696 return result;
2697}
2698
Jeremy Gebben40a22942020-12-22 14:22:06 -07002699SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002700 SyncExecScope result;
2701 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002702 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2703 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002704 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2705 return result;
2706}
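// Illustrative example (a sketch of the assumed behavior of the helpers above): building a source scope from the
// fragment shader stage also pulls in the logically earlier graphics stages, while the matching destination scope
// pulls in the logically later ones; valid_accesses is then limited to accesses legal for those stages.
//   auto src = SyncExecScope::MakeSrc(queue_flags, VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR);
//   auto dst = SyncExecScope::MakeDst(queue_flags, VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR);
//   SyncBarrier barrier(src, dst);  // pure execution dependency: no access scopes (see the constructor below)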
2707
2708SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002709 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002710 src_access_scope = 0;
John Zulaufc523bf62021-02-16 08:20:34 -07002711 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002712 dst_access_scope = 0;
2713}
2714
2715template <typename Barrier>
2716SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002717 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002718 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002719 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002720 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2721}
2722
2723SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002724 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2725 if (barrier) {
2726 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002727 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002728 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002729
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002730 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002731 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002732 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2733
2734 } else {
2735 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002736 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002737 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2738
2739 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002740 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002741 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2742 }
2743}
2744
2745template <typename Barrier>
2746SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2747 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2748 src_exec_scope = src.exec_scope;
2749 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2750
2751 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002752 dst_exec_scope = dst.exec_scope;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002753 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002754}
2755
John Zulaufb02c1eb2020-10-06 16:33:36 -06002756// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2757void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2758 for (const auto &barrier : barriers) {
2759 ApplyBarrier(barrier, layout_transition);
2760 }
2761}
2762
John Zulauf89311b42020-09-29 16:28:47 -06002763// ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. Its designed use is for
 2764// inter-subpass barriers for lazy-evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2765// lazily, s.t. no previous access reports should need layout transitions.
John Zulauf14940722021-04-12 15:19:02 -06002766void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06002767    assert(!pending_layout_transition);  // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002768 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002769 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002770 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002771 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002772 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002773 ApplyPendingBarriers(tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002774}
John Zulauf9cb530d2019-09-30 14:14:10 -06002775HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2776 HazardResult hazard;
2777 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002778 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002779 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002780 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002781 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002782 }
2783 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002784 // Write operation:
 2785        // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any*)
 2786        // If reads exist -- test only against them because either:
 2787        // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
 2788        // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
 2789        //   the current write happens after the reads, so just test the write against the reads
2790 // Otherwise test against last_write
2791 //
2792 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07002793 if (last_reads.size()) {
2794 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06002795 if (IsReadHazard(usage_stage, read_access)) {
2796 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2797 break;
2798 }
2799 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002800 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06002801 // Write-After-Write check -- if we have a previous write to test against
2802 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002803 }
2804 }
2805 return hazard;
2806}
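// Minimal illustrative sequence (a hypothetical usage sketch, not code from this file; tag0 stands in for a real
// ResourceUsageTag): with no intervening barrier, a read after a write reports READ_AFTER_WRITE and a second write
// reports WRITE_AFTER_WRITE.
//   ResourceAccessState state;
//   state.Update(SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, tag0);
//   auto raw = state.DetectHazard(SYNC_COPY_TRANSFER_READ);   // raw.hazard == READ_AFTER_WRITE
//   auto waw = state.DetectHazard(SYNC_COPY_TRANSFER_WRITE);  // waw.hazard == WRITE_AFTER_WRITE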
2807
John Zulauf4fa68462021-04-26 21:04:22 -06002808HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering ordering_rule) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002809 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf4fa68462021-04-26 21:04:22 -06002810 return DetectHazard(usage_index, ordering);
2811}
2812
2813HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const OrderingBarrier &ordering) const {
John Zulauf69133422020-05-20 14:55:53 -06002814 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
2815 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06002816 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002817 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002818 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
2819 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06002820 if (IsRead(usage_bit)) {
 2821        // Exclude RAW if there is no write, or the write is not the "most recent" operation w.r.t. usage;
2822 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
2823 if (is_raw_hazard) {
2824 // NOTE: we know last_write is non-zero
2825 // See if the ordering rules save us from the simple RAW check above
2826 // First check to see if the current usage is covered by the ordering rules
2827 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
2828 const bool usage_is_ordered =
2829 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
2830 if (usage_is_ordered) {
 2831                // Now see if the most recent write (or a subsequent read) is ordered
2832 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
2833 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06002834 }
2835 }
John Zulauf4285ee92020-09-23 10:20:52 -06002836 if (is_raw_hazard) {
2837 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
2838 }
John Zulauf361fb532020-07-22 10:45:39 -06002839 } else {
2840 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002841 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07002842 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06002843 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07002844 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06002845 if (usage_write_is_ordered) {
 2846                // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
2847 ordered_stages = GetOrderedStages(ordering);
2848 }
2849 // If we're tracking any reads that aren't ordered against the current write, got to check 'em all.
2850 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002851 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06002852 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
2853 if (IsReadHazard(usage_stage, read_access)) {
2854 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2855 break;
2856 }
John Zulaufd14743a2020-07-03 09:42:39 -06002857 }
2858 }
John Zulauf4285ee92020-09-23 10:20:52 -06002859 } else if (!(last_write_is_ordered && usage_write_is_ordered)) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002860 if (last_write.any() && IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002861 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06002862 }
John Zulauf69133422020-05-20 14:55:53 -06002863 }
2864 }
2865 return hazard;
2866}
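// Intent of the ordering rules (a summary, assuming the ordering tables are set up as the names suggest): accesses
// covered by the same ordering scope (e.g. color attachment writes within a subpass, checked with
// SyncOrdering::kColorAttachment) are treated as raster-ordered, so the WAW/RAW checks above are suppressed for
// them, while the same accesses checked with SyncOrdering::kNonAttachment would still report hazards.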
2867
John Zulaufae842002021-04-15 18:20:55 -06002868HazardResult ResourceAccessState::DetectHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range) const {
2869 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06002870 using Size = FirstAccesses::size_type;
2871 const auto &recorded_accesses = recorded_use.first_accesses_;
2872 Size count = recorded_accesses.size();
2873 if (count) {
2874 const auto &last_access = recorded_accesses.back();
2875 bool do_write_last = IsWrite(last_access.usage_index);
2876 if (do_write_last) --count;
John Zulaufae842002021-04-15 18:20:55 -06002877
John Zulauf4fa68462021-04-26 21:04:22 -06002878        for (Size i = 0; i < count; ++i) {
2879 const auto &first = recorded_accesses[i];
2880 // Skip and quit logic
2881 if (first.tag < tag_range.begin) continue;
2882 if (first.tag >= tag_range.end) {
2883 do_write_last = false; // ignore last since we know it can't be in tag_range
2884 break;
2885 }
2886
2887 hazard = DetectHazard(first.usage_index, first.ordering_rule);
2888 if (hazard.hazard) {
2889 hazard.AddRecordedAccess(first);
2890 break;
2891 }
2892 }
2893
2894 if (do_write_last && tag_range.includes(last_access.tag)) {
2895 // Writes are a bit special... both for the "most recent" access logic, and layout transition specific logic
2896 OrderingBarrier barrier = GetOrderingRules(last_access.ordering_rule);
2897 if (last_access.usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION) {
2898 // Or in the layout first access scope as a barrier... IFF the usage is an ILT
2899 // this was saved off in the "apply barriers" logic to simplify ILT access checks as they straddle
2900 // the barrier that applies them
2901 barrier |= recorded_use.first_write_layout_ordering_;
2902 }
2903 // Any read stages present in the recorded context (this) are most recent to the write, and thus mask those stages in
2904 // the active context
2905 if (recorded_use.first_read_stages_) {
2906 // we need to ignore the first use read stage in the active context (so we add them to the ordering rule),
2907 // reads in the active context are not "most recent" as all recorded context operations are *after* them
 2908            // This suppresses only RAW checks for stages present in the recorded context, but not those only present in the
2909 // active context.
2910 barrier.exec_scope |= recorded_use.first_read_stages_;
2911 // if there are any first use reads, we suppress WAW by injecting the active context write in the ordering rule
2912 barrier.access_scope |= FlagBit(last_access.usage_index);
2913 }
2914 hazard = DetectHazard(last_access.usage_index, barrier);
2915 if (hazard.hazard) {
2916 hazard.AddRecordedAccess(last_access);
2917 }
2918 }
John Zulaufae842002021-04-15 18:20:55 -06002919 }
2920 return hazard;
2921}
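// Note (assumed from the call pattern rather than stated here): this overload replays the "first accesses" recorded
// in another context (e.g. a secondary command buffer) against *this* state, limited to tag_range, so only accesses
// that were first in the recorded context get re-validated against the current one.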
2922
John Zulauf2f952d22020-02-10 11:34:51 -07002923// Asynchronous Hazards occur between subpasses with no connection through the DAG
John Zulauf14940722021-04-12 15:19:02 -06002924HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07002925 HazardResult hazard;
2926 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002927 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
2928 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
2929 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07002930 if (IsRead(usage)) {
John Zulauf14940722021-04-12 15:19:02 -06002931 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06002932 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07002933 }
2934 } else {
John Zulauf14940722021-04-12 15:19:02 -06002935 if (last_write.any() && (write_tag >= start_tag)) {
John Zulauf59e25072020-07-17 10:55:21 -06002936 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07002937 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002938 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07002939 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06002940 if (read_access.tag >= start_tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07002941 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002942 break;
2943 }
2944 }
John Zulauf2f952d22020-02-10 11:34:51 -07002945 }
2946 }
2947 return hazard;
2948}
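// Note (a restatement of the rule above): "async" here means the two subpasses have no dependency path in the render
// pass DAG, so *any* overlap after start_tag is a race; ordinary ordering rules and barriers don't apply.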
2949
John Zulaufae842002021-04-15 18:20:55 -06002950HazardResult ResourceAccessState::DetectAsyncHazard(const ResourceAccessState &recorded_use, const ResourceUsageRange &tag_range,
2951 ResourceUsageTag start_tag) const {
2952 HazardResult hazard;
John Zulauf4fa68462021-04-26 21:04:22 -06002953 for (const auto &first : recorded_use.first_accesses_) {
John Zulaufae842002021-04-15 18:20:55 -06002954 // Skip and quit logic
2955 if (first.tag < tag_range.begin) continue;
2956 if (first.tag >= tag_range.end) break;
John Zulaufae842002021-04-15 18:20:55 -06002957
2958 hazard = DetectAsyncHazard(first.usage_index, start_tag);
John Zulauf4fa68462021-04-26 21:04:22 -06002959 if (hazard.hazard) {
2960 hazard.AddRecordedAccess(first);
2961 break;
2962 }
John Zulaufae842002021-04-15 18:20:55 -06002963 }
2964 return hazard;
2965}
2966
Jeremy Gebben40a22942020-12-22 14:22:06 -07002967HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002968 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07002969 // Only supporting image layout transitions for now
2970 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2971 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06002972    // Only test for WAW if there are no intervening read operations.
 2973    // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07002974 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06002975 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07002976 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002977 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06002978 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07002979 break;
2980 }
2981 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002982 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2983 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2984 }
2985
2986 return hazard;
2987}
2988
Jeremy Gebben40a22942020-12-22 14:22:06 -07002989HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07002990 const SyncStageAccessFlags &src_access_scope,
John Zulauf14940722021-04-12 15:19:02 -06002991 const ResourceUsageTag event_tag) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002992 // Only supporting image layout transitions for now
2993 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2994 HazardResult hazard;
 2995    // Only test for WAW if there are no intervening read operations.
 2996    // See DetectHazard(SyncStageAccessIndex) above for more details.
2997
John Zulaufab7756b2020-12-29 16:10:16 -07002998 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002999        // Look at the reads if any... if reads exist, they are either the reason the access is in the event's
3000 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07003001 for (const auto &read_access : last_reads) {
John Zulauf14940722021-04-12 15:19:02 -06003002 if (read_access.tag < event_tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003003                // The read is in the event's first synchronization scope, so we use a barrier hazard check
3004 // If the read stage is not in the src sync scope
3005 // *AND* not execution chained with an existing sync barrier (that's the or)
3006 // then the barrier access is unsafe (R/W after R)
3007 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
3008 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3009 break;
3010 }
3011 } else {
 3012                // The read is not in the event's first sync scope and so is a hazard vs. the layout transition
3013 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3014 }
3015 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003016 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003017        // If there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
John Zulauf14940722021-04-12 15:19:02 -06003018 if (write_tag < event_tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003019            // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
3020 // So do a normal barrier hazard check
3021 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3022 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3023 }
3024 } else {
 3025            // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06003026 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3027 }
John Zulaufd14743a2020-07-03 09:42:39 -06003028 }
John Zulauf361fb532020-07-22 10:45:39 -06003029
John Zulauf0cb5be22020-01-23 12:18:22 -07003030 return hazard;
3031}
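// A usage sketch (assumed, based on the parameters): this event-scoped variant is what a vkCmdWaitEvents-style check
// would use, passing the tag recorded at vkCmdSetEvent time so accesses split into "in the event's first scope"
// (checked against the barrier) versus "after the SetEvent" (always a hazard for the layout transition).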
3032
John Zulauf5f13a792020-03-10 07:31:21 -06003033// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
 3034// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
3035// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
3036void ResourceAccessState::Resolve(const ResourceAccessState &other) {
John Zulauf14940722021-04-12 15:19:02 -06003037 if (write_tag < other.write_tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003038        // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
3039 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06003040 *this = other;
John Zulauf14940722021-04-12 15:19:02 -06003041 } else if (other.write_tag == write_tag) {
3042 // In the *equals* case for write operations, we merged the write barriers and the read state (but without the
John Zulauf5f13a792020-03-10 07:31:21 -06003043 // dependency chaining logic or any stage expansion)
3044 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003045 pending_write_barriers |= other.pending_write_barriers;
3046 pending_layout_transition |= other.pending_layout_transition;
3047 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf4fa68462021-04-26 21:04:22 -06003048 pending_layout_ordering_ |= other.pending_layout_ordering_;
John Zulauf5f13a792020-03-10 07:31:21 -06003049
John Zulaufd14743a2020-07-03 09:42:39 -06003050 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07003051 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06003052 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07003053 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003054 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06003055 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06003056 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06003057 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
3058 // but we should wait on profiling data for that.
3059 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003060 auto &my_read = last_reads[my_read_index];
3061 if (other_read.stage == my_read.stage) {
John Zulauf14940722021-04-12 15:19:02 -06003062 if (my_read.tag < other_read.tag) {
John Zulauf4285ee92020-09-23 10:20:52 -06003063 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06003064 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06003065 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003066 my_read.pending_dep_chain = other_read.pending_dep_chain;
3067 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
3068 // May require tracking more than one access per stage.
3069 my_read.barriers = other_read.barriers;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003070 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06003071                            // Since I'm overwriting the fragment stage read, also update the input attachment info
3072 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06003073 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003074 }
John Zulauf14940722021-04-12 15:19:02 -06003075 } else if (other_read.tag == my_read.tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06003076 // The read tags match so merge the barriers
3077 my_read.barriers |= other_read.barriers;
3078 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003079 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003080
John Zulauf5f13a792020-03-10 07:31:21 -06003081 break;
3082 }
3083 }
3084 } else {
3085 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07003086 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06003087 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07003088 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003089 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003090 }
John Zulauf5f13a792020-03-10 07:31:21 -06003091 }
3092 }
John Zulauf361fb532020-07-22 10:45:39 -06003093 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003094    }  // the else clause would be that the other write is before this write... in which case we supersede the other state and
3095 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07003096
3097 // Merge first access information by making a copy of this first_access and reconstructing with a shuffle
3098 // of the copy and other into this using the update first logic.
 3099    // NOTE: All sorts of additional cleverness could be put into short circuits (for example, back is a write and is before the front
3100 // of the other first_accesses... )
3101 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
3102 FirstAccesses firsts(std::move(first_accesses_));
3103 first_accesses_.clear();
3104 first_read_stages_ = 0U;
3105 auto a = firsts.begin();
3106 auto a_end = firsts.end();
3107 for (auto &b : other.first_accesses_) {
John Zulauf14940722021-04-12 15:19:02 -06003108 // TODO: Determine whether some tag offset will be needed for PHASE II
3109 while ((a != a_end) && (a->tag < b.tag)) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003110 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3111 ++a;
3112 }
3113 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
3114 }
3115 for (; a != a_end; ++a) {
3116 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3117 }
3118 }
John Zulauf5f13a792020-03-10 07:31:21 -06003119}
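// Illustrative merge semantics (a sketch of the rules implemented above): for the same address range,
//   A: write at tag 5, fragment-shader read at tag 7        B: same write at tag 5, vertex-shader read at tag 6
// A.Resolve(B) keeps the common write, keeps one read per stage (both here), and ORs the barriers where the stage
// and tag match; had B's write carried a later tag, A would simply have been overwritten by B.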
3120
John Zulauf14940722021-04-12 15:19:02 -06003121void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003122    // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
3123 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003124 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003125        // Multiple outstanding reads may be of interest and do dependency chains independently
3126 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3127 const auto usage_stage = PipelineStageBit(usage_index);
3128 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003129 for (auto &read_access : last_reads) {
3130 if (read_access.stage == usage_stage) {
3131 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003132 break;
3133 }
3134 }
3135 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07003136 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003137 last_read_stages |= usage_stage;
3138 }
John Zulauf4285ee92020-09-23 10:20:52 -06003139
3140 // Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07003141 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06003142 // TODO Revisit re: multiple reads for a given stage
3143 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003144 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003145 } else {
3146 // Assume write
3147 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003148 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003149 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003150 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003151}
John Zulauf5f13a792020-03-10 07:31:21 -06003152
John Zulauf89311b42020-09-29 16:28:47 -06003153// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3154// if the last_reads/last_write were unsafe, we've reported them, in either case the prior access is irrelevant.
3155// We can overwrite them as *this* write is now after them.
3156//
3157// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
John Zulauf14940722021-04-12 15:19:02 -06003158void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003159 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06003160 last_read_stages = 0;
3161 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06003162 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06003163
3164 write_barriers = 0;
3165 write_dependency_chain = 0;
3166 write_tag = tag;
3167 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003168}
3169
John Zulauf89311b42020-09-29 16:28:47 -06003170// Apply the memory barrier without updating the existing barriers. The execution barrier
3171// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3172// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3173// replace the current write barriers or add to them, so accumulate to pending as well.
3174void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
3175 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3176 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003177 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
 3178    // transition, under the theory of "most recent access". If the read/write *isn't* safe
 3179    // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
 3180    // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufc523bf62021-02-16 08:20:34 -07003181 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope.exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06003182 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003183 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003184 if (layout_transition) {
3185 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3186 }
John Zulaufa0a98292020-09-18 09:30:10 -06003187 }
John Zulauf89311b42020-09-29 16:28:47 -06003188    // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3189 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003190
John Zulauf89311b42020-09-29 16:28:47 -06003191 if (!pending_layout_transition) {
3192 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3193 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003194 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003195 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufc523bf62021-02-16 08:20:34 -07003196 if (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers)) {
3197 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003198 }
3199 }
John Zulaufa0a98292020-09-18 09:30:10 -06003200 }
John Zulaufa0a98292020-09-18 09:30:10 -06003201}
3202
John Zulauf4a6105a2020-11-17 15:11:05 -07003203// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
3204// changes the "chaining" state, but to keep barriers independent. See discussion above.
John Zulauf14940722021-04-12 15:19:02 -06003205void ResourceAccessState::ApplyBarrier(const ResourceUsageTag scope_tag, const SyncBarrier &barrier, bool layout_transition) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003206 // The scope logic for events is, if we're here, the resource usage was flagged as "in the first execution scope" at
 3207    // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
 3208    // in order to know if it's in the execution scope
3209 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
3210 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
3211 // errors w.r.t. "most recent" accesses.
John Zulauf14940722021-04-12 15:19:02 -06003212 if (layout_transition || ((write_tag < scope_tag) && (barrier.src_access_scope & last_write).any())) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003213 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07003214 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4fa68462021-04-26 21:04:22 -06003215 if (layout_transition) {
3216 pending_layout_ordering_ |= OrderingBarrier(barrier.src_exec_scope.exec_scope, barrier.src_access_scope);
3217 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003218 }
 3219    // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3220 pending_layout_transition |= layout_transition;
3221
3222 if (!pending_layout_transition) {
3223 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3224 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003225 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003226 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
3227 // NOTE: That's not really correct... this read stage might *not* have been included in the setevent, and the barriers
3228 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
3229 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
3230 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
 3231            // capture (the specific write and read stages that *were* in scope at the moment of SetEvent).
3232 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulauf14940722021-04-12 15:19:02 -06003233 if ((read_access.tag < scope_tag) && (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers))) {
John Zulaufc523bf62021-02-16 08:20:34 -07003234 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07003235 }
3236 }
3237 }
3238}
John Zulauf14940722021-04-12 15:19:02 -06003239void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag tag) {
John Zulauf89311b42020-09-29 16:28:47 -06003240 if (pending_layout_transition) {
John Zulauf4fa68462021-04-26 21:04:22 -06003241 // SetWrite clobbers the last_reads array, and thus we don't have to clear the read_state out.
John Zulauf89311b42020-09-29 16:28:47 -06003242 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003243 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf4fa68462021-04-26 21:04:22 -06003244 TouchupFirstForLayoutTransition(tag, pending_layout_ordering_);
3245 pending_layout_ordering_ = OrderingBarrier();
John Zulauf89311b42020-09-29 16:28:47 -06003246 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003247 }
John Zulauf89311b42020-09-29 16:28:47 -06003248
3249 // Apply the accumulate execution barriers (and thus update chaining information)
John Zulauf4fa68462021-04-26 21:04:22 -06003250 // for layout transition, last_reads is reset by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003251 for (auto &read_access : last_reads) {
3252 read_access.barriers |= read_access.pending_dep_chain;
3253 read_execution_barriers |= read_access.barriers;
3254 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003255 }
3256
3257 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3258 write_dependency_chain |= pending_write_dep_chain;
3259 write_barriers |= pending_write_barriers;
3260 pending_write_dep_chain = 0;
3261 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003262}
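// Typical two-phase application (a usage sketch; 'batch' and 'tag' are hypothetical values): barriers from a single
// vkCmdPipelineBarrier are staged first so they don't chain with one another, then resolved together.
//   for (const SyncBarrier &barrier : batch) {
//       access.ApplyBarrier(barrier, false /* no layout transition */);
//   }
//   access.ApplyPendingBarriers(tag);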
3263
John Zulaufae842002021-04-15 18:20:55 -06003264bool ResourceAccessState::FirstAccessInTagRange(const ResourceUsageRange &tag_range) const {
3265 if (!first_accesses_.size()) return false;
3266 const ResourceUsageRange first_access_range = {first_accesses_.front().tag, first_accesses_.back().tag + 1};
3267 return tag_range.intersects(first_access_range);
3268}
3269
John Zulauf59e25072020-07-17 10:55:21 -06003270// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07003271VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
3272 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003273
John Zulaufab7756b2020-12-29 16:10:16 -07003274 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003275 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003276 barriers = read_access.barriers;
3277 break;
John Zulauf59e25072020-07-17 10:55:21 -06003278 }
3279 }
John Zulauf4285ee92020-09-23 10:20:52 -06003280
John Zulauf59e25072020-07-17 10:55:21 -06003281 return barriers;
3282}
3283
Jeremy Gebben40a22942020-12-22 14:22:06 -07003284inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003285 assert(IsRead(usage));
3286 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3287 // * the previous reads are not hazards, and thus last_write must be visible and available to
3288 // any reads that happen after.
3289 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3290 // the current read will be also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003291 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003292}
3293
Jeremy Gebben40a22942020-12-22 14:22:06 -07003294VkPipelineStageFlags2KHR ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003295 // Whether the stage are in the ordering scope only matters if the current write is ordered
Jeremy Gebben40a22942020-12-22 14:22:06 -07003296 VkPipelineStageFlags2KHR ordered_stages = last_read_stages & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06003297    // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003298 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003299 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003300        // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07003301 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06003302 }
3303
3304 return ordered_stages;
3305}
3306
John Zulauf14940722021-04-12 15:19:02 -06003307void ResourceAccessState::UpdateFirst(const ResourceUsageTag tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
John Zulauffaea0ee2021-01-14 14:01:32 -07003308 // Only record until we record a write.
3309 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003310 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07003311 if (0 == (usage_stage & first_read_stages_)) {
3312 // If this is a read we haven't seen or a write, record.
John Zulauf4fa68462021-04-26 21:04:22 -06003313 // We always need to know what stages were found prior to write
John Zulauffaea0ee2021-01-14 14:01:32 -07003314 first_read_stages_ |= usage_stage;
John Zulauf4fa68462021-04-26 21:04:22 -06003315 if (0 == (read_execution_barriers & usage_stage)) {
3316 // If this stage isn't masked then we add it (since writes map to usage_stage 0, this also records writes)
3317 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3318 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003319 }
3320 }
3321}
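// Summary of the recording rule above: first_accesses_ keeps, in tag order, at most one read per stage seen before
// the first write (and only if that stage wasn't already covered by read_execution_barriers), plus the first write
// itself; this compact list is what the recorded-context DetectHazard/DetectAsyncHazard overloads above iterate when
// a recorded context is replayed.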
3322
John Zulauf4fa68462021-04-26 21:04:22 -06003323void ResourceAccessState::TouchupFirstForLayoutTransition(ResourceUsageTag tag, const OrderingBarrier &layout_ordering) {
3324 // Only call this after recording an image layout transition
3325 assert(first_accesses_.size());
3326 if (first_accesses_.back().tag == tag) {
3327 // If this layout transition is the the first write, add the additional ordering rules that guard the ILT
3328 assert(first_accesses_.back().usage_index = SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3329 first_write_layout_ordering_ = layout_ordering;
3330 }
3331}
3332
John Zulaufd1f85d42020-04-15 12:23:15 -06003333void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003334 auto *access_context = GetAccessContextNoInsert(command_buffer);
3335 if (access_context) {
3336 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003337 }
3338}
3339
John Zulaufd1f85d42020-04-15 12:23:15 -06003340void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3341 auto access_found = cb_access_state.find(command_buffer);
3342 if (access_found != cb_access_state.end()) {
3343 access_found->second->Reset();
John Zulauf4fa68462021-04-26 21:04:22 -06003344 access_found->second->MarkDestroyed();
John Zulaufd1f85d42020-04-15 12:23:15 -06003345 cb_access_state.erase(access_found);
3346 }
3347}
3348
John Zulauf9cb530d2019-09-30 14:14:10 -06003349bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3350 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3351 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003352 const auto *cb_context = GetAccessContext(commandBuffer);
3353 assert(cb_context);
3354 if (!cb_context) return skip;
3355 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003356
John Zulauf3d84f1b2020-03-09 13:33:25 -06003357 // If we have no previous accesses, we have no hazards
John Zulauf3d84f1b2020-03-09 13:33:25 -06003358 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003359 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003360
3361 for (uint32_t region = 0; region < regionCount; region++) {
3362 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003363 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003364 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003365 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003366 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003367 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003368 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003369 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003370 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003371 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003372 }
John Zulauf16adfc92020-04-08 10:28:33 -06003373 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003374 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003375 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003376 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003377 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003378 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003379 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003380 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003381 }
3382 }
3383 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003384 }
3385 return skip;
3386}
3387
3388void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3389 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003390 auto *cb_context = GetAccessContext(commandBuffer);
3391 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003392 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003393 auto *context = cb_context->GetCurrentAccessContext();
3394
John Zulauf9cb530d2019-09-30 14:14:10 -06003395 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003396 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003397
3398 for (uint32_t region = 0; region < regionCount; region++) {
3399 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003400 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003401 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003402 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003403 }
John Zulauf16adfc92020-04-08 10:28:33 -06003404 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003405 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003406 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003407 }
3408 }
3409}
3410
John Zulauf4a6105a2020-11-17 15:11:05 -07003411void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3412 // Clear out events from the command buffer contexts
3413 for (auto &cb_context : cb_access_state) {
3414 cb_context.second->RecordDestroyEvent(event);
3415 }
3416}
3417
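// vkCmdCopyBuffer2KHR takes its parameters from VkCopyBufferInfo2KHR but performs the same per-region read/write hazard
// checks as vkCmdCopyBuffer.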
Jeff Leger178b1e52020-10-05 12:22:23 -04003418bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3419 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3420 bool skip = false;
3421 const auto *cb_context = GetAccessContext(commandBuffer);
3422 assert(cb_context);
3423 if (!cb_context) return skip;
3424 const auto *context = cb_context->GetCurrentAccessContext();
3425
3426 // If we have no previous accesses, we have no hazards
3427 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3428 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3429
3430 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3431 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3432 if (src_buffer) {
3433 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003434 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003435 if (hazard.hazard) {
3436 // TODO -- add tag information to log msg when useful.
3437 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
3438 "vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
3439 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003440 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003441 }
3442 }
3443 if (dst_buffer && !skip) {
3444 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003445 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003446 if (hazard.hazard) {
3447 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
3448 "vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
3449 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003450 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003451 }
3452 }
3453 if (skip) break;
3454 }
3455 return skip;
3456}
3457
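// Record phase for vkCmdCopyBuffer2KHR, mirroring the vkCmdCopyBuffer record path under the CMD_COPYBUFFER2KHR tag.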
3458void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3459 auto *cb_context = GetAccessContext(commandBuffer);
3460 assert(cb_context);
3461 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
3462 auto *context = cb_context->GetCurrentAccessContext();
3463
3464 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3465 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3466
3467 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3468 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3469 if (src_buffer) {
3470 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003471 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003472 }
3473 if (dst_buffer) {
3474 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003475 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003476 }
3477 }
3478}
3479
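// Validate vkCmdCopyImage: for each region, detect transfer-read hazards on the source subresource and transfer-write
// hazards on the destination, using the format-adjusted destination extent.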
John Zulauf5c5e88d2019-12-26 11:22:02 -07003480bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3481 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3482 const VkImageCopy *pRegions) const {
3483 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003484 const auto *cb_access_context = GetAccessContext(commandBuffer);
3485 assert(cb_access_context);
3486 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003487
John Zulauf3d84f1b2020-03-09 13:33:25 -06003488 const auto *context = cb_access_context->GetCurrentAccessContext();
3489 assert(context);
3490 if (!context) return skip;
3491
3492 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3493 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003494 for (uint32_t region = 0; region < regionCount; region++) {
3495 const auto &copy_region = pRegions[region];
3496 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003497 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003498 copy_region.srcOffset, copy_region.extent);
3499 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003500 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003501 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003502 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003503 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003504 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003505 }
3506
3507 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003508 VkExtent3D dst_copy_extent =
3509 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003510 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07003511 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003512 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003513 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003514 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003515 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003516 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003517 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003518 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003519 }
3520 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003521
John Zulauf5c5e88d2019-12-26 11:22:02 -07003522 return skip;
3523}
3524
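// Record phase for vkCmdCopyImage: update access state for each region's source subresource (read) and destination
// subresource (write).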
3525void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3526 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3527 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003528 auto *cb_access_context = GetAccessContext(commandBuffer);
3529 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003530 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003531 auto *context = cb_access_context->GetCurrentAccessContext();
3532 assert(context);
3533
John Zulauf5c5e88d2019-12-26 11:22:02 -07003534 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003535 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003536
3537 for (uint32_t region = 0; region < regionCount; region++) {
3538 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003539 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003540 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003541 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003542 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003543 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003544 VkExtent3D dst_copy_extent =
3545 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003546 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003547 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003548 }
3549 }
3550}
3551
Jeff Leger178b1e52020-10-05 12:22:23 -04003552bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3553 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3554 bool skip = false;
3555 const auto *cb_access_context = GetAccessContext(commandBuffer);
3556 assert(cb_access_context);
3557 if (!cb_access_context) return skip;
3558
3559 const auto *context = cb_access_context->GetCurrentAccessContext();
3560 assert(context);
3561 if (!context) return skip;
3562
3563 const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3564 const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3565 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3566 const auto &copy_region = pCopyImageInfo->pRegions[region];
3567 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003568 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003569 copy_region.srcOffset, copy_region.extent);
3570 if (hazard.hazard) {
3571 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
3572 "vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
3573 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003574 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003575 }
3576 }
3577
3578 if (dst_image) {
3579 VkExtent3D dst_copy_extent =
3580 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003581 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003582 copy_region.dstOffset, dst_copy_extent);
3583 if (hazard.hazard) {
3584 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
3585 "vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
3586 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003587 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003588 }
3589 if (skip) break;
3590 }
3591 }
3592
3593 return skip;
3594}
3595
3596void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3597 auto *cb_access_context = GetAccessContext(commandBuffer);
3598 assert(cb_access_context);
3599 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
3600 auto *context = cb_access_context->GetCurrentAccessContext();
3601 assert(context);
3602
3603 auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3604 auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3605
3606 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3607 const auto &copy_region = pCopyImageInfo->pRegions[region];
3608 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003609 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003610 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003611 }
3612 if (dst_image) {
3613 VkExtent3D dst_copy_extent =
3614 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003615 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003616 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003617 }
3618 }
3619}
3620
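// Validation for vkCmdPipelineBarrier is delegated to SyncOpPipelineBarrier, which captures the barrier parameters and
// checks them against the command buffer access context.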
John Zulauf9cb530d2019-09-30 14:14:10 -06003621bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3622 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3623 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3624 uint32_t bufferMemoryBarrierCount,
3625 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3626 uint32_t imageMemoryBarrierCount,
3627 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3628 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003629 const auto *cb_access_context = GetAccessContext(commandBuffer);
3630 assert(cb_access_context);
3631 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003632
John Zulauf36ef9282021-02-02 11:47:24 -07003633 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3634 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3635 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3636 pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003637 skip = pipeline_barrier.Validate(*cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003638 return skip;
3639}
3640
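// Record phase for vkCmdPipelineBarrier: build the SyncOpPipelineBarrier, record it to obtain a usage tag, and transfer
// ownership to the command buffer context via AddSyncOp.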
3641void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3642 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3643 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3644 uint32_t bufferMemoryBarrierCount,
3645 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3646 uint32_t imageMemoryBarrierCount,
3647 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003648 auto *cb_access_context = GetAccessContext(commandBuffer);
3649 assert(cb_access_context);
3650 if (!cb_access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003651
John Zulauf8eda1562021-04-13 17:06:41 -06003652 CommandBufferAccessContext::SyncOpPointer sync_op(
3653 new SyncOpPipelineBarrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask, dstStageMask,
3654 dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
3655 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers));
3656 const auto tag = sync_op->Record(cb_access_context);
3657 cb_access_context->AddSyncOp(tag, std::move(sync_op));
John Zulauf9cb530d2019-09-30 14:14:10 -06003658}
3659
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003660bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
3661 const VkDependencyInfoKHR *pDependencyInfo) const {
3662 bool skip = false;
3663 const auto *cb_access_context = GetAccessContext(commandBuffer);
3664 assert(cb_access_context);
3665 if (!cb_access_context) return skip;
3666
3667 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3668 skip = pipeline_barrier.Validate(*cb_access_context);
3669 return skip;
3670}
3671
3672void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
3673 auto *cb_access_context = GetAccessContext(commandBuffer);
3674 assert(cb_access_context);
3675 if (!cb_access_context) return;
3676
John Zulauf8eda1562021-04-13 17:06:41 -06003677 CommandBufferAccessContext::SyncOpPointer sync_op(
3678 new SyncOpPipelineBarrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo));
3679 const auto tag = sync_op->Record(cb_access_context);
3680 cb_access_context->AddSyncOp(tag, std::move(sync_op));
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003681}
3682
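// Device creation: after the state tracker sets up device state, register the command buffer reset/free callbacks on the
// sync validator instance.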
John Zulauf9cb530d2019-09-30 14:14:10 -06003683void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3684 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
3685 // The state tracker sets up the device state
3686 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
3687
John Zulauf5f13a792020-03-10 07:31:21 -06003688 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
3689 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06003690 // TODO: Find a good way to do this hooklessly.
3691 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3692 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
3693 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
3694
John Zulaufd1f85d42020-04-15 12:23:15 -06003695 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3696 sync_device_state->ResetCommandBufferCallback(command_buffer);
3697 });
3698 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3699 sync_device_state->FreeCommandBufferCallback(command_buffer);
3700 });
John Zulauf9cb530d2019-09-30 14:14:10 -06003701}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003702
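// Shared validation for the vkCmdBeginRenderPass family; the CMD_TYPE parameter identifies which entry point
// (1.0, 2, or 2KHR) is being validated.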
John Zulauf355e49b2020-04-24 15:11:15 -06003703bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07003704 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003705 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06003706 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003707 if (cb_context) {
sfricke-samsung85584a72021-09-30 21:43:38 -07003708 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003709 skip = sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003710 }
John Zulauf355e49b2020-04-24 15:11:15 -06003711 return skip;
3712}
3713
3714bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3715 VkSubpassContents contents) const {
3716 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003717 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003718 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003719 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003720 return skip;
3721}
3722
3723bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003724 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003725 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003726 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003727 return skip;
3728}
3729
3730bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3731 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003732 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003733 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003734 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06003735 return skip;
3736}
3737
John Zulauf3d84f1b2020-03-09 13:33:25 -06003738void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3739 VkResult result) {
3740 // The state tracker sets up the command buffer state
3741 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3742
3743 // Create/initialize the structure that tracks accesses at the command buffer scope.
3744 auto cb_access_context = GetAccessContext(commandBuffer);
3745 assert(cb_access_context);
3746 cb_access_context->Reset();
3747}
3748
3749void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07003750 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003751 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003752 if (cb_context) {
sfricke-samsung85584a72021-09-30 21:43:38 -07003753 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003754 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003755 }
3756}
3757
3758void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3759 VkSubpassContents contents) {
3760 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003761 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003762 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003763 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003764}
3765
3766void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3767 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3768 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003769 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003770}
3771
3772void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3773 const VkRenderPassBeginInfo *pRenderPassBegin,
3774 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3775 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003776 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06003777}
3778
Mike Schuchardt2df08912020-12-15 16:28:09 -08003779bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07003780 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003781 bool skip = false;
3782
3783 auto cb_context = GetAccessContext(commandBuffer);
3784 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003785 if (!cb_context) return skip;
sfricke-samsung85584a72021-09-30 21:43:38 -07003786 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003787 return sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003788}
3789
3790bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3791 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
John Zulauf64ffe552021-02-06 10:25:07 -07003792 // Convert to a NextSubpass2
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003793 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003794 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003795 auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
3796 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003797 return skip;
3798}
3799
Mike Schuchardt2df08912020-12-15 16:28:09 -08003800bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3801 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003802 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003803 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06003804 return skip;
3805}
3806
3807bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3808 const VkSubpassEndInfo *pSubpassEndInfo) const {
3809 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003810 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003811 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003812}
3813
3814void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07003815 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003816 auto cb_context = GetAccessContext(commandBuffer);
3817 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003818 if (!cb_context) return;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003819
sfricke-samsung85584a72021-09-30 21:43:38 -07003820 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003821 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003822}
3823
3824void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
3825 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003826 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003827 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003828 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003829}
3830
3831void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3832 const VkSubpassEndInfo *pSubpassEndInfo) {
3833 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003834 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003835}
3836
3837void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3838 const VkSubpassEndInfo *pSubpassEndInfo) {
3839 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003840 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2KHR);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003841}
3842
sfricke-samsung85584a72021-09-30 21:43:38 -07003843bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
3844 CMD_TYPE cmd) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003845 bool skip = false;
3846
3847 auto cb_context = GetAccessContext(commandBuffer);
3848 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003849 if (!cb_context) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06003850
sfricke-samsung85584a72021-09-30 21:43:38 -07003851 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003852 skip |= sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003853 return skip;
3854}
3855
3856bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
3857 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003858 skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003859 return skip;
3860}
3861
Mike Schuchardt2df08912020-12-15 16:28:09 -08003862bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003863 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003864 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003865 return skip;
3866}
3867
3868bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003869 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003870 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
sfricke-samsung85584a72021-09-30 21:43:38 -07003871 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
John Zulauf355e49b2020-04-24 15:11:15 -06003872 return skip;
3873}
3874
sfricke-samsung85584a72021-09-30 21:43:38 -07003875void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd) {
John Zulaufe5da6e52020-03-18 15:32:18 -06003876 // Resolve all the subpass contexts to the command buffer contexts
3877 auto cb_context = GetAccessContext(commandBuffer);
3878 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003879 if (!cb_context) return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003880
sfricke-samsung85584a72021-09-30 21:43:38 -07003881 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003882 sync_op.Record(cb_context);
3883 return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003884}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003885
John Zulauf33fc1d52020-07-17 11:01:10 -06003886// Simple heuristic rule to detect WAW operations representing algorithmically safe or incremental
3887// updates to a resource that do not conflict at the byte level.
3888// TODO: Revisit this rule to see if it needs to be tighter or looser
3889// TODO: Add programmatic control over suppression heuristics
3890bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
3891 return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
3892}
3893
John Zulauf3d84f1b2020-03-09 13:33:25 -06003894void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06003895 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06003896 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003897}
3898
3899void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06003900 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06003901 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003902}
3903
3904void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
sfricke-samsung85584a72021-09-30 21:43:38 -07003905 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2KHR);
John Zulauf5a1a5382020-06-22 17:23:25 -06003906 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003907}
locke-lunarga19c71d2020-03-02 18:17:04 -07003908
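// Shared validation for vkCmdCopyBufferToImage and its 2KHR variant; the region type and CopyCommandVersion select the
// function name used in error messages.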
Jeff Leger178b1e52020-10-05 12:22:23 -04003909template <typename BufferImageCopyRegionType>
3910bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3911 VkImageLayout dstImageLayout, uint32_t regionCount,
3912 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003913 bool skip = false;
3914 const auto *cb_access_context = GetAccessContext(commandBuffer);
3915 assert(cb_access_context);
3916 if (!cb_access_context) return skip;
3917
Jeff Leger178b1e52020-10-05 12:22:23 -04003918 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3919 const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
3920
locke-lunarga19c71d2020-03-02 18:17:04 -07003921 const auto *context = cb_access_context->GetCurrentAccessContext();
3922 assert(context);
3923 if (!context) return skip;
3924
3925 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07003926 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3927
3928 for (uint32_t region = 0; region < regionCount; region++) {
3929 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07003930 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07003931 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003932 if (src_buffer) {
3933 ResourceAccessRange src_range =
3934 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003935 hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf477700e2021-01-06 11:41:49 -07003936 if (hazard.hazard) {
3937 // PHASE1 TODO -- add tag information to log msg when useful.
3938 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
3939 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3940 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003941 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003942 }
3943 }
3944
Jeremy Gebben40a22942020-12-22 14:22:06 -07003945 hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf477700e2021-01-06 11:41:49 -07003946 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003947 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003948 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003949 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003950 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003951 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003952 }
3953 if (skip) break;
3954 }
3955 if (skip) break;
3956 }
3957 return skip;
3958}
3959
Jeff Leger178b1e52020-10-05 12:22:23 -04003960bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3961 VkImageLayout dstImageLayout, uint32_t regionCount,
3962 const VkBufferImageCopy *pRegions) const {
3963 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
3964 COPY_COMMAND_VERSION_1);
3965}
3966
3967bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3968 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
3969 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3970 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3971 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3972}
3973
3974template <typename BufferImageCopyRegionType>
3975void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3976 VkImageLayout dstImageLayout, uint32_t regionCount,
3977 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003978 auto *cb_access_context = GetAccessContext(commandBuffer);
3979 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003980
3981 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3982 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
3983
3984 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003985 auto *context = cb_access_context->GetCurrentAccessContext();
3986 assert(context);
3987
3988 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06003989 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003990
3991 for (uint32_t region = 0; region < regionCount; region++) {
3992 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07003993 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003994 if (src_buffer) {
3995 ResourceAccessRange src_range =
3996 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003997 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003998 }
Jeremy Gebben40a22942020-12-22 14:22:06 -07003999 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004000 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004001 }
4002 }
4003}
4004
Jeff Leger178b1e52020-10-05 12:22:23 -04004005void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4006 VkImageLayout dstImageLayout, uint32_t regionCount,
4007 const VkBufferImageCopy *pRegions) {
4008 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
4009 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
4010}
4011
4012void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4013 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
4014 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
4015 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4016 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4017 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
4018}
4019
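// Shared validation for vkCmdCopyImageToBuffer and its 2KHR variant: per region, check the image subresource for
// transfer-read hazards and the buffer range (derived from bufferOffset and the image format) for transfer-write hazards.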
4020template <typename BufferImageCopyRegionType>
4021bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4022 VkBuffer dstBuffer, uint32_t regionCount,
4023 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004024 bool skip = false;
4025 const auto *cb_access_context = GetAccessContext(commandBuffer);
4026 assert(cb_access_context);
4027 if (!cb_access_context) return skip;
4028
Jeff Leger178b1e52020-10-05 12:22:23 -04004029 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4030 const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
4031
locke-lunarga19c71d2020-03-02 18:17:04 -07004032 const auto *context = cb_access_context->GetCurrentAccessContext();
4033 assert(context);
4034 if (!context) return skip;
4035
4036 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4037 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebben6fbf8242021-06-21 09:14:46 -06004038 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
locke-lunarga19c71d2020-03-02 18:17:04 -07004039 for (uint32_t region = 0; region < regionCount; region++) {
4040 const auto &copy_region = pRegions[region];
4041 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004042 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07004043 copy_region.imageOffset, copy_region.imageExtent);
4044 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004045 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004046 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004047 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004048 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004049 }
John Zulauf477700e2021-01-06 11:41:49 -07004050 if (dst_mem) {
4051 ResourceAccessRange dst_range =
4052 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004053 hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf477700e2021-01-06 11:41:49 -07004054 if (hazard.hazard) {
4055 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4056 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4057 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004058 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004059 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004060 }
4061 }
4062 if (skip) break;
4063 }
4064 return skip;
4065}
4066
Jeff Leger178b1e52020-10-05 12:22:23 -04004067bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
4068 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
4069 const VkBufferImageCopy *pRegions) const {
4070 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
4071 COPY_COMMAND_VERSION_1);
4072}
4073
4074bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4075 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
4076 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4077 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4078 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
4079}
4080
4081template <typename BufferImageCopyRegionType>
4082void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4083 VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
4084 CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004085 auto *cb_access_context = GetAccessContext(commandBuffer);
4086 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004087
4088 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4089 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
4090
4091 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004092 auto *context = cb_access_context->GetCurrentAccessContext();
4093 assert(context);
4094
4095 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004096 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07004099
4100 for (uint32_t region = 0; region < regionCount; region++) {
4101 const auto &copy_region = pRegions[region];
4102 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004103 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004104 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004105 if (dst_buffer) {
4106 ResourceAccessRange dst_range =
4107 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07004108 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004109 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004110 }
4111 }
4112}
4113
Jeff Leger178b1e52020-10-05 12:22:23 -04004114void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4115 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
4116 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
4117 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
4118}
4119
4120void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4121 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
4122 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
4123 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4124 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4125 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
4126}
4127
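// Shared validation for vkCmdBlitImage and vkCmdBlitImage2KHR. Blit offsets may be given in either order, so each region
// is normalized to a min-corner offset plus an absolute extent before hazard detection.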
4128template <typename RegionType>
4129bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4130 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4131 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004132 bool skip = false;
4133 const auto *cb_access_context = GetAccessContext(commandBuffer);
4134 assert(cb_access_context);
4135 if (!cb_access_context) return skip;
4136
4137 const auto *context = cb_access_context->GetCurrentAccessContext();
4138 assert(context);
4139 if (!context) return skip;
4140
4141 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4142 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4143
4144 for (uint32_t region = 0; region < regionCount; region++) {
4145 const auto &blit_region = pRegions[region];
4146 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004147 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4148 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4149 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4150 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4151 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4152 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004153 auto hazard = context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004154 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004155 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004156 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004157 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004158 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004159 }
4160 }
4161
4162 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004163 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4164 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4165 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4166 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4167 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4168 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004169 auto hazard = context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004170 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004171 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004172 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004173 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004174 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004175 }
4176 if (skip) break;
4177 }
4178 }
4179
4180 return skip;
4181}
4182
Jeff Leger178b1e52020-10-05 12:22:23 -04004183bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4184 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4185 const VkImageBlit *pRegions, VkFilter filter) const {
4186 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
4187 "vkCmdBlitImage");
4188}
4189
4190bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
4191 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
4192 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4193 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4194 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
4195}
4196
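// Record phase shared by vkCmdBlitImage and vkCmdBlitImage2KHR: normalize each region as above, then update access state
// for the source (read) and destination (write) subresources.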
4197template <typename RegionType>
4198void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4199 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4200 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004201 auto *cb_access_context = GetAccessContext(commandBuffer);
4202 assert(cb_access_context);
4203 auto *context = cb_access_context->GetCurrentAccessContext();
4204 assert(context);
4205
4206 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004207 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004208
4209 for (uint32_t region = 0; region < regionCount; region++) {
4210 const auto &blit_region = pRegions[region];
4211 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004212 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4213 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4214 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4215 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4216 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4217 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004218 context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004219 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004220 }
4221 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004222 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4223 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4224 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4225 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4226 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4227 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07004228 context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004229 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004230 }
4231 }
4232}
locke-lunarg36ba2592020-04-03 09:42:04 -06004233
Jeff Leger178b1e52020-10-05 12:22:23 -04004234void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4235 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4236 const VkImageBlit *pRegions, VkFilter filter) {
4237 auto *cb_access_context = GetAccessContext(commandBuffer);
4238 assert(cb_access_context);
4239 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
4240 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4241 pRegions, filter);
4242 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
4243}
4244
4245void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
4246 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
4247 auto *cb_access_context = GetAccessContext(commandBuffer);
4248 assert(cb_access_context);
4249 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
4250 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4251 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4252 pBlitImageInfo->filter, tag);
4253}
4254
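// Validation helper for indirect draw/dispatch parameter buffers. An indirect command reads 'drawCount'
// structures of 'struct_size' bytes from 'buffer', starting at 'offset' and spaced 'stride' bytes apart.
// When the structures are tightly packed (stride == struct_size) a single combined range is checked;
// otherwise each structure gets its own range and checking stops at the first hazard reported.
// Illustrative example (hypothetical values): struct_size = 16, stride = 32, drawCount = 3 checks the
// byte ranges [offset, offset + 16), [offset + 32, offset + 48) and [offset + 64, offset + 80).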
John Zulauffaea0ee2021-01-14 14:01:32 -07004255bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4256 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
4257 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
4258 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004259 bool skip = false;
4260 if (drawCount == 0) return skip;
4261
4262 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4263 VkDeviceSize size = struct_size;
4264 if (drawCount == 1 || stride == size) {
4265 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004266 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06004267 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4268 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004269 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004270 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004271 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004272 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004273 }
4274 } else {
4275 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004276 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06004277 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4278 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004279 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004280 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
4281 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004282 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004283 break;
4284 }
4285 }
4286 }
4287 return skip;
4288}
4289
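// Record-side counterpart of ValidateIndirectBuffer: marks the same byte ranges of the indirect parameter
// buffer as INDIRECT_COMMAND_READ accesses at 'tag' so that later commands can be checked against them.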
John Zulauf14940722021-04-12 15:19:02 -06004290void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag tag, const VkDeviceSize struct_size,
locke-lunarg61870c22020-06-09 14:51:50 -06004291 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
4292 uint32_t stride) {
locke-lunargff255f92020-05-13 18:53:52 -06004293 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4294 VkDeviceSize size = struct_size;
4295 if (drawCount == 1 || stride == size) {
4296 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004297 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004298 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004299 } else {
4300 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004301 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004302 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
4303 tag);
locke-lunargff255f92020-05-13 18:53:52 -06004304 }
4305 }
4306}
4307
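// The draw count consumed by the *Count commands is a single uint32_t, so only a 4-byte range starting at
// 'offset' is checked for read hazards against the count buffer.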
John Zulauffaea0ee2021-01-14 14:01:32 -07004308bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4309 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4310 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004311 bool skip = false;
4312
4313 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004314 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06004315 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4316 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06004317 skip |= LogError(count_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004318 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004319 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004320 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004321 }
4322 return skip;
4323}
4324
John Zulauf14940722021-04-12 15:19:02 -06004325void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag tag, VkBuffer buffer, VkDeviceSize offset) {
locke-lunargff255f92020-05-13 18:53:52 -06004326 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004327 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004328 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004329}
4330
locke-lunarg36ba2592020-04-03 09:42:04 -06004331bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004332 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004333 const auto *cb_access_context = GetAccessContext(commandBuffer);
4334 assert(cb_access_context);
4335 if (!cb_access_context) return skip;
4336
locke-lunarg61870c22020-06-09 14:51:50 -06004337 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004338 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004339}
4340
4341void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004342 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004343 auto *cb_access_context = GetAccessContext(commandBuffer);
4344 assert(cb_access_context);
4345 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004346
locke-lunarg61870c22020-06-09 14:51:50 -06004347 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004348}
locke-lunarge1a67022020-04-29 00:15:36 -06004349
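// vkCmdDispatchIndirect reads exactly one VkDispatchIndirectCommand (three uint32_t values), so the
// indirect buffer is validated as a single structure: drawCount = 1, stride = sizeof(VkDispatchIndirectCommand).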
4350bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004351 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004352 const auto *cb_access_context = GetAccessContext(commandBuffer);
4353 assert(cb_access_context);
4354 if (!cb_access_context) return skip;
4355
4356 const auto *context = cb_access_context->GetCurrentAccessContext();
4357 assert(context);
4358 if (!context) return skip;
4359
locke-lunarg61870c22020-06-09 14:51:50 -06004360 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004361 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4362 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004363 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004364}
4365
4366void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004367 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004368 auto *cb_access_context = GetAccessContext(commandBuffer);
4369 assert(cb_access_context);
4370 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4371 auto *context = cb_access_context->GetCurrentAccessContext();
4372 assert(context);
4373
locke-lunarg61870c22020-06-09 14:51:50 -06004374 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4375 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004376}
4377
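// Direct draw validation follows a common pattern: check the descriptor sets bound for graphics, the
// vertex (and, for indexed draws, index) buffer reads, and the current subpass attachment accesses.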
4378bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4379 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004380 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004381 const auto *cb_access_context = GetAccessContext(commandBuffer);
4382 assert(cb_access_context);
4383 if (!cb_access_context) return skip;
4384
locke-lunarg61870c22020-06-09 14:51:50 -06004385 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4386 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4387 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004388 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004389}
4390
4391void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4392 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004393 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004394 auto *cb_access_context = GetAccessContext(commandBuffer);
4395 assert(cb_access_context);
4396 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004397
locke-lunarg61870c22020-06-09 14:51:50 -06004398 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4399 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4400 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004401}
4402
4403bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4404 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004405 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004406 const auto *cb_access_context = GetAccessContext(commandBuffer);
4407 assert(cb_access_context);
4408 if (!cb_access_context) return skip;
4409
locke-lunarg61870c22020-06-09 14:51:50 -06004410 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4411 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4412 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004413 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004414}
4415
4416void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4417 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004418 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004419 auto *cb_access_context = GetAccessContext(commandBuffer);
4420 assert(cb_access_context);
4421 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004422
locke-lunarg61870c22020-06-09 14:51:50 -06004423 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4424 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4425 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004426}
4427
4428bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4429 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004430 bool skip = false;
4431 if (drawCount == 0) return skip;
4432
locke-lunargff255f92020-05-13 18:53:52 -06004433 const auto *cb_access_context = GetAccessContext(commandBuffer);
4434 assert(cb_access_context);
4435 if (!cb_access_context) return skip;
4436
4437 const auto *context = cb_access_context->GetCurrentAccessContext();
4438 assert(context);
4439 if (!context) return skip;
4440
locke-lunarg61870c22020-06-09 14:51:50 -06004441 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4442 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004443 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4444 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004445
4446    // TODO: For now, we validate the whole vertex buffer, which may cause some false positives.
4447    // The contents of the VkDrawIndirectCommand buffer can still change before queue submission.
4448    // We will validate the vertex buffer at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004449 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004450 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004451}
4452
4453void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4454 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004455 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004456 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004457 auto *cb_access_context = GetAccessContext(commandBuffer);
4458 assert(cb_access_context);
4459 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4460 auto *context = cb_access_context->GetCurrentAccessContext();
4461 assert(context);
4462
locke-lunarg61870c22020-06-09 14:51:50 -06004463 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4464 cb_access_context->RecordDrawSubpassAttachment(tag);
4465 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004466
4467    // TODO: For now, we record the whole vertex buffer, which may cause some false positives.
4468    // The contents of the VkDrawIndirectCommand buffer can still change before queue submission.
4469    // We will record the vertex buffer at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004470 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004471}
4472
4473bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4474 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004475 bool skip = false;
4476 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004477 const auto *cb_access_context = GetAccessContext(commandBuffer);
4478 assert(cb_access_context);
4479 if (!cb_access_context) return skip;
4480
4481 const auto *context = cb_access_context->GetCurrentAccessContext();
4482 assert(context);
4483 if (!context) return skip;
4484
locke-lunarg61870c22020-06-09 14:51:50 -06004485 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4486 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004487 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4488 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004489
4490    // TODO: For now, we validate the whole index and vertex buffers, which may cause some false positives.
4491    // The contents of the VkDrawIndexedIndirectCommand buffer can still change before queue submission.
4492    // We will validate the index and vertex buffers at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004493 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004494 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004495}
4496
4497void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4498 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004499 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004500 auto *cb_access_context = GetAccessContext(commandBuffer);
4501 assert(cb_access_context);
4502 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4503 auto *context = cb_access_context->GetCurrentAccessContext();
4504 assert(context);
4505
locke-lunarg61870c22020-06-09 14:51:50 -06004506 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4507 cb_access_context->RecordDrawSubpassAttachment(tag);
4508 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004509
4510    // TODO: For now, we record the whole index and vertex buffers, which may cause some false positives.
4511    // The contents of the VkDrawIndexedIndirectCommand buffer can still change before queue submission.
4512    // We will record the index and vertex buffers at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004513 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004514}
4515
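// Shared validation for vkCmdDrawIndirectCount and its KHR/AMD aliases: up to 'maxDrawCount' indirect
// structures in the parameter buffer are checked, plus a 4-byte read of the count buffer itself.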
4516bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4517 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4518 uint32_t stride, const char *function) const {
4519 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004520 const auto *cb_access_context = GetAccessContext(commandBuffer);
4521 assert(cb_access_context);
4522 if (!cb_access_context) return skip;
4523
4524 const auto *context = cb_access_context->GetCurrentAccessContext();
4525 assert(context);
4526 if (!context) return skip;
4527
locke-lunarg61870c22020-06-09 14:51:50 -06004528 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4529 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004530 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4531 maxDrawCount, stride, function);
4532 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004533
4534    // TODO: For now, we validate the whole vertex buffer, which may cause some false positives.
4535    // The contents of the VkDrawIndirectCommand buffer can still change before queue submission.
4536    // We will validate the vertex buffer at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004537 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004538 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004539}
4540
4541bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4542 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4543 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004544 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4545 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004546}
4547
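// Record-side helper shared by the DrawIndirectCount variants. The real draw count lives in the count
// buffer and is not known at record time, so only the first VkDrawIndirectCommand slot is recorded here.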
sfricke-samsung85584a72021-09-30 21:43:38 -07004548void SyncValidator::RecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4549 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4550 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06004551 auto *cb_access_context = GetAccessContext(commandBuffer);
4552 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07004553 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06004554 auto *context = cb_access_context->GetCurrentAccessContext();
4555 assert(context);
4556
locke-lunarg61870c22020-06-09 14:51:50 -06004557 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4558 cb_access_context->RecordDrawSubpassAttachment(tag);
4559 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4560 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004561
4562    // TODO: For now, we record the whole vertex buffer, which may cause some false positives.
4563    // The contents of the VkDrawIndirectCommand buffer can still change before queue submission.
4564    // We will record the vertex buffer at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004565 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004566}
4567
sfricke-samsung85584a72021-09-30 21:43:38 -07004568void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4569 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4570 uint32_t stride) {
4571 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4572 stride);
4573 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4574 CMD_DRAWINDIRECTCOUNT);
4575}
locke-lunarge1a67022020-04-29 00:15:36 -06004576bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4577 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4578 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004579 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4580 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004581}
4582
4583void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4584 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4585 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004586 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4587 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004588 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4589 CMD_DRAWINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06004590}
4591
4592bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4593 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4594 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004595 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4596 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004597}
4598
4599void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4600 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4601 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004602 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4603 stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004604 RecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4605 CMD_DRAWINDIRECTCOUNTAMD);
locke-lunargff255f92020-05-13 18:53:52 -06004606}
4607
4608bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4609 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4610 uint32_t stride, const char *function) const {
4611 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004612 const auto *cb_access_context = GetAccessContext(commandBuffer);
4613 assert(cb_access_context);
4614 if (!cb_access_context) return skip;
4615
4616 const auto *context = cb_access_context->GetCurrentAccessContext();
4617 assert(context);
4618 if (!context) return skip;
4619
locke-lunarg61870c22020-06-09 14:51:50 -06004620 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4621 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004622 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4623 offset, maxDrawCount, stride, function);
4624 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004625
4626    // TODO: For now, we validate the whole index and vertex buffers, which may cause some false positives.
4627    // The contents of the VkDrawIndexedIndirectCommand buffer can still change before queue submission.
4628    // We will validate the index and vertex buffers at queue submission time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004629 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004630 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004631}
4632
4633bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4634 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4635 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004636 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4637 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004638}
4639
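// Indexed counterpart of RecordCmdDrawIndexedIndirect/RecordCmdDrawIndirectCount: records the count buffer
// read and the first VkDrawIndexedIndirectCommand slot; the index and vertex buffers are recorded
// conservatively as whole-buffer accesses.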
sfricke-samsung85584a72021-09-30 21:43:38 -07004640void SyncValidator::RecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4641 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4642 uint32_t stride, CMD_TYPE cmd_type) {
locke-lunargff255f92020-05-13 18:53:52 -06004643 auto *cb_access_context = GetAccessContext(commandBuffer);
4644 assert(cb_access_context);
sfricke-samsung85584a72021-09-30 21:43:38 -07004645 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunargff255f92020-05-13 18:53:52 -06004646 auto *context = cb_access_context->GetCurrentAccessContext();
4647 assert(context);
4648
locke-lunarg61870c22020-06-09 14:51:50 -06004649 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4650 cb_access_context->RecordDrawSubpassAttachment(tag);
4651 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4652 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004653
4654    // TODO: For now, we record the whole index and vertex buffers, which may cause some false positives.
4655    // The contents of the VkDrawIndexedIndirectCommand buffer can still change before queue submission.
locke-lunarg61870c22020-06-09 14:51:50 -06004656    // We will record the index and vertex buffers at queue submission time in the future.
4657 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004658}
4659
sfricke-samsung85584a72021-09-30 21:43:38 -07004660void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4661 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4662 uint32_t maxDrawCount, uint32_t stride) {
4663 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4664 maxDrawCount, stride);
4665 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4666 CMD_DRAWINDEXEDINDIRECTCOUNT);
4667}
4668
locke-lunarge1a67022020-04-29 00:15:36 -06004669bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4670 VkDeviceSize offset, VkBuffer countBuffer,
4671 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4672 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004673 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4674 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004675}
4676
4677void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4678 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4679 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004680 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4681 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004682 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4683 CMD_DRAWINDEXEDINDIRECTCOUNTKHR);
locke-lunarge1a67022020-04-29 00:15:36 -06004684}
4685
4686bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4687 VkDeviceSize offset, VkBuffer countBuffer,
4688 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4689 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004690 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4691 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004692}
4693
4694void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4695 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4696 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004697 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4698 maxDrawCount, stride);
sfricke-samsung85584a72021-09-30 21:43:38 -07004699 RecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4700 CMD_DRAWINDEXEDINDIRECTCOUNTAMD);
locke-lunarge1a67022020-04-29 00:15:36 -06004701}
4702
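// Image clears are modeled as transfer-style writes (SYNC_CLEAR_TRANSFER_WRITE): each
// VkImageSubresourceRange in pRanges is hazard-checked, and later updated in the record phase, independently.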
4703bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4704 const VkClearColorValue *pColor, uint32_t rangeCount,
4705 const VkImageSubresourceRange *pRanges) const {
4706 bool skip = false;
4707 const auto *cb_access_context = GetAccessContext(commandBuffer);
4708 assert(cb_access_context);
4709 if (!cb_access_context) return skip;
4710
4711 const auto *context = cb_access_context->GetCurrentAccessContext();
4712 assert(context);
4713 if (!context) return skip;
4714
4715 const auto *image_state = Get<IMAGE_STATE>(image);
4716
4717 for (uint32_t index = 0; index < rangeCount; index++) {
4718 const auto &range = pRanges[index];
4719 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004720 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004721 if (hazard.hazard) {
4722 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004723 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004724 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004725 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004726 }
4727 }
4728 }
4729 return skip;
4730}
4731
4732void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4733 const VkClearColorValue *pColor, uint32_t rangeCount,
4734 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004735 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004736 auto *cb_access_context = GetAccessContext(commandBuffer);
4737 assert(cb_access_context);
4738 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4739 auto *context = cb_access_context->GetCurrentAccessContext();
4740 assert(context);
4741
4742 const auto *image_state = Get<IMAGE_STATE>(image);
4743
4744 for (uint32_t index = 0; index < rangeCount; index++) {
4745 const auto &range = pRanges[index];
4746 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004747 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004748 }
4749 }
4750}
4751
4752bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4753 VkImageLayout imageLayout,
4754 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4755 const VkImageSubresourceRange *pRanges) const {
4756 bool skip = false;
4757 const auto *cb_access_context = GetAccessContext(commandBuffer);
4758 assert(cb_access_context);
4759 if (!cb_access_context) return skip;
4760
4761 const auto *context = cb_access_context->GetCurrentAccessContext();
4762 assert(context);
4763 if (!context) return skip;
4764
4765 const auto *image_state = Get<IMAGE_STATE>(image);
4766
4767 for (uint32_t index = 0; index < rangeCount; index++) {
4768 const auto &range = pRanges[index];
4769 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004770 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004771 if (hazard.hazard) {
4772 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004773 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004774 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004775 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004776 }
4777 }
4778 }
4779 return skip;
4780}
4781
4782void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4783 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4784 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004785 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004786 auto *cb_access_context = GetAccessContext(commandBuffer);
4787 assert(cb_access_context);
4788 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
4789 auto *context = cb_access_context->GetCurrentAccessContext();
4790 assert(context);
4791
4792 const auto *image_state = Get<IMAGE_STATE>(image);
4793
4794 for (uint32_t index = 0; index < rangeCount; index++) {
4795 const auto &range = pRanges[index];
4796 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004797 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004798 }
4799 }
4800}
4801
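// vkCmdCopyQueryPoolResults: the destination buffer is treated as a transfer write covering 'queryCount'
// result slots of 'stride' bytes starting at 'dstOffset'. Accesses to the query pool itself are not
// tracked yet (see the TODO below).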
4802bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
4803 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
4804 VkDeviceSize dstOffset, VkDeviceSize stride,
4805 VkQueryResultFlags flags) const {
4806 bool skip = false;
4807 const auto *cb_access_context = GetAccessContext(commandBuffer);
4808 assert(cb_access_context);
4809 if (!cb_access_context) return skip;
4810
4811 const auto *context = cb_access_context->GetCurrentAccessContext();
4812 assert(context);
4813 if (!context) return skip;
4814
4815 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4816
4817 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004818 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004819 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004820 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004821 skip |=
4822 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4823 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004824 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004825 }
4826 }
locke-lunargff255f92020-05-13 18:53:52 -06004827
4828    // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004829 return skip;
4830}
4831
4832void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
4833 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4834 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004835 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
4836 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06004837 auto *cb_access_context = GetAccessContext(commandBuffer);
4838 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06004839 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06004840 auto *context = cb_access_context->GetCurrentAccessContext();
4841 assert(context);
4842
4843 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4844
4845 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004846 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004847 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004848 }
locke-lunargff255f92020-05-13 18:53:52 -06004849
4850    // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004851}
4852
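// vkCmdFillBuffer allows size == VK_WHOLE_SIZE, so the buffer-aware MakeRange overload is used here;
// it presumably clamps the range to the actual buffer size rather than using the raw 'size' value.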
4853bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4854 VkDeviceSize size, uint32_t data) const {
4855 bool skip = false;
4856 const auto *cb_access_context = GetAccessContext(commandBuffer);
4857 assert(cb_access_context);
4858 if (!cb_access_context) return skip;
4859
4860 const auto *context = cb_access_context->GetCurrentAccessContext();
4861 assert(context);
4862 if (!context) return skip;
4863
4864 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4865
4866 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004867 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004868 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004869 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004870 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004871 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004872 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004873 }
4874 }
4875 return skip;
4876}
4877
4878void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4879 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004880 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06004881 auto *cb_access_context = GetAccessContext(commandBuffer);
4882 assert(cb_access_context);
4883 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
4884 auto *context = cb_access_context->GetCurrentAccessContext();
4885 assert(context);
4886
4887 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4888
4889 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004890 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004891 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004892 }
4893}
4894
4895bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4896 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4897 const VkImageResolve *pRegions) const {
4898 bool skip = false;
4899 const auto *cb_access_context = GetAccessContext(commandBuffer);
4900 assert(cb_access_context);
4901 if (!cb_access_context) return skip;
4902
4903 const auto *context = cb_access_context->GetCurrentAccessContext();
4904 assert(context);
4905 if (!context) return skip;
4906
4907 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4908 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4909
4910 for (uint32_t region = 0; region < regionCount; region++) {
4911 const auto &resolve_region = pRegions[region];
4912 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004913 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06004914 resolve_region.srcOffset, resolve_region.extent);
4915 if (hazard.hazard) {
4916 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004917 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004918 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004919 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004920 }
4921 }
4922
4923 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004924 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06004925 resolve_region.dstOffset, resolve_region.extent);
4926 if (hazard.hazard) {
4927 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004928 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004929 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004930 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004931 }
4932 if (skip) break;
4933 }
4934 }
4935
4936 return skip;
4937}
4938
4939void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4940 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4941 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004942 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4943 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06004944 auto *cb_access_context = GetAccessContext(commandBuffer);
4945 assert(cb_access_context);
4946 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
4947 auto *context = cb_access_context->GetCurrentAccessContext();
4948 assert(context);
4949
4950 auto *src_image = Get<IMAGE_STATE>(srcImage);
4951 auto *dst_image = Get<IMAGE_STATE>(dstImage);
4952
4953 for (uint32_t region = 0; region < regionCount; region++) {
4954 const auto &resolve_region = pRegions[region];
4955 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004956 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004957 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004958 }
4959 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004960 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004961 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004962 }
4963 }
4964}
4965
Jeff Leger178b1e52020-10-05 12:22:23 -04004966bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
4967 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
4968 bool skip = false;
4969 const auto *cb_access_context = GetAccessContext(commandBuffer);
4970 assert(cb_access_context);
4971 if (!cb_access_context) return skip;
4972
4973 const auto *context = cb_access_context->GetCurrentAccessContext();
4974 assert(context);
4975 if (!context) return skip;
4976
4977 const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
4978 const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
4979
4980 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
4981 const auto &resolve_region = pResolveImageInfo->pRegions[region];
4982 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004983 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04004984 resolve_region.srcOffset, resolve_region.extent);
4985 if (hazard.hazard) {
4986 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
4987 "vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
4988 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004989 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004990 }
4991 }
4992
4993 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004994 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04004995 resolve_region.dstOffset, resolve_region.extent);
4996 if (hazard.hazard) {
4997 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
4998 "vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
4999 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005000 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005001 }
5002 if (skip) break;
5003 }
5004 }
5005
5006 return skip;
5007}
5008
5009void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5010 const VkResolveImageInfo2KHR *pResolveImageInfo) {
5011 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
5012 auto *cb_access_context = GetAccessContext(commandBuffer);
5013 assert(cb_access_context);
5014 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
5015 auto *context = cb_access_context->GetCurrentAccessContext();
5016 assert(context);
5017
5018 auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5019 auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
5020
5021 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5022 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5023 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005024 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005025 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005026 }
5027 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005028 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07005029 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005030 }
5031 }
5032}
5033
locke-lunarge1a67022020-04-29 00:15:36 -06005034bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5035 VkDeviceSize dataSize, const void *pData) const {
5036 bool skip = false;
5037 const auto *cb_access_context = GetAccessContext(commandBuffer);
5038 assert(cb_access_context);
5039 if (!cb_access_context) return skip;
5040
5041 const auto *context = cb_access_context->GetCurrentAccessContext();
5042 assert(context);
5043 if (!context) return skip;
5044
5045 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5046
5047 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005048 // VK_WHOLE_SIZE not allowed
5049 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005050 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06005051 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005052 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005053 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005054 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005055 }
5056 }
5057 return skip;
5058}
5059
5060void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5061 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005062 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06005063 auto *cb_access_context = GetAccessContext(commandBuffer);
5064 assert(cb_access_context);
5065 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
5066 auto *context = cb_access_context->GetCurrentAccessContext();
5067 assert(context);
5068
5069 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5070
5071 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005072 // VK_WHOLE_SIZE not allowed
5073 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005074 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005075 }
5076}
locke-lunargff255f92020-05-13 18:53:52 -06005077
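// vkCmdWriteBufferMarkerAMD writes a single 32-bit marker, so the destination is validated and recorded
// as a 4-byte transfer write at 'dstOffset'.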
5078bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5079 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5080 bool skip = false;
5081 const auto *cb_access_context = GetAccessContext(commandBuffer);
5082 assert(cb_access_context);
5083 if (!cb_access_context) return skip;
5084
5085 const auto *context = cb_access_context->GetCurrentAccessContext();
5086 assert(context);
5087 if (!context) return skip;
5088
5089 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5090
5091 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005092 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005093 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunargff255f92020-05-13 18:53:52 -06005094 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005095 skip |=
5096 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5097 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005098 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06005099 }
5100 }
5101 return skip;
5102}
5103
5104void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5105 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005106 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06005107 auto *cb_access_context = GetAccessContext(commandBuffer);
5108 assert(cb_access_context);
5109 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5110 auto *context = cb_access_context->GetCurrentAccessContext();
5111 assert(context);
5112
5113 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5114
5115 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005116 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07005117 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005118 }
5119}
John Zulauf49beb112020-11-04 16:06:31 -07005120
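// Event commands are implemented with SyncOp objects (for example SyncOpSetEvent and SyncOpResetEvent):
// the same object provides a Validate() pass for the PreCallValidate hooks and a Record() pass for the
// PostCallRecord hooks, keeping the two phases consistent.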
5121bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
5122 bool skip = false;
5123 const auto *cb_context = GetAccessContext(commandBuffer);
5124 assert(cb_context);
5125 if (!cb_context) return skip;
5126
John Zulauf36ef9282021-02-02 11:47:24 -07005127 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005128 return set_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005129}
5130
5131void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5132 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
5133 auto *cb_context = GetAccessContext(commandBuffer);
5134 assert(cb_context);
5135 if (!cb_context) return;
John Zulauf36ef9282021-02-02 11:47:24 -07005136 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
5137 set_event_op.Record(cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005138}
5139
John Zulauf4edde622021-02-15 08:54:50 -07005140bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5141 const VkDependencyInfoKHR *pDependencyInfo) const {
5142 bool skip = false;
5143 const auto *cb_context = GetAccessContext(commandBuffer);
5144 assert(cb_context);
5145 if (!cb_context || !pDependencyInfo) return skip;
5146
5147 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5148 return set_event_op.Validate(*cb_context);
5149}
5150
5151void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5152 const VkDependencyInfoKHR *pDependencyInfo) {
5153 StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
5154 auto *cb_context = GetAccessContext(commandBuffer);
5155 assert(cb_context);
5156 if (!cb_context || !pDependencyInfo) return;
5157
5158 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
5159 set_event_op.Record(cb_context);
5160}
5161
John Zulauf49beb112020-11-04 16:06:31 -07005162bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
5163 VkPipelineStageFlags stageMask) const {
5164 bool skip = false;
5165 const auto *cb_context = GetAccessContext(commandBuffer);
5166 assert(cb_context);
5167 if (!cb_context) return skip;
5168
John Zulauf36ef9282021-02-02 11:47:24 -07005169 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07005170 return reset_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005171}
5172
5173void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5174 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
5175 auto *cb_context = GetAccessContext(commandBuffer);
5176 assert(cb_context);
5177 if (!cb_context) return;
5178
John Zulauf36ef9282021-02-02 11:47:24 -07005179 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
5180 reset_event_op.Record(cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005181}
5182
John Zulauf4edde622021-02-15 08:54:50 -07005183bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5184 VkPipelineStageFlags2KHR stageMask) const {
5185 bool skip = false;
5186 const auto *cb_context = GetAccessContext(commandBuffer);
5187 assert(cb_context);
5188 if (!cb_context) return skip;
5189
5190 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
5191 return reset_event_op.Validate(*cb_context);
5192}
5193
5194void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
5195 VkPipelineStageFlags2KHR stageMask) {
5196 StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
5197 auto *cb_context = GetAccessContext(commandBuffer);
5198 assert(cb_context);
5199 if (!cb_context) return;
5200
5201 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
5202 reset_event_op.Record(cb_context);
5203}
5204
John Zulauf49beb112020-11-04 16:06:31 -07005205bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5206 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5207 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5208 uint32_t bufferMemoryBarrierCount,
5209 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5210 uint32_t imageMemoryBarrierCount,
5211 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
5212 bool skip = false;
5213 const auto *cb_context = GetAccessContext(commandBuffer);
5214 assert(cb_context);
5215 if (!cb_context) return skip;
5216
John Zulauf36ef9282021-02-02 11:47:24 -07005217 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
5218 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
5219 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufd5115702021-01-18 12:34:33 -07005220 return wait_events_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07005221}
5222
5223void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5224 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5225 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5226 uint32_t bufferMemoryBarrierCount,
5227 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5228 uint32_t imageMemoryBarrierCount,
5229 const VkImageMemoryBarrier *pImageMemoryBarriers) {
5230 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5231 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
5232 imageMemoryBarrierCount, pImageMemoryBarriers);
5233
5234 auto *cb_context = GetAccessContext(commandBuffer);
5235 assert(cb_context);
5236 if (!cb_context) return;
5237
John Zulauf36ef9282021-02-02 11:47:24 -07005238 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
5239 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
5240 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf8eda1562021-04-13 17:06:41 -06005241 wait_events_op.Record(cb_context);
5242 return;
John Zulauf4a6105a2020-11-17 15:11:05 -07005243}
5244
John Zulauf4edde622021-02-15 08:54:50 -07005245bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5246 const VkDependencyInfoKHR *pDependencyInfos) const {
5247 bool skip = false;
5248 const auto *cb_context = GetAccessContext(commandBuffer);
5249 assert(cb_context);
5250 if (!cb_context) return skip;
5251
5252 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
5253 skip |= wait_events_op.Validate(*cb_context);
5254 return skip;
5255}
5256
5257void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5258 const VkDependencyInfoKHR *pDependencyInfos) {
5259 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
5260
5261 auto *cb_context = GetAccessContext(commandBuffer);
5262 assert(cb_context);
5263 if (!cb_context) return;
5264
5265 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
5266 wait_events_op.Record(cb_context);
5267}
5268
John Zulauf4a6105a2020-11-17 15:11:05 -07005269void SyncEventState::ResetFirstScope() {
5270 for (const auto address_type : kAddressTypes) {
5271 first_scope[static_cast<size_t>(address_type)].clear();
5272 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005273 scope = SyncExecScope();
John Zulauf4a6105a2020-11-17 15:11:05 -07005274}
5275
5276// Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
John Zulauf4edde622021-02-15 08:54:50 -07005277SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd, VkPipelineStageFlags2KHR srcStageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005278 IgnoreReason reason = NotIgnored;
5279
John Zulauf4edde622021-02-15 08:54:50 -07005280 if ((CMD_WAITEVENTS2KHR == cmd) && (CMD_SETEVENT == last_command)) {
5281 reason = SetVsWait2;
5282 } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR) && !HasBarrier(0U, 0U)) {
5283 reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
John Zulauf4a6105a2020-11-17 15:11:05 -07005284 } else if (unsynchronized_set) {
5285 reason = SetRace;
5286 } else {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005287 const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07005288 if (missing_bits) reason = MissingStageBits;
5289 }
5290
5291 return reason;
5292}
5293
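// An event is considered synchronized if no command has touched it yet, or if the queried stage mask or the recorded
// barriers include ALL_COMMANDS, or the recorded barriers chain with the queried execution scope.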
Jeremy Gebben40a22942020-12-22 14:22:06 -07005294bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005295 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
5296 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
5297 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07005298}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005299
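// The original (non-synchronization2) barrier API supplies a single src/dst stage mask pair, so everything is packed into
// one barrier set; the DependencyInfo-based constructor below builds one barrier set per VkDependencyInfoKHR instead.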
John Zulauf36ef9282021-02-02 11:47:24 -07005300SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5301 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5302 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005303 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5304 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5305 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf4edde622021-02-15 08:54:50 -07005306 : SyncOpBase(cmd), barriers_(1) {
5307 auto &barrier_set = barriers_[0];
5308 barrier_set.dependency_flags = dependencyFlags;
5309 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
5310 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005311 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
John Zulauf4edde622021-02-15 08:54:50 -07005312 barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
5313 pMemoryBarriers);
5314 barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5315 bufferMemoryBarrierCount, pBufferMemoryBarriers);
5316 barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5317 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005318}
5319
John Zulauf4edde622021-02-15 08:54:50 -07005320SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
5321 const VkDependencyInfoKHR *dep_infos)
5322 : SyncOpBase(cmd), barriers_(event_count) {
5323 for (uint32_t i = 0; i < event_count; i++) {
5324 const auto &dep_info = dep_infos[i];
5325 auto &barrier_set = barriers_[i];
5326 barrier_set.dependency_flags = dep_info.dependencyFlags;
5327 auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
5328 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
5329 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
5330 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
5331 barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
5332 dep_info.pMemoryBarriers);
5333 barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
5334 dep_info.pBufferMemoryBarriers);
5335 barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
5336 dep_info.pImageMemoryBarriers);
5337 }
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005338}
5339
John Zulauf36ef9282021-02-02 11:47:24 -07005340SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulaufd5115702021-01-18 12:34:33 -07005341 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5342 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
5343 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5344 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5345 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005346 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
John Zulaufd5115702021-01-18 12:34:33 -07005347 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}
5348
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005349SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5350 const VkDependencyInfoKHR &dep_info)
John Zulauf4edde622021-02-15 08:54:50 -07005351 : SyncOpBarriers(cmd, sync_state, queue_flags, 1, &dep_info) {}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005352
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005353bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
5354 bool skip = false;
5355 const auto *context = cb_context.GetCurrentAccessContext();
5356 assert(context);
5357 if (!context) return skip;
John Zulauf6fdf3d02021-03-05 16:50:47 -07005358 assert(barriers_.size() == 1); // PipelineBarriers only support a single barrier set.
5359
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005360 // Validate Image Layout transitions
John Zulauf6fdf3d02021-03-05 16:50:47 -07005361 const auto &barrier_set = barriers_[0];
5362 for (const auto &image_barrier : barrier_set.image_memory_barriers) {
5363 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
5364 const auto *image_state = image_barrier.image.get();
5365 if (!image_state) continue;
5366 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
5367 if (hazard.hazard) {
5368 // PHASE1 TODO -- add tag information to log msg when useful.
5369 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005370 const auto image_handle = image_state->image();
John Zulauf6fdf3d02021-03-05 16:50:47 -07005371 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
5372 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5373 string_SyncHazard(hazard.hazard), image_barrier.index,
5374 sync_state.report_data->FormatHandle(image_handle).c_str(),
5375 cb_context.FormatUsage(hazard).c_str());
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005376 }
5377 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005378 return skip;
5379}
5380
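// Functor factory used by ApplyBarriers/ApplyGlobalBarriers for pipeline barriers: buffer and image ranges are the raw
// resource ranges offset by the resource base address, and the global scope covers the full address range.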
John Zulaufd5115702021-01-18 12:34:33 -07005381struct SyncOpPipelineBarrierFunctorFactory {
5382 using BarrierOpFunctor = PipelineBarrierOp;
5383 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5384 using GlobalBarrierOpFunctor = PipelineBarrierOp;
5385 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5386 using BufferRange = ResourceAccessRange;
5387 using ImageRange = subresource_adapter::ImageRangeGenerator;
5388 using GlobalRange = ResourceAccessRange;
5389
5390 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
5391 return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
5392 }
John Zulauf14940722021-04-12 15:19:02 -06005393 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07005394 return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
5395 }
5396 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
5397 return GlobalBarrierOpFunctor(barrier, false);
5398 }
5399
5400 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
5401 if (!SimpleBinding(buffer)) return ResourceAccessRange();
5402 const auto base_address = ResourceBaseAddress(buffer);
5403 return (range + base_address);
5404 }
John Zulauf110413c2021-03-20 05:38:38 -06005405 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulauf264cce02021-02-05 14:40:47 -07005406 if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
John Zulaufd5115702021-01-18 12:34:33 -07005407
5408 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06005409 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07005410 return range_gen;
5411 }
5412 GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
5413};
5414
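// Apply the per-resource (buffer/image) barriers one range generator at a time; the functor factory supplies both the
// range generator and the barrier application functor, so the same template serves pipeline barriers and event waits.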
5415template <typename Barriers, typename FunctorFactory>
John Zulauf14940722021-04-12 15:19:02 -06005416void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag tag,
John Zulaufd5115702021-01-18 12:34:33 -07005417 AccessContext *context) {
5418 for (const auto &barrier : barriers) {
5419 const auto *state = barrier.GetState();
5420 if (state) {
5421 auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
5422 auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
5423 auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
5424 UpdateMemoryAccessState(accesses, update_action, &range_gen);
5425 }
5426 }
5427}
5428
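// Global memory barriers are batched into a single apply functor and then applied across every address type using the
// factory's global range generator.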
5429template <typename Barriers, typename FunctorFactory>
John Zulauf14940722021-04-12 15:19:02 -06005430void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag tag,
John Zulaufd5115702021-01-18 12:34:33 -07005431 AccessContext *access_context) {
5432 auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
5433 for (const auto &barrier : barriers) {
5434 barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
5435 }
5436 for (const auto address_type : kAddressTypes) {
5437 auto range_gen = factory.MakeGlobalRangeGen(address_type);
5438 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
5439 }
5440}
5441
John Zulauf8eda1562021-04-13 17:06:41 -06005442ResourceUsageTag SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005443 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf8eda1562021-04-13 17:06:41 -06005444 auto *events_context = cb_context->GetCurrentEventsContext();
John Zulauf36ef9282021-02-02 11:47:24 -07005445 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf4fa68462021-04-26 21:04:22 -06005446 DoRecord(tag, access_context, events_context);
5447 return tag;
5448}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005449
John Zulauf4fa68462021-04-26 21:04:22 -06005450void SyncOpPipelineBarrier::DoRecord(const ResourceUsageTag tag, AccessContext *access_context,
5451 SyncEventsContext *events_context) const {
John Zulauf8eda1562021-04-13 17:06:41 -06005452 SyncOpPipelineBarrierFunctorFactory factory;
John Zulauf4edde622021-02-15 08:54:50 -07005453 // Pipeline barriers only have a single barrier set, unlike WaitEvents2
5454 assert(barriers_.size() == 1);
5455 const auto &barrier_set = barriers_[0];
5456 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5457 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5458 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulauf4edde622021-02-15 08:54:50 -07005459 if (barrier_set.single_exec_scope) {
John Zulauf8eda1562021-04-13 17:06:41 -06005460 events_context->ApplyBarrier(barrier_set.src_exec_scope, barrier_set.dst_exec_scope);
John Zulauf4edde622021-02-15 08:54:50 -07005461 } else {
5462 for (const auto &barrier : barrier_set.memory_barriers) {
John Zulauf8eda1562021-04-13 17:06:41 -06005463 events_context->ApplyBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
John Zulauf4edde622021-02-15 08:54:50 -07005464 }
5465 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005466}
5467
John Zulauf8eda1562021-04-13 17:06:41 -06005468bool SyncOpPipelineBarrier::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
5469 CommandBufferAccessContext *active_context) const {
John Zulauf4fa68462021-04-26 21:04:22 -06005470 // No Validation for replay, as the layout transition accesses are checked directly, and the src*Mask ordering is captured
5471 // with first access information.
John Zulauf8eda1562021-04-13 17:06:41 -06005472 return false;
5473}
5474
John Zulauf8eda1562021-04-13 17:06:41 -06005475
John Zulauf4edde622021-02-15 08:54:50 -07005476void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
5477 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
5478 const VkMemoryBarrier *barriers) {
5479 memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005480 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005481 const auto &barrier = barriers[barrier_index];
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005482 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005483 memory_barriers.emplace_back(sync_barrier);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005484 }
5485 if (0 == memory_barrier_count) {
5486 // If there are no global memory barriers, force an exec barrier
John Zulauf4edde622021-02-15 08:54:50 -07005487 memory_barriers.emplace_back(SyncBarrier(src, dst));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005488 }
John Zulauf4edde622021-02-15 08:54:50 -07005489 single_exec_scope = true;
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005490}
5491
John Zulauf4edde622021-02-15 08:54:50 -07005492void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5493 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5494 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
5495 buffer_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005496 for (uint32_t index = 0; index < barrier_count; index++) {
5497 const auto &barrier = barriers[index];
5498 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5499 if (buffer) {
5500 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5501 const auto range = MakeRange(barrier.offset, barrier_size);
5502 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005503 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005504 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005505 buffer_memory_barriers.emplace_back();
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005506 }
5507 }
5508}
5509
John Zulauf4edde622021-02-15 08:54:50 -07005510void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
5511 uint32_t memory_barrier_count, const VkMemoryBarrier2KHR *barriers) {
5512 memory_barriers.reserve(memory_barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005513 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005514 const auto &barrier = barriers[barrier_index];
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005515 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5516 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5517 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005518 memory_barriers.emplace_back(sync_barrier);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005519 }
John Zulauf4edde622021-02-15 08:54:50 -07005520 single_exec_scope = false;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005521}
5522
John Zulauf4edde622021-02-15 08:54:50 -07005523void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5524 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
5525 const VkBufferMemoryBarrier2KHR *barriers) {
5526 buffer_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005527 for (uint32_t index = 0; index < barrier_count; index++) {
5528 const auto &barrier = barriers[index];
5529 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5530 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5531 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5532 if (buffer) {
5533 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5534 const auto range = MakeRange(barrier.offset, barrier_size);
5535 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005536 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005537 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005538 buffer_memory_barriers.emplace_back();
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005539 }
5540 }
5541}
5542
John Zulauf4edde622021-02-15 08:54:50 -07005543void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5544 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5545 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
5546 image_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005547 for (uint32_t index = 0; index < barrier_count; index++) {
5548 const auto &barrier = barriers[index];
5549 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5550 if (image) {
5551 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5552 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005553 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005554 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005555 image_memory_barriers.emplace_back();
5556 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005557 }
5558 }
5559}
John Zulaufd5115702021-01-18 12:34:33 -07005560
John Zulauf4edde622021-02-15 08:54:50 -07005561void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5562 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
5563 const VkImageMemoryBarrier2KHR *barriers) {
5564 image_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005565 for (uint32_t index = 0; index < barrier_count; index++) {
5566 const auto &barrier = barriers[index];
5567 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5568 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5569 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5570 if (image) {
5571 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5572 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005573 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005574 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005575 image_memory_barriers.emplace_back();
5576 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005577 }
5578 }
5579}
5580
John Zulauf36ef9282021-02-02 11:47:24 -07005581SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
John Zulaufd5115702021-01-18 12:34:33 -07005582 const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5583 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5584 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5585 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005586 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005587 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
5588 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07005589 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07005590}
5591
John Zulauf4edde622021-02-15 08:54:50 -07005592SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
5593 const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
5594 : SyncOpBarriers(cmd, sync_state, queue_flags, eventCount, pDependencyInfo) {
5595 MakeEventsList(sync_state, eventCount, pEvents);
5596 assert(events_.size() == barriers_.size()); // Just so nobody gets clever and decides to cull the event or barrier arrays
5597}
5598
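// Validation for WaitEvents/WaitEvents2: reports HOST stage usage as unsupported, classifies waits that will be ignored
// (per SyncEventState::IsIgnoredByWait), checks image barrier layout transitions against each event's first scope, and
// reports srcStageMask bits not covered by any of the waited events.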
John Zulaufd5115702021-01-18 12:34:33 -07005599bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005600 const char *const ignored = "Wait operation is ignored for this event.";
5601 bool skip = false;
5602 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005603 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer();
John Zulaufd5115702021-01-18 12:34:33 -07005604
John Zulauf4edde622021-02-15 08:54:50 -07005605 for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
5606 const auto &barrier_set = barriers_[barrier_set_index];
5607 if (barrier_set.single_exec_scope) {
5608 if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5609 const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5610                skip |= sync_state.LogInfo(command_buffer_handle, vuid,
5611 "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
5612 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
5613 } else {
5614 const auto &barriers = barrier_set.memory_barriers;
5615 for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
5616 const auto &barrier = barriers[barrier_index];
5617 if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5618 const std::string vuid =
5619 std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5620                        skip |=
5621 sync_state.LogInfo(command_buffer_handle, vuid,
5622 "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
5623 CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
5624 "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
5625 }
5626 }
5627 }
5628 }
John Zulaufd5115702021-01-18 12:34:33 -07005629 }
5630
Jeremy Gebben40a22942020-12-22 14:22:06 -07005631 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulauf4edde622021-02-15 08:54:50 -07005632 VkPipelineStageFlags2KHR barrier_mask_params = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07005633 bool events_not_found = false;
John Zulauf669dfd52021-01-27 17:15:28 -07005634 const auto *events_context = cb_context.GetCurrentEventsContext();
5635 assert(events_context);
John Zulauf4edde622021-02-15 08:54:50 -07005636 size_t barrier_set_index = 0;
5637 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
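    // WaitEvents supplies one barrier set shared by every event, while WaitEvents2 supplies one DependencyInfo per event,
    // so the barrier set index only advances in the latter case.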
5638    for (const auto &event : events_) {
5640 const auto *sync_event = events_context->Get(event.get());
5641 const auto &barrier_set = barriers_[barrier_set_index];
5642 if (!sync_event) {
5643 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
5644 // or solve this with replay creating the SyncEventState in the queue context... also this will be a
5645 // new validation error... wait without previously submitted set event...
5646 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
5647 barrier_set_index += barrier_set_incr;
5648 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulaufd5115702021-01-18 12:34:33 -07005649 }
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005650 const auto event_handle = sync_event->event->event();
John Zulauf4edde622021-02-15 08:54:50 -07005651 // TODO add "destroyed" checks
5652
5653 barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
5654 const auto &src_exec_scope = barrier_set.src_exec_scope;
5655 event_stage_masks |= sync_event->scope.mask_param;
5656 const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_, src_exec_scope.mask_param);
5657 if (ignore_reason) {
5658 switch (ignore_reason) {
5659 case SyncEventState::ResetWaitRace:
5660 case SyncEventState::Reset2WaitRace: {
5661                        // Four permutations of Reset and Wait calls...
5662 const char *vuid =
5663 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
5664 if (ignore_reason == SyncEventState::Reset2WaitRace) {
5665 vuid =
Jeremy Gebben476f5e22021-03-01 15:27:20 -07005666 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2KHR-event-03831" : "VUID-vkCmdResetEvent2KHR-event-03832";
John Zulauf4edde622021-02-15 08:54:50 -07005667 }
5668 const char *const message =
5669                        "%s: %s %s operation following %s without intervening execution barrier, may cause a race condition. %s";
5670 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5671 sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
5672 CommandTypeString(sync_event->last_command), ignored);
5673 break;
5674 }
5675 case SyncEventState::SetRace: {
5676                    // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus ignored for
5677 // this event
5678 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
5679 const char *const message =
5680                        "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
5681 const char *const reason = "First synchronization scope is undefined.";
5682 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5683 sync_state.report_data->FormatHandle(event_handle).c_str(),
5684 CommandTypeString(sync_event->last_command), reason, ignored);
5685 break;
5686 }
5687 case SyncEventState::MissingStageBits: {
5688 const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
5689 // Issue error message that event waited for is not in wait events scope
5690 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
5691 const char *const message =
5692 "%s: %s stageMask %" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
5693 ". Bits missing from srcStageMask %s. %s";
5694 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5695 sync_state.report_data->FormatHandle(event_handle).c_str(),
5696 sync_event->scope.mask_param, src_exec_scope.mask_param,
5697 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), ignored);
5698 break;
5699 }
5700 case SyncEventState::SetVsWait2: {
5701 skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2KHR-pEvents-03837",
5702 "%s: Follows set of %s by %s. Disallowed.", CmdName(),
5703 sync_state.report_data->FormatHandle(event_handle).c_str(),
5704 CommandTypeString(sync_event->last_command));
5705 break;
5706 }
5707 default:
5708 assert(ignore_reason == SyncEventState::NotIgnored);
5709 }
5710 } else if (barrier_set.image_memory_barriers.size()) {
5711 const auto &image_memory_barriers = barrier_set.image_memory_barriers;
5712 const auto *context = cb_context.GetCurrentAccessContext();
5713 assert(context);
5714 for (const auto &image_memory_barrier : image_memory_barriers) {
5715 if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
5716 const auto *image_state = image_memory_barrier.image.get();
5717 if (!image_state) continue;
John Zulauf110413c2021-03-20 05:38:38 -06005718 const auto &subresource_range = image_memory_barrier.range;
John Zulauf4edde622021-02-15 08:54:50 -07005719 const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
5720 const auto hazard =
5721 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
5722 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
5723 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005724 skip |= sync_state.LogError(image_state->image(), string_SyncHazardVUID(hazard.hazard),
John Zulauf4edde622021-02-15 08:54:50 -07005725 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5726 string_SyncHazard(hazard.hazard), image_memory_barrier.index,
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005727 sync_state.report_data->FormatHandle(image_state->image()).c_str(),
John Zulauf4edde622021-02-15 08:54:50 -07005728 cb_context.FormatUsage(hazard).c_str());
5729 break;
5730 }
John Zulaufd5115702021-01-18 12:34:33 -07005731 }
5732 }
John Zulauf4edde622021-02-15 08:54:50 -07005733        // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2 VUID-vkCmdWaitEvents2KHR-pEvents-03839
5735 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07005736 }
John Zulaufd5115702021-01-18 12:34:33 -07005737
5738 // Note that we can't check for HOST in pEvents as we don't track that set event type
John Zulauf4edde622021-02-15 08:54:50 -07005739 const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
John Zulaufd5115702021-01-18 12:34:33 -07005740 if (extra_stage_bits) {
5741 // Issue error message that event waited for is not in wait events scope
John Zulauf4edde622021-02-15 08:54:50 -07005742 // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
5743 const char *const vuid =
5744 (CMD_WAITEVENTS == cmd_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2KHR-pEvents-03838";
John Zulaufd5115702021-01-18 12:34:33 -07005745 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07005746 "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
John Zulaufd5115702021-01-18 12:34:33 -07005747 if (events_not_found) {
John Zulauf4edde622021-02-15 08:54:50 -07005748 skip |= sync_state.LogInfo(command_buffer_handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005749 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
John Zulaufd5115702021-01-18 12:34:33 -07005750 " vkCmdSetEvent may be in previously submitted command buffer.");
5751 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005752 skip |= sync_state.LogError(command_buffer_handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005753 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
John Zulaufd5115702021-01-18 12:34:33 -07005754 }
5755 }
5756 return skip;
5757}
5758
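// Functor factory for event waits: barriers are first restricted to the event's first (set) scope, and all buffer, image,
// and global ranges are filtered through the event's recorded first-scope map so only accesses actually in scope are
// affected.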
5759struct SyncOpWaitEventsFunctorFactory {
5760 using BarrierOpFunctor = WaitEventBarrierOp;
5761 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5762 using GlobalBarrierOpFunctor = WaitEventBarrierOp;
5763 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5764 using BufferRange = EventSimpleRangeGenerator;
5765 using ImageRange = EventImageRangeGenerator;
5766 using GlobalRange = EventSimpleRangeGenerator;
5767
5768 // Need to restrict to only valid exec and access scope for this event
5769 // Pass by value is intentional to get a copy we can change without modifying the passed barrier
5770 SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07005771 barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
John Zulaufd5115702021-01-18 12:34:33 -07005772 barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
5773 return barrier;
5774 }
5775 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
5776 auto barrier = RestrictToEvent(barrier_arg);
5777 return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
5778 }
John Zulauf14940722021-04-12 15:19:02 -06005779 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, ResourceUsageTag tag) const {
John Zulaufd5115702021-01-18 12:34:33 -07005780 return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
5781 }
5782 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
5783 auto barrier = RestrictToEvent(barrier_arg);
5784 return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
5785 }
5786
5787 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
5788 const AccessAddressType address_type = GetAccessAddressType(buffer);
5789 const auto base_address = ResourceBaseAddress(buffer);
5790 ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
5791 EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
5792 return filtered_range_gen;
5793 }
John Zulauf110413c2021-03-20 05:38:38 -06005794 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulaufd5115702021-01-18 12:34:33 -07005795 if (!SimpleBinding(image)) return ImageRange();
5796 const auto address_type = GetAccessAddressType(image);
5797 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06005798 subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07005799 EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
5800
5801 return filtered_range_gen;
5802 }
5803 GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
5804 return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
5805 }
5806 SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
5807 SyncEventState *sync_event;
5808};
5809
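// Record applies each event's barrier set (restricted to that event's first scope) as pending state, updates the per-event
// barrier tracking used for race detection, and then resolves all pending barriers at the wait tag.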
John Zulauf8eda1562021-04-13 17:06:41 -06005810ResourceUsageTag SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf36ef9282021-02-02 11:47:24 -07005811 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufd5115702021-01-18 12:34:33 -07005812 auto *access_context = cb_context->GetCurrentAccessContext();
5813 assert(access_context);
John Zulauf8eda1562021-04-13 17:06:41 -06005814 if (!access_context) return tag;
John Zulauf669dfd52021-01-27 17:15:28 -07005815 auto *events_context = cb_context->GetCurrentEventsContext();
5816 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06005817 if (!events_context) return tag;
John Zulaufd5115702021-01-18 12:34:33 -07005818
5819    // Unlike PipelineBarrier, WaitEvents is *not* limited to accesses within the current subpass (if any) and thus needs to
5820    // import all accesses. We could instead import only the events' first_scopes (or their union) if this becomes a
5821    // performance/memory issue, but absent data on the cost of that union, take the simplest approach here:
5822 access_context->ResolvePreviousAccesses();
5823
John Zulaufd5115702021-01-18 12:34:33 -07005824    // TODO... this needs to change the SyncEventContext it's using depending on whether this is replay... the recorded
5825 // sync_event will be in the recorded context, but we need to update the sync_events in the current context....
John Zulauf4edde622021-02-15 08:54:50 -07005826 size_t barrier_set_index = 0;
5827 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
5828 assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
John Zulauf669dfd52021-01-27 17:15:28 -07005829 for (auto &event_shared : events_) {
5830 if (!event_shared.get()) continue;
5831 auto *sync_event = events_context->GetFromShared(event_shared);
John Zulaufd5115702021-01-18 12:34:33 -07005832
John Zulauf4edde622021-02-15 08:54:50 -07005833 sync_event->last_command = cmd_;
John Zulaufd5115702021-01-18 12:34:33 -07005834
John Zulauf4edde622021-02-15 08:54:50 -07005835 const auto &barrier_set = barriers_[barrier_set_index];
5836 const auto &dst = barrier_set.dst_exec_scope;
5837 if (!sync_event->IsIgnoredByWait(cmd_, barrier_set.src_exec_scope.mask_param)) {
John Zulaufd5115702021-01-18 12:34:33 -07005838            // These apply barriers one at a time as they are restricted to the resource ranges specified per each barrier,
5839            // but do not update the dependency chain information (they only set the "pending" state) so that the order
5840            // independence of the barriers is maintained.
5841 SyncOpWaitEventsFunctorFactory factory(sync_event);
John Zulauf4edde622021-02-15 08:54:50 -07005842 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5843 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5844 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulaufd5115702021-01-18 12:34:33 -07005845
5846 // Apply the global barrier to the event itself (for race condition tracking)
5847 // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
5848 sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
5849 sync_event->barriers |= dst.exec_scope;
5850 } else {
5851 // We ignored this wait, so we don't have any effective synchronization barriers for it.
5852 sync_event->barriers = 0U;
5853 }
John Zulauf4edde622021-02-15 08:54:50 -07005854 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07005855 }
5856
5857 // Apply the pending barriers
5858 ResolvePendingBarrierFunctor apply_pending_action(tag);
5859 access_context->ApplyToContext(apply_pending_action);
John Zulauf8eda1562021-04-13 17:06:41 -06005860
5861 return tag;
John Zulaufd5115702021-01-18 12:34:33 -07005862}
5863
John Zulauf8eda1562021-04-13 17:06:41 -06005864bool SyncOpWaitEvents::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
5865 CommandBufferAccessContext *active_context) const {
5866 return false;
5867}
5868
John Zulauf4fa68462021-04-26 21:04:22 -06005869void SyncOpWaitEvents::DoRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06005870
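// Synchronization2 variant of the buffer marker write; validated the same way as vkCmdWriteBufferMarkerAMD above, as a
// 4-byte transfer-stage write to the destination buffer.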
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005871bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
5872 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5873 bool skip = false;
5874 const auto *cb_access_context = GetAccessContext(commandBuffer);
5875 assert(cb_access_context);
5876 if (!cb_access_context) return skip;
5877
5878 const auto *context = cb_access_context->GetCurrentAccessContext();
5879 assert(context);
5880 if (!context) return skip;
5881
5882 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5883
5884 if (dst_buffer) {
5885 const ResourceAccessRange range = MakeRange(dstOffset, 4);
5886 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
5887 if (hazard.hazard) {
5888 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5889 "vkCmdWriteBufferMarkerAMD2: Hazard %s for dstBuffer %s. Access info %s.",
5890                             "vkCmdWriteBufferMarker2AMD: Hazard %s for dstBuffer %s. Access info %s.",
John Zulauf14940722021-04-12 15:19:02 -06005891 cb_access_context->FormatUsage(hazard).c_str());
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005892 }
5893 }
5894 return skip;
5895}
5896
John Zulauf669dfd52021-01-27 17:15:28 -07005897void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
John Zulaufd5115702021-01-18 12:34:33 -07005898 events_.reserve(event_count);
5899 for (uint32_t event_index = 0; event_index < event_count; event_index++) {
John Zulauf669dfd52021-01-27 17:15:28 -07005900 events_.emplace_back(sync_state.GetShared<EVENT_STATE>(events[event_index]));
John Zulaufd5115702021-01-18 12:34:33 -07005901 }
5902}
John Zulauf6ce24372021-01-30 05:56:25 -07005903
John Zulauf36ef9282021-02-02 11:47:24 -07005904SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07005905 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07005906 : SyncOpBase(cmd),
5907 event_(sync_state.GetShared<EVENT_STATE>(event)),
5908 exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07005909
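// ResetEvent requires an execution barrier between a prior set/wait and the reset; otherwise the reset races the prior
// operation, which is reported below using the last_command recorded for the event.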
5910bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07005911 auto *events_context = cb_context.GetCurrentEventsContext();
5912 assert(events_context);
5913 bool skip = false;
5914 if (!events_context) return skip;
5915
5916 const auto &sync_state = cb_context.GetSyncState();
5917 const auto *sync_event = events_context->Get(event_);
5918 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
5919
5920 const char *const set_wait =
5921        "%s: %s %s operation following %s without intervening execution barrier is a race condition and may result in data "
5922 "hazards.";
5923 const char *message = set_wait; // Only one message this call.
5924 if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
5925 const char *vuid = nullptr;
5926 switch (sync_event->last_command) {
5927 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005928 case CMD_SETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005929 // Needs a barrier between set and reset
5930 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
5931 break;
John Zulauf4edde622021-02-15 08:54:50 -07005932 case CMD_WAITEVENTS:
5933 case CMD_WAITEVENTS2KHR: {
John Zulauf6ce24372021-01-30 05:56:25 -07005934                // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask)
5935 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
5936 break;
5937 }
5938 default:
5939                // The only other valid last commands are CMD_NONE or a prior reset, which need no barrier.
John Zulauf4edde622021-02-15 08:54:50 -07005940 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
5941 (sync_event->last_command == CMD_RESETEVENT2KHR));
John Zulauf6ce24372021-01-30 05:56:25 -07005942 break;
5943 }
5944 if (vuid) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005945 skip |= sync_state.LogError(event_->event(), vuid, message, CmdName(),
5946 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07005947 CommandTypeString(sync_event->last_command));
5948 }
5949 }
5950 return skip;
5951}
5952
John Zulauf8eda1562021-04-13 17:06:41 -06005953ResourceUsageTag SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
5954 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07005955 auto *events_context = cb_context->GetCurrentEventsContext();
5956 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06005957 if (!events_context) return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07005958
5959 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf8eda1562021-04-13 17:06:41 -06005960 if (!sync_event) return tag; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07005961
5962 // Update the event state
John Zulauf36ef9282021-02-02 11:47:24 -07005963 sync_event->last_command = cmd_;
John Zulauf6ce24372021-01-30 05:56:25 -07005964 sync_event->unsynchronized_set = CMD_NONE;
5965 sync_event->ResetFirstScope();
5966 sync_event->barriers = 0U;
John Zulauf8eda1562021-04-13 17:06:41 -06005967
5968 return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07005969}
5970
John Zulauf8eda1562021-04-13 17:06:41 -06005971bool SyncOpResetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
5972 CommandBufferAccessContext *active_context) const {
5973 return false;
5974}
5975
John Zulauf4fa68462021-04-26 21:04:22 -06005976void SyncOpResetEvent::DoRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06005977
John Zulauf36ef9282021-02-02 11:47:24 -07005978SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07005979 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07005980 : SyncOpBase(cmd),
5981 event_(sync_state.GetShared<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07005982 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
5983 dep_info_() {}
5984
5985SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
5986 const VkDependencyInfoKHR &dep_info)
5987 : SyncOpBase(cmd),
5988 event_(sync_state.GetShared<EVENT_STATE>(event)),
5989 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
5990 dep_info_(new safe_VkDependencyInfoKHR(&dep_info)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07005991
5992bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
5993 // I'll put this here just in case we need to pass this in for future extension support
John Zulauf6ce24372021-01-30 05:56:25 -07005994 bool skip = false;
5995
5996 const auto &sync_state = cb_context.GetSyncState();
5997 auto *events_context = cb_context.GetCurrentEventsContext();
5998 assert(events_context);
5999 if (!events_context) return skip;
6000
6001 const auto *sync_event = events_context->Get(event_);
6002 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
6003
6004 const char *const reset_set =
6005        "%s: %s %s operation following %s without intervening execution barrier is a race condition and may result in data "
6006 "hazards.";
6007 const char *const wait =
6008        "%s: %s %s operation following %s without intervening vkCmdResetEvent may result in a data hazard and is ignored.";
6009
6010 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
John Zulauf4edde622021-02-15 08:54:50 -07006011 const char *vuid_stem = nullptr;
John Zulauf6ce24372021-01-30 05:56:25 -07006012 const char *message = nullptr;
6013 switch (sync_event->last_command) {
6014 case CMD_RESETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006015 case CMD_RESETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07006016 // Needs a barrier between reset and set
John Zulauf4edde622021-02-15 08:54:50 -07006017 vuid_stem = "-missingbarrier-reset";
John Zulauf6ce24372021-01-30 05:56:25 -07006018 message = reset_set;
6019 break;
6020 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07006021 case CMD_SETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07006022 // Needs a barrier between set and set
John Zulauf4edde622021-02-15 08:54:50 -07006023 vuid_stem = "-missingbarrier-set";
John Zulauf6ce24372021-01-30 05:56:25 -07006024 message = reset_set;
6025 break;
6026 case CMD_WAITEVENTS:
John Zulauf4edde622021-02-15 08:54:50 -07006027 case CMD_WAITEVENTS2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07006028 // Needs a barrier or is in second execution scope
John Zulauf4edde622021-02-15 08:54:50 -07006029 vuid_stem = "-missingbarrier-wait";
John Zulauf6ce24372021-01-30 05:56:25 -07006030 message = wait;
6031 break;
6032 default:
6033 // The only other valid "last command" is no command at all (CMD_NONE).
6034 assert(sync_event->last_command == CMD_NONE);
6035 break;
6036 }
John Zulauf4edde622021-02-15 08:54:50 -07006037 if (vuid_stem) {
John Zulauf6ce24372021-01-30 05:56:25 -07006038 assert(nullptr != message);
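            // Synthesize the full VUID string, e.g. "SYNC-vkCmdSetEvent-missingbarrier-reset".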
John Zulauf4edde622021-02-15 08:54:50 -07006039 std::string vuid("SYNC-");
6040 vuid.append(CmdName()).append(vuid_stem);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06006041 skip |= sync_state.LogError(event_->event(), vuid.c_str(), message, CmdName(),
6042 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07006043 CommandTypeString(sync_event->last_command));
6044 }
6045 }
6046
6047 return skip;
6048}
6049
John Zulauf8eda1562021-04-13 17:06:41 -06006050ResourceUsageTag SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf36ef9282021-02-02 11:47:24 -07006051 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07006052 auto *events_context = cb_context->GetCurrentEventsContext();
6053 auto *access_context = cb_context->GetCurrentAccessContext();
6054 assert(events_context);
John Zulauf8eda1562021-04-13 17:06:41 -06006055 if (!events_context) return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006056
6057 auto *sync_event = events_context->GetFromShared(event_);
John Zulauf8eda1562021-04-13 17:06:41 -06006058 if (!sync_event) return tag; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulauf6ce24372021-01-30 05:56:25 -07006059
6060 // NOTE: We simply record the sync scope here, as anything else would be implementation defined/undefined.
6061 // We already issue errors for missing barriers between event commands, and once the user fixes those,
6062 // any issues caused by the naive scope setting here are fixed as well.
6063
6064 // With two SetEvent calls on the same event, one cannot know which group of operations will be waited for.
6065 // Given:
6066 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
6067 // WaitEvents cannot know whether Stuff1, Stuff2, or both have completed execution.
6068
6069 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
6070 sync_event->unsynchronized_set = sync_event->last_command;
6071 sync_event->ResetFirstScope();
6072 } else if (sync_event->scope.exec_scope == 0) {
6073 // We only set the scope if there isn't one
6074 sync_event->scope = src_exec_scope_;
6075
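        // Snapshot every access currently in the source execution scope (or its chain); this becomes the event's
        // first scope, which a later WaitEvents synchronizes against.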
6076 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
6077 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
6078 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
6079 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
6080 }
6081 };
6082 access_context->ForAll(set_scope);
6083 sync_event->unsynchronized_set = CMD_NONE;
6084 sync_event->first_scope_tag = tag;
6085 }
John Zulauf4edde622021-02-15 08:54:50 -07006086 // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
6087 sync_event->last_command = cmd_;
John Zulauf6ce24372021-01-30 05:56:25 -07006088 sync_event->barriers = 0U;
John Zulauf8eda1562021-04-13 17:06:41 -06006089
6090 return tag;
John Zulauf6ce24372021-01-30 05:56:25 -07006091}
John Zulauf64ffe552021-02-06 10:25:07 -07006092
John Zulauf8eda1562021-04-13 17:06:41 -06006093bool SyncOpSetEvent::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
6094 CommandBufferAccessContext *active_context) const {
6095 return false;
6096}
6097
John Zulauf4fa68462021-04-26 21:04:22 -06006098void SyncOpSetEvent::DoRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006099
John Zulauf64ffe552021-02-06 10:25:07 -07006100SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
6101 const VkRenderPassBeginInfo *pRenderPassBegin,
sfricke-samsung85584a72021-09-30 21:43:38 -07006102 const VkSubpassBeginInfo *pSubpassBeginInfo)
6103 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006104 if (pRenderPassBegin) {
6105 rp_state_ = sync_state.GetShared<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
6106 renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
6107 const auto *fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
6108 if (fb_state) {
6109 shared_attachments_ = sync_state.GetSharedAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
6110 // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
6111 // Note that this is safe to persist as long as shared_attachments_ is not cleared
6112 attachments_.reserve(shared_attachments_.size());
sfricke-samsung01c9ae92021-02-09 22:30:52 -08006113 for (const auto &attachment : shared_attachments_) {
John Zulauf64ffe552021-02-06 10:25:07 -07006114 attachments_.emplace_back(attachment.get());
6115 }
6116 }
6117 if (pSubpassBeginInfo) {
6118 subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
6119 }
6120 }
6121}
6122
6123bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
6124 // Check if any of the layout transitions are hazardous... we don't have the renderpass context to work with yet, so we build a temporary access context below to validate against.
6125 bool skip = false;
6126
6127 assert(rp_state_.get());
6128 if (nullptr == rp_state_.get()) return skip;
6129 auto &rp_state = *rp_state_.get();
6130
6131 const uint32_t subpass = 0;
6132
6133 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
6134 // hasn't happened yet)
6135 const std::vector<AccessContext> empty_context_vector;
6136 AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
6137 cb_context.GetCurrentAccessContext());
6138
6139 // Validate attachment operations
6140 if (attachments_.size() == 0) return skip;
6141 const auto &render_area = renderpass_begin_info_.renderArea;
John Zulaufd0ec59f2021-03-13 14:25:08 -07006142
6143 // Since there isn't a valid RenderPassAccessContext until Record, we need to create the view/generator list... we could limit this
6144 // by predicating on whether subpass 0 uses the attachment if it is too expensive to create the full list redundantly here.
6145 // More broadly we could look at thread specific state shared between Validate and Record as is done for other heavyweight
6146 // operations (though it's currently a messy approach)
6147 AttachmentViewGenVector view_gens = RenderPassAccessContext::CreateAttachmentViewGen(render_area, attachments_);
6148 skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07006149
6150 // Validate load operations if there were no layout transition hazards
6151 if (!skip) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07006152 temp_context.RecordLayoutTransitions(rp_state, subpass, view_gens, kCurrentCommandTag);
6153 skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07006154 }
6155
6156 return skip;
6157}
6158
John Zulauf8eda1562021-04-13 17:06:41 -06006159ResourceUsageTag SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
6160 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf64ffe552021-02-06 10:25:07 -07006161 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
6162 assert(rp_state_.get());
John Zulauf8eda1562021-04-13 17:06:41 -06006163 if (nullptr == rp_state_.get()) return tag;
John Zulauf64ffe552021-02-06 10:25:07 -07006164 cb_context->RecordBeginRenderPass(*rp_state_.get(), renderpass_begin_info_.renderArea, attachments_, tag);
John Zulauf8eda1562021-04-13 17:06:41 -06006165
6166 return tag;
John Zulauf64ffe552021-02-06 10:25:07 -07006167}
6168
John Zulauf8eda1562021-04-13 17:06:41 -06006169bool SyncOpBeginRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
6170 CommandBufferAccessContext *active_context) const {
6171 return false;
6172}
6173
John Zulauf4fa68462021-04-26 21:04:22 -06006174void SyncOpBeginRenderPass::DoRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {
6175}
John Zulauf8eda1562021-04-13 17:06:41 -06006176
John Zulauf64ffe552021-02-06 10:25:07 -07006177SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
sfricke-samsung85584a72021-09-30 21:43:38 -07006178 const VkSubpassEndInfo *pSubpassEndInfo)
6179 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006180 if (pSubpassBeginInfo) {
6181 subpass_begin_info_.initialize(pSubpassBeginInfo);
6182 }
6183 if (pSubpassEndInfo) {
6184 subpass_end_info_.initialize(pSubpassEndInfo);
6185 }
6186}
6187
6188bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
6189 bool skip = false;
6190 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
6191 if (!renderpass_context) return skip;
6192
6193 skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
6194 return skip;
6195}
6196
John Zulauf8eda1562021-04-13 17:06:41 -06006197ResourceUsageTag SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf64ffe552021-02-06 10:25:07 -07006198 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
John Zulauf8eda1562021-04-13 17:06:41 -06006199 // TODO PHASE2 Need to fix renderpass tagging/segregation of barrier and access operations for QueueSubmit time validation
6200 auto prev_tag = cb_context->NextCommandTag(cmd_);
6201 auto next_tag = cb_context->NextSubcommandTag(cmd_);
6202
6203 cb_context->RecordNextSubpass(prev_tag, next_tag);
6204 // TODO PHASE2 This needs to be the tag of the barrier operations
6205 return prev_tag;
6206}
6207
6208bool SyncOpNextSubpass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
6209 CommandBufferAccessContext *active_context) const {
6210 return false;
John Zulauf64ffe552021-02-06 10:25:07 -07006211}
6212
sfricke-samsung85584a72021-09-30 21:43:38 -07006213SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo)
6214 : SyncOpBase(cmd) {
John Zulauf64ffe552021-02-06 10:25:07 -07006215 if (pSubpassEndInfo) {
6216 subpass_end_info_.initialize(pSubpassEndInfo);
6217 }
6218}
6219
John Zulauf4fa68462021-04-26 21:04:22 -06006220void SyncOpNextSubpass::DoRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006221
John Zulauf64ffe552021-02-06 10:25:07 -07006222bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
6223 bool skip = false;
6224 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
6225
6226 if (!renderpass_context) return skip;
6227 skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
6228 return skip;
6229}
6230
John Zulauf8eda1562021-04-13 17:06:41 -06006231ResourceUsageTag SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf64ffe552021-02-06 10:25:07 -07006232 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
John Zulauf8eda1562021-04-13 17:06:41 -06006233 const auto tag = cb_context->NextCommandTag(cmd_);
6234 cb_context->RecordEndRenderPass(tag);
6235 return tag;
John Zulauf64ffe552021-02-06 10:25:07 -07006236}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006237
John Zulauf8eda1562021-04-13 17:06:41 -06006238bool SyncOpEndRenderPass::ReplayValidate(ResourceUsageTag recorded_tag, const CommandBufferAccessContext &recorded_context,
6239 CommandBufferAccessContext *active_context) const {
6240 return false;
6241}
6242
John Zulauf4fa68462021-04-26 21:04:22 -06006243void SyncOpEndRenderPass::DoRecord(ResourceUsageTag tag, AccessContext *access_context, SyncEventsContext *events_context) const {}
John Zulauf8eda1562021-04-13 17:06:41 -06006244
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07006245void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
6246 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
6247 StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
6248 auto *cb_access_context = GetAccessContext(commandBuffer);
6249 assert(cb_access_context);
6250 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
6251 auto *context = cb_access_context->GetCurrentAccessContext();
6252 assert(context);
6253
6254 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
6255
6256 if (dst_buffer) {
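        // The marker is a single 32-bit value, so record a 4-byte transfer write at dstOffset.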
6257 const ResourceAccessRange range = MakeRange(dstOffset, 4);
6258 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
6259 }
6260}
John Zulaufd05c5842021-03-26 11:32:16 -06006261
John Zulaufae842002021-04-15 18:20:55 -06006262bool SyncValidator::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
6263 const VkCommandBuffer *pCommandBuffers) const {
6264 bool skip = StateTracker::PreCallValidateCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
6265 const char *func_name = "vkCmdExecuteCommands";
6266 const auto *cb_context = GetAccessContext(commandBuffer);
6267 assert(cb_context);
John Zulauf4fa68462021-04-26 21:04:22 -06006268
6269 // Heavyweight, but we need a proxy copy of the active command buffer access context
6270 CommandBufferAccessContext proxy_cb_context(*cb_context, CommandBufferAccessContext::AsProxyContext());
John Zulaufae842002021-04-15 18:20:55 -06006271
6272 // Make working copies of the access and events contexts
John Zulauf4fa68462021-04-26 21:04:22 -06006273 proxy_cb_context.NextCommandTag(CMD_EXECUTECOMMANDS);
John Zulaufae842002021-04-15 18:20:55 -06006274
6275 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
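        // Each executed secondary command buffer gets its own subcommand tag under CMD_EXECUTECOMMANDS.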
John Zulauf4fa68462021-04-26 21:04:22 -06006276 proxy_cb_context.NextSubcommandTag(CMD_EXECUTECOMMANDS);
John Zulaufae842002021-04-15 18:20:55 -06006277 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
6278 if (!recorded_cb_context) continue;
John Zulauf4fa68462021-04-26 21:04:22 -06006279
6280 const auto *recorded_context = recorded_cb_context->GetCurrentAccessContext();
6281 assert(recorded_context);
6282 skip |= recorded_cb_context->ValidateFirstUse(&proxy_cb_context, func_name, cb_index);
6283
6284 // The barriers have already been applied in ValidateFirstUse
6285 ResourceUsageRange tag_range = proxy_cb_context.ImportRecordedAccessLog(*recorded_cb_context);
6286 proxy_cb_context.ResolveRecordedContext(*recorded_context, tag_range.begin);
John Zulaufae842002021-04-15 18:20:55 -06006287 }
6288
John Zulaufae842002021-04-15 18:20:55 -06006289 return skip;
6290}
6291
6292void SyncValidator::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
6293 const VkCommandBuffer *pCommandBuffers) {
6294 StateTracker::PreCallRecordCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
John Zulauf4fa68462021-04-26 21:04:22 -06006295 auto *cb_context = GetAccessContext(commandBuffer);
6296 assert(cb_context);
6297 cb_context->NextCommandTag(CMD_EXECUTECOMMANDS);
6298 for (uint32_t cb_index = 0; cb_index < commandBufferCount; ++cb_index) {
6299 cb_context->NextSubcommandTag(CMD_EXECUTECOMMANDS);
6300 const auto *recorded_cb_context = GetAccessContext(pCommandBuffers[cb_index]);
6301 if (!recorded_cb_context) continue;
6302 cb_context->RecordExecutedCommandBuffer(*recorded_cb_context, CMD_EXECUTECOMMANDS);
6303 }
John Zulaufae842002021-04-15 18:20:55 -06006304}
6305
John Zulaufd0ec59f2021-03-13 14:25:08 -07006306AttachmentViewGen::AttachmentViewGen(const IMAGE_VIEW_STATE *view, const VkOffset3D &offset, const VkExtent3D &extent)
6307 : view_(view), view_mask_(), gen_store_() {
6308 if (!view_ || !view_->image_state || !SimpleBinding(*view_->image_state)) return;
6309 const IMAGE_STATE &image_state = *view_->image_state.get();
6310 const auto base_address = ResourceBaseAddress(image_state);
6311 const auto *encoder = image_state.fragment_encoder.get();
6312 if (!encoder) return;
Jeremy Gebben11a68a32021-07-29 11:59:22 -06006313 // Get offset and extent for the view, accounting for possible depth slicing
6314 const VkOffset3D zero_offset = view->GetOffset();
6315 const VkExtent3D &image_extent = view->GetExtent();
John Zulaufd0ec59f2021-03-13 14:25:08 -07006316 // Intentional copy
6317 VkImageSubresourceRange subres_range = view_->normalized_subresource_range;
6318 view_mask_ = subres_range.aspectMask;
6319 gen_store_[Gen::kViewSubresource].emplace(*encoder, subres_range, zero_offset, image_extent, base_address);
6320 gen_store_[Gen::kRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6321
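    // For combined depth/stencil views, also build per-aspect render-area generators so depth-only or
    // stencil-only operations can be validated against just the aspect they touch.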
6322 const auto depth = view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT;
6323 if (depth && (depth != view_mask_)) {
6324 subres_range.aspectMask = depth;
6325 gen_store_[Gen::kDepthOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6326 }
6327 const auto stencil = view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT;
6328 if (stencil && (stencil != view_mask_)) {
6329 subres_range.aspectMask = stencil;
6330 gen_store_[Gen::kStencilOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
6331 }
6332}
6333
6334const ImageRangeGen *AttachmentViewGen::GetRangeGen(AttachmentViewGen::Gen gen_type) const {
6335 const ImageRangeGen *got = nullptr;
6336 switch (gen_type) {
6337 case kViewSubresource:
6338 got = &gen_store_[kViewSubresource];
6339 break;
6340 case kRenderArea:
6341 got = &gen_store_[kRenderArea];
6342 break;
6343 case kDepthOnlyRenderArea:
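            // A depth-only view never populates kDepthOnlyRenderArea (see the constructor above), so fall back to the
            // full render-area generator.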
6344 got =
6345 (view_mask_ == VK_IMAGE_ASPECT_DEPTH_BIT) ? &gen_store_[Gen::kRenderArea] : &gen_store_[Gen::kDepthOnlyRenderArea];
6346 break;
6347 case kStencilOnlyRenderArea:
6348 got = (view_mask_ == VK_IMAGE_ASPECT_STENCIL_BIT) ? &gen_store_[Gen::kRenderArea]
6349 : &gen_store_[Gen::kStencilOnlyRenderArea];
6350 break;
6351 default:
6352 assert(got);
6353 }
6354 return got;
6355}
6356
6357AttachmentViewGen::Gen AttachmentViewGen::GetDepthStencilRenderAreaGenType(bool depth_op, bool stencil_op) const {
6358 assert(IsValid());
6359 assert(view_mask_ & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
6360 if (depth_op) {
6361 assert(view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT);
6362 if (stencil_op) {
6363 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
6364 return kRenderArea;
6365 }
6366 return kDepthOnlyRenderArea;
6367 }
6368 if (stencil_op) {
6369 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
6370 return kStencilOnlyRenderArea;
6371 }
6372
6373 assert(depth_op || stencil_op);
6374 return kRenderArea;
6375}
6376
6377AccessAddressType AttachmentViewGen::GetAddressType() const { return AccessContext::ImageAddressType(*view_->image_state); }
John Zulauf8eda1562021-04-13 17:06:41 -06006378
6379void SyncEventsContext::ApplyBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
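    // Apply the barrier to every tracked event: an event picks up the destination scope if its accumulated barrier
    // stages intersect the source scope, or if the source was ALL_COMMANDS.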
6380 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
6381 for (auto &event_pair : map_) {
6382 assert(event_pair.second); // Shouldn't be storing empty
6383 auto &sync_event = *event_pair.second;
6384 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
6385 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
6386 sync_event.barriers |= dst.exec_scope;
6387 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
6388 }
6389 }
6390}