/* Copyright (c) 2019-2021 The Khronos Group Inc.
 * Copyright (c) 2019-2021 Valve Corporation
 * Copyright (c) 2019-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.binding.mem_state; }

const static std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; }
static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
    return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
}

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::READ_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_READ:
            return true;
            break;
        default:
            assert(0);
    }
    return false;
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}

static std::string string_UsageTag(const ResourceUsageTag &tag) {
    std::stringstream out;

    out << "command: " << CommandTypeString(tag.command);
    out << ", seq_no: " << tag.seq_num;
    if (tag.sub_command != 0) {
        out << ", subcmd: " << tag.sub_command;
    }
    return out.str();
}

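// Example (illustrative, not normative): for the third command recorded in a buffer the formatter above yields
// something like "command: vkCmdCopyBuffer, seq_no: 3, subcmd: 1"; the subcmd clause appears only when
// sub_command is non-zero, and the command name comes from CommandTypeString.
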
std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
    const auto &tag = hazard.tag;
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    std::stringstream out;
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(usage: " << usage_info.name << ", prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }

    // PHASE2 TODO -- add command buffer and reset from secondary if applicable
    out << ", " << string_UsageTag(tag) << ", reset_no: " << reset_count_;
    return out.str();
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not to the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};

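// Illustrative note, under the assumption that OrderingBarriers is indexed by SyncOrdering: the entries above
// line up with SyncOrdering::kNone, kColorAttachment, kDepthStencilAttachment, and kRaster as used by the
// DetectHazard/UpdateAccessState calls later in this file, so e.g. a color-attachment ordered access is judged
// against kColorAttachmentExecScope/kColorAttachmentAccessScope.
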
// Sometimes we have an internal access conflict, and we use kCurrentCommandTag to set and detect it in temporary/proxy contexts
static const ResourceUsageTag kCurrentCommandTag(ResourceUsageTag::kMaxIndex, ResourceUsageTag::kMaxCount,
                                                 ResourceUsageTag::kMaxCount, CMD_NONE);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) {
    return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
}

inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

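// Worked example (illustrative): for a 256-byte buffer, GetRealWholeSize(16, VK_WHOLE_SIZE, 256) returns
// 256 - 16 = 240, the remainder of the buffer past the offset, while GetRealWholeSize(16, 64, 256) simply
// returns the explicit size, 64.
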
static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

// Range generators to allow event scope filtering to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
// Constructor() -- initializes the generator to point to the begin of the space declared.
// * -- the current range of the generator; an empty range signifies end
// ++ -- advance to the next non-empty range (or end)

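// Illustrative usage sketch (assumed calling pattern mirroring the loops later in this file; Process() is a
// hypothetical consumer):
//
//     EventSimpleRangeGenerator range_gen(scope_map, range);  // alias defined below
//     for (; range_gen->non_empty(); ++range_gen) {
//         Process(*range_gen);  // each dereference yields one non-empty intersection range
//     }
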
// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
  public:
    SingleRangeGenerator(const KeyType &range) : current_(range) {}
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    SingleRangeGenerator &operator++() {
        current_ = KeyType();  // just one real range
        return *this;
    }

    bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }

  private:
    SingleRangeGenerator() = default;
    const KeyType range_;
    KeyType current_;
};

// Generate the ranges that are the intersection of range and the entries in the FilterMap
template <typename FilterMap, typename KeyType = typename FilterMap::key_type>
class FilteredRangeGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    FilteredRangeGenerator() : range_(), filter_(nullptr), filter_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    FilteredRangeGenerator(const FilterMap &filter, const KeyType &range)
        : range_(range), filter_(&filter), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredRangeGenerator(const FilteredRangeGenerator &from) = default;

    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredRangeGenerator &operator++() {
        ++filter_pos_;
        UpdateCurrent();
        return *this;
    }

    bool operator==(const FilteredRangeGenerator &other) const { return current_ == other.current_; }

  private:
    void UpdateCurrent() {
        if (filter_pos_ != filter_->cend()) {
            current_ = range_ & filter_pos_->first;
        } else {
            current_ = KeyType();
        }
    }
    void SeekBegin() {
        filter_pos_ = filter_->lower_bound(range_);
        UpdateCurrent();
    }
    const KeyType range_;
    const FilterMap *filter_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};
using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
using EventSimpleRangeGenerator = FilteredRangeGenerator<SyncEventState::ScopeMap>;

// Templated to allow for different Range generators or map sources...

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
template <typename FilterMap, typename RangeGen, typename KeyType = typename FilterMap::key_type>
class FilteredGeneratorGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    FilteredGeneratorGenerator(const FilterMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++gen_;
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *gen_; }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++gen_;
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    const FilterMap *filter_;
    RangeGen gen_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;

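// Illustrative usage sketch (assumed setup; scope_map and the loop body are hypothetical): intersect an image's
// ranges with an event's scope map and visit only the overlapping pieces.
//
//     subresource_adapter::ImageRangeGenerator image_gen(/* as constructed in ApplyOverImageRange below */);
//     EventImageRangeGenerator filtered(scope_map, image_gen);  // scope_map: a SyncEventState::ScopeMap
//     for (; filtered->non_empty(); ++filtered) {
//         // *filtered is one range present in both the image generator and the scope map
//     }
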
static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());

ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}

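// Worked example (illustrative): with offset = 64, first_index = 2, count = 10, stride = 16, the range starts at
// 64 + 2 * 16 = 96 and spans 10 * 16 = 160 bytes, i.e. [96, 256). Passing count == UINT32_MAX instead extends
// the range from range_start to the end of the buffer (buf_whole_size).
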
SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
    // Because if a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard is impossible.
    if (descriptor_data.is_writable) {
        return stage_access->second.storage_write;
    }
    // TODO: sampled_read
    return stage_access->second.storage_read;
}

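// Illustrative mapping (follows directly from the branches above): a writable storage buffer in the fragment
// stage resolves to that stage's storage_write index; the same descriptor marked read-only resolves to
// storage_read; and an input attachment always maps to SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ.
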
bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL);
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL);
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}

// Traverse the attachment resolves for a specific subpass, and do action() on them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
                      const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass) {
    VkExtent3D extent = CastTo3D(render_area.extent);
    VkOffset3D offset = CastTo3D(render_area.offset);
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an in-use color attachment and a matching in-use resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kColorAttachment, offset, extent, 0);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment, offset, extent, 0);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
        VkImageAspectFlags aspect_mask = 0u;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        if (resolve_depth && resolve_stencil) {
            // Validate all aspects together
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate stencil only
            aspect_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "stencil";
        }

        if (aspect_mask) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster, offset, extent, aspect_mask);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, offset, extent, aspect_mask);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandExecutionContext &ex_context, const char *func_name)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          ex_context_(ex_context),
          func_name_(func_name),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view, current_usage, ordering_rule, offset, extent, aspect_mask);
        if (hazard.hazard) {
            skip_ |=
                ex_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
                                                    "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
                                                    " to resolve attachment %" PRIu32 ". Access info %s.",
                                                    func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
                                                    attachment_name, src_at, dst_at, ex_context_.FormatUsage(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandExecutionContext &ex_context_;
    const char *func_name_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, const ResourceUsageTag &tag) : context_(context), tag_(tag) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view, current_usage, ordering_rule, offset, extent, aspect_mask, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag &tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag &tag_) {
    access_state = std::unique_ptr<const ResourceAccessState>(new ResourceAccessState(*access_state_));
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (subpass_dep.barrier_from_external.size()) {
        src_external_ = TrackBack(external_context, queue_flags, subpass_dep.barrier_from_external);
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (const auto &access : accesses) {
            action(address_type, access);
        }
    }
}

// A recursive range walker for hazard detection: check the current context first, then walk the DAG of prior
// contexts (for example subpasses), recurring through DetectPreviousHazard for any gaps in the current map
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    for (auto pos = from; pos != to; ++pos) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
    }

    if (detect_prev) {
        // Detect in the trailing gap as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);

    HazardResult hazard;
    for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
        hazard = detector.DetectAsync(pos, start_tag_);
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct ApplyTrackbackBarriersAction {
    explicit ApplyTrackbackBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        access->ApplyPendingBarriers(kCurrentCommandTag);
    }
    const std::vector<SyncBarrier> &barriers;
};

// Splits a single map entry into pieces matching the entries in [first, last); the total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing into dest; first and last must be iterators pointing into a
// *different* map from dest.
// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
// range [first, last)
template <typename BarrierAction>
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
                              ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
                              BarrierAction &barrier_action) {
    auto at = entry;
    for (auto pos = first; pos != last; ++pos) {
        // Every member of the input iterator range must fit within the remaining portion of entry
        assert(at->first.includes(pos->first));
        assert(at != dest->end());
        // Trim up at to the same size as the entry to resolve
        at = sparse_container::split(at, *dest, pos->first);
        auto access = pos->second;  // intentional copy
        barrier_action(&access);
        at->second.Resolve(access);
        ++at;  // Go to the remaining unused section of entry
    }
}

static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
    SyncBarrier merged = {};
    for (const auto &barrier : barriers) {
        merged.Merge(barrier);
    }
    return merged;
}

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
                                       ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
                                       bool recur_to_infill) const {
    if (!range.non_empty()) return;

    ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
    while (current->range.non_empty() && range.includes(current->range.begin)) {
        const auto current_range = current->range & range;
        if (current->pos_B->valid) {
            const auto &src_pos = current->pos_B->lower_bound;
            auto access = src_pos->second;  // intentional copy
            barrier_action(&access);

            if (current->pos_A->valid) {
                const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
                trimmed->second.Resolve(access);
                current.invalidate_A(trimmed);
            } else {
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the insert segment
            }
        } else {
            // we have to descend to fill this gap
            if (recur_to_infill) {
                if (current->pos_A->valid) {
                    // Dest is valid, so we need to accumulate along the DAG and then resolve... in an N-to-1 resolve operation
                    ResourceAccessRangeMap gap_map;
                    ResolvePreviousAccess(type, current_range, &gap_map, infill_state);
                    ResolveMapToEntry(resolve_map, current->pos_A->lower_bound, gap_map.begin(), gap_map.end(), barrier_action);
                } else {
                    // There isn't anything in dest in current_range, so we can accumulate directly into it.
                    ResolvePreviousAccess(type, current_range, resolve_map, infill_state);
                    // Need to apply the barrier to the accesses we accumulated, noting that we haven't updated current
                    for (auto pos = resolve_map->lower_bound(current_range); pos != current->pos_A->lower_bound; ++pos) {
                        barrier_action(&pos->second);
                    }
                }
                // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
                // iteration of the outer while.

                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
                // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
                // we stepped on the dest map
                const auto seek_to = current_range.end - 1;  // The subtraction is safe as range can't be empty (loop condition)
                current.invalidate_A();                      // Changes current->range
                current.seek(seek_to);
            } else if (!current->pos_A->valid && infill_state) {
                // If we didn't find anything in the current range, and we aren't recursing... we infill if required
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the correct segment after insert
            }
        }
        ++current;
    }

    // Infill if range goes past both the current and resolve map prior contents
    if (recur_to_infill && (current->range.end < range.end)) {
        ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
        ResourceAccessRangeMap gap_map;
        const auto the_end = resolve_map->end();
        ResolvePreviousAccess(type, trailing_fill_range, &gap_map, infill_state);
        for (auto &access : gap_map) {
            barrier_action(&access.second);
            resolve_map->insert(the_end, access);
        }
    }
}

void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
                                          ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            const ApplyTrackbackBarriersAction barrier_action(prev_dep.barriers);
            prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }

        if (src_external_.context) {
            const ApplyTrackbackBarriersAction barrier_action(src_external_.barriers);
            src_external_.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }
    }
}

// Non-lazy import of all accesses, WaitEvents needs this.
void AccessContext::ResolvePreviousAccesses() {
    ResourceAccessState default_state;
    for (const auto address_type : kAddressTypes) {
        ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
    }
}

AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
    return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
}

static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
                                                                      : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE;
    return stage_access;
}
static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
                                                                      : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE;
    return stage_access;
}

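// Illustrative only: VK_ATTACHMENT_LOAD_OP_LOAD reads the prior attachment contents, so it maps to the
// ..._ATTACHMENT_READ index; any other load op (CLEAR, DONT_CARE) overwrites the contents and maps to the
// ..._ATTACHMENT_WRITE index. The same split applies to both helpers above.
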
// Caller must manage returned pointer
static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
                                                     uint32_t subpass, const VkRect2D &render_area,
                                                     std::vector<const IMAGE_VIEW_STATE *> attachment_views) {
    auto *proxy = new AccessContext(context);
    proxy->UpdateAttachmentResolveAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    proxy->UpdateAttachmentStoreAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    return proxy;
}

template <typename BarrierAction>
class ResolveAccessRangeFunctor {
  public:
    ResolveAccessRangeFunctor(const AccessContext &context, AccessAddressType address_type, ResourceAccessRangeMap *descent_map,
                              const ResourceAccessState *infill_state, BarrierAction &barrier_action)
        : context_(context),
          address_type_(address_type),
          descent_map_(descent_map),
          infill_state_(infill_state),
          barrier_action_(barrier_action) {}
    ResolveAccessRangeFunctor() = delete;
    void operator()(const ResourceAccessRange &range) const {
        context_.ResolveAccessRange(address_type_, range, barrier_action_, descent_map_, infill_state_);
    }

  private:
    const AccessContext &context_;
    const AccessAddressType address_type_;
    ResourceAccessRangeMap *const descent_map_;
    const ResourceAccessState *infill_state_;
    BarrierAction &barrier_action_;
};

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range,
                                       BarrierAction &barrier_action, AccessAddressType address_type,
                                       ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    const ResolveAccessRangeFunctor<BarrierAction> action(*this, address_type, descent_map, infill_state, barrier_action);
    ApplyOverImageRange(image_state, subresource_range, action);
}

// Layout transitions are handled as if they were occurring at the beginning of the next subpass
bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
                                              const VkRect2D &render_area, uint32_t subpass,
                                              const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
                                              const char *func_name) const {
    bool skip = false;
    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
    // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
    // those effects have not been recorded yet.
    //
    // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
    // to apply and only copy then, if this proves a hot spot.
    std::unique_ptr<AccessContext> proxy_for_prev;
    TrackBack proxy_track_back;

    const auto &transitions = rp_state.subpass_transitions[subpass];
    for (const auto &transition : transitions) {
        const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);

        const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
        if (prev_needs_proxy) {
            if (!proxy_for_prev) {
                proxy_for_prev.reset(CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass,
                                                                    render_area, attachment_views));
                proxy_track_back = *track_back;
                proxy_track_back.context = proxy_for_prev.get();
            }
            track_back = &proxy_track_back;
        }
        auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
        if (hazard.hazard) {
            skip |= ex_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                                       "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
                                                       " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
                                                       func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
                                                       string_VkImageLayout(transition.old_layout),
                                                       string_VkImageLayout(transition.new_layout),
                                                       ex_context.FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

John Zulauf64ffe552021-02-06 10:25:07 -0700991bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -0600992 const VkRect2D &render_area, uint32_t subpass,
993 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
994 const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -0600995 bool skip = false;
996 const auto *attachment_ci = rp_state.createInfo.pAttachments;
997 VkExtent3D extent = CastTo3D(render_area.extent);
998 VkOffset3D offset = CastTo3D(render_area.offset);
John Zulaufa0a98292020-09-18 09:30:10 -0600999
John Zulauf1507ee42020-05-18 11:33:09 -06001000 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1001 if (subpass == rp_state.attachment_first_subpass[i]) {
1002 if (attachment_views[i] == nullptr) continue;
1003 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1004 const IMAGE_STATE *image = view.image_state.get();
1005 if (image == nullptr) continue;
1006 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001007
1008 // Need check in the following way
1009 // 1) if the usage bit isn't in the dest_access_scope, and there is layout traniition for initial use, report hazard
1010 // vs. transition
1011 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1012 // for each aspect loaded.
1013
1014 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001015 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001016 const bool is_color = !(has_depth || has_stencil);
1017
1018 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001019 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001020
John Zulaufaff20662020-06-01 14:07:58 -06001021 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001022 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001023
John Zulaufb02c1eb2020-10-06 16:33:36 -06001024 auto hazard_range = view.normalized_subresource_range;
1025 bool checked_stencil = false;
1026 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001027 hazard = DetectHazard(*image, load_index, view.normalized_subresource_range, SyncOrdering::kColorAttachment, offset,
John Zulauf859089b2020-10-29 17:37:03 -06001028 extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001029 aspect = "color";
1030 } else {
1031 if (has_depth) {
1032 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001033 hazard = DetectHazard(*image, load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset, extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001034 aspect = "depth";
1035 }
1036 if (!hazard.hazard && has_stencil) {
1037 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001038 hazard = DetectHazard(*image, stencil_load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset,
1039 extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001040 aspect = "stencil";
1041 checked_stencil = true;
1042 }
1043 }
1044
1045 if (hazard.hazard) {
1046 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulauf64ffe552021-02-06 10:25:07 -07001047 const auto &sync_state = ex_context.GetSyncState();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001048 if (hazard.tag == kCurrentCommandTag) {
1049                    // Hazard vs. ILT (the image layout transition recorded for this attachment by this same command)
1050 skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1051 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1052 " aspect %s during load with loadOp %s.",
1053 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1054 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06001055 skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1056 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001057 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001058 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauf64ffe552021-02-06 10:25:07 -07001059 ex_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001060 }
1061 }
1062 }
1063 }
1064 return skip;
1065}
1066
John Zulaufaff20662020-06-01 14:07:58 -06001067// Store operation validation can ignore resolve (before it) and layout transitions (after it). The first is ignored
1068// because of the ordering guarantees w.r.t. sample access, and because the resolve validation hasn't altered the state, since
1069// store is part of the same Next/End operation.
1070// The latter is handled directly in layout transition validation.
John Zulauf64ffe552021-02-06 10:25:07 -07001071bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001072 const VkRect2D &render_area, uint32_t subpass,
1073 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1074 const char *func_name) const {
1075 bool skip = false;
1076 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1077 VkExtent3D extent = CastTo3D(render_area.extent);
1078 VkOffset3D offset = CastTo3D(render_area.offset);
1079
1080 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1081 if (subpass == rp_state.attachment_last_subpass[i]) {
1082 if (attachment_views[i] == nullptr) continue;
1083 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1084 const IMAGE_STATE *image = view.image_state.get();
1085 if (image == nullptr) continue;
1086 const auto &ci = attachment_ci[i];
1087
1088 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1089            // so we assume that an implementation is *free* to write in that case, meaning that for correctness'
1090            // sake, we treat DONT_CARE as writing.
1091 const bool has_depth = FormatHasDepth(ci.format);
1092 const bool has_stencil = FormatHasStencil(ci.format);
1093 const bool is_color = !(has_depth || has_stencil);
1094 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
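            // Skip attachments whose (color or depth) store op doesn't store when no stencil aspect remains to check.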
1095 if (!has_stencil && !store_op_stores) continue;
1096
1097 HazardResult hazard;
1098 const char *aspect = nullptr;
1099 bool checked_stencil = false;
1100 if (is_color) {
1101 hazard = DetectHazard(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001102 view.normalized_subresource_range, SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001103 aspect = "color";
1104 } else {
1105 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1106 auto hazard_range = view.normalized_subresource_range;
1107 if (has_depth && store_op_stores) {
1108 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
1109 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001110 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001111 aspect = "depth";
1112 }
1113 if (!hazard.hazard && has_stencil && stencil_op_stores) {
1114 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
1115 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001116 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001117 aspect = "stencil";
1118 checked_stencil = true;
1119 }
1120 }
1121
1122 if (hazard.hazard) {
1123 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1124 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulauf64ffe552021-02-06 10:25:07 -07001125 skip |= ex_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07001126 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1127                                                       " %s aspect during store with %s %s. Access info %s.",
1128 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
John Zulauf64ffe552021-02-06 10:25:07 -07001129 op_type_string, store_op_string, ex_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001130 }
1131 }
1132 }
1133 return skip;
1134}
1135
John Zulauf64ffe552021-02-06 10:25:07 -07001136bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufb027cdb2020-05-21 14:25:22 -06001137 const VkRect2D &render_area,
1138 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, const char *func_name,
1139 uint32_t subpass) const {
John Zulauf64ffe552021-02-06 10:25:07 -07001140 ValidateResolveAction validate_action(rp_state.renderPass, subpass, *this, ex_context, func_name);
John Zulauf7635de32020-05-29 17:14:15 -06001141 ResolveOperation(validate_action, rp_state, render_area, attachment_views, subpass);
1142 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001143}
1144
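// Detector functors plugged into the DetectHazard templates below: Detect() checks one access map entry against the
// given usage, while DetectAsync() checks for racing use relative to the start tag of an unsynchronized context.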
John Zulauf3d84f1b2020-03-09 13:33:25 -06001145class HazardDetector {
1146 SyncStageAccessIndex usage_index_;
1147
1148 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001149 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001150 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1151 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001152 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001153 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001154};
1155
John Zulauf69133422020-05-20 14:55:53 -06001156class HazardDetectorWithOrdering {
1157 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001158 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001159
1160 public:
1161 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001162 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001163 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001164 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1165 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001166 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001167 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001168};
1169
John Zulauf16adfc92020-04-08 10:28:33 -06001170HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001171 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001172 if (!SimpleBinding(buffer)) return HazardResult();
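    // Buffers are tracked in the linear address space, offset by the base device-memory address of the binding.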
John Zulauf150e5332020-12-03 08:52:52 -07001173 const auto base_address = ResourceBaseAddress(buffer);
1174 HazardDetector detector(usage_index);
1175 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001176}
1177
John Zulauf69133422020-05-20 14:55:53 -06001178template <typename Detector>
1179HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1180 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1181 const VkExtent3D &extent, DetectOptions options) const {
1182 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001183 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001184 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1185 base_address);
1186 const auto address_type = ImageAddressType(image);
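    // Walk each linearized memory range covered by the subresource region, reporting the first hazard found.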
John Zulauf69133422020-05-20 14:55:53 -06001187 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001188 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001189 if (hazard.hazard) return hazard;
1190 }
1191 return HazardResult();
1192}
1193
John Zulauf540266b2020-04-06 18:54:53 -06001194HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1195 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1196 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001197 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1198 subresource.layerCount};
John Zulauf1507ee42020-05-18 11:33:09 -06001199 return DetectHazard(image, current_usage, subresource_range, offset, extent);
1200}
1201
1202HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1203 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1204 const VkExtent3D &extent) const {
John Zulauf69133422020-05-20 14:55:53 -06001205 HazardDetector detector(current_usage);
1206 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
1207}
1208
1209HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001210 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001211 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001212 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001213 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001214}
1215
John Zulaufb027cdb2020-05-21 14:25:22 -06001216// Some common code for looking at attachments. If there's anything wrong, we return no hazard; core validation
1217// should have reported the issue regarding an invalid attachment entry.
1218HazardResult AccessContext::DetectHazard(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001219 SyncOrdering ordering_rule, const VkOffset3D &offset, const VkExtent3D &extent,
John Zulaufb027cdb2020-05-21 14:25:22 -06001220 VkImageAspectFlags aspect_mask) const {
1221 if (view != nullptr) {
1222 const IMAGE_STATE *image = view->image_state.get();
1223 if (image != nullptr) {
1224 auto *detect_range = &view->normalized_subresource_range;
1225 VkImageSubresourceRange masked_range;
1226 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1227 masked_range = view->normalized_subresource_range;
1228 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1229 detect_range = &masked_range;
1230 }
1231
1232 // NOTE: The range encoding code is not robust to invalid ranges, so we protect it from our change
1233 if (detect_range->aspectMask) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001234 return DetectHazard(*image, current_usage, *detect_range, ordering_rule, offset, extent);
John Zulaufb027cdb2020-05-21 14:25:22 -06001235 }
1236 }
1237 }
1238 return HazardResult();
1239}
John Zulauf43cc7462020-12-03 12:33:12 -07001240
John Zulauf3d84f1b2020-03-09 13:33:25 -06001241class BarrierHazardDetector {
1242 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001243 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001244 SyncStageAccessFlags src_access_scope)
1245 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1246
John Zulauf5f13a792020-03-10 07:31:21 -06001247 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1248 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001249 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001250 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001251        // Async barrier hazard detection can use the same path, since the usage index is a write (IsWrite), not a read
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001252 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001253 }
1254
1255 private:
1256 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001257 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001258 SyncStageAccessFlags src_access_scope_;
1259};
1260
John Zulauf4a6105a2020-11-17 15:11:05 -07001261class EventBarrierHazardDetector {
1262 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001263 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001264 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
1265 const ResourceUsageTag &scope_tag)
1266 : usage_index_(usage_index),
1267 src_exec_scope_(src_exec_scope),
1268 src_access_scope_(src_access_scope),
1269 event_scope_(event_scope),
1270 scope_pos_(event_scope.cbegin()),
1271 scope_end_(event_scope.cend()),
1272 scope_tag_(scope_tag) {}
1273
1274 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1275        // TODO NOTE: This is almost the slowest way to do this... need to walk this intelligently...
1276        // Need to find a more efficient way to keep the two walks in sync, since we know pos->first is strictly increasing from call to call
1277 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1278 if (scope_pos_ == scope_end_) return HazardResult();
1279 if (!scope_pos_->first.intersects(pos->first)) {
1280            scope_pos_ = event_scope_.lower_bound(pos->first);  // Store the result so the cached scope iterator actually advances
1281 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1282 }
1283
1284 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1285 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1286 }
1287 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1288        // Async barrier hazard detection can use the same path, since the usage index is a write (IsWrite), not a read
1289 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1290 }
1291
1292 private:
1293 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001294 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001295 SyncStageAccessFlags src_access_scope_;
1296 const SyncEventState::ScopeMap &event_scope_;
1297    mutable SyncEventState::ScopeMap::const_iterator scope_pos_;  // mutable: advanced lazily from the const Detect() above
1298 SyncEventState::ScopeMap::const_iterator scope_end_;
1299 const ResourceUsageTag &scope_tag_;
1300};
1301
Jeremy Gebben40a22942020-12-22 14:22:06 -07001302HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001303 const SyncStageAccessFlags &src_access_scope,
1304 const VkImageSubresourceRange &subresource_range,
1305 const SyncEventState &sync_event, DetectOptions options) const {
1306 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1307 // first access scope map to use, and there's no easy way to plumb it in below.
1308 const auto address_type = ImageAddressType(image);
1309 const auto &event_scope = sync_event.FirstScope(address_type);
1310
1311 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1312 event_scope, sync_event.first_scope_tag);
1313 VkOffset3D zero_offset = {0, 0, 0};
1314 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
1315}
1316
Jeremy Gebben40a22942020-12-22 14:22:06 -07001317HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001318 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001319 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001320 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001321 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
1322 VkOffset3D zero_offset = {0, 0, 0};
1323 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001324}
1325
Jeremy Gebben40a22942020-12-22 14:22:06 -07001326HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001327 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001328 const VkImageMemoryBarrier &barrier) const {
1329 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1330 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1331 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1332}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001333HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001334 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulaufd5115702021-01-18 12:34:33 -07001335 image_barrier.barrier.src_access_scope, image_barrier.range.subresource_range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001336}
John Zulauf355e49b2020-04-24 15:11:15 -06001337
John Zulauf9cb530d2019-09-30 14:14:10 -06001338template <typename Flags, typename Map>
1339SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1340 SyncStageAccessFlags scope = 0;
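    // Map keys are single stage/access bits in increasing order, so once a key compares greater than flag_mask no
    // later key can intersect it and the walk can stop early.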
1341 for (const auto &bit_scope : map) {
1342 if (flag_mask < bit_scope.first) break;
1343
1344 if (flag_mask & bit_scope.first) {
1345 scope |= bit_scope.second;
1346 }
1347 }
1348 return scope;
1349}
1350
Jeremy Gebben40a22942020-12-22 14:22:06 -07001351SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001352 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1353}
1354
Jeremy Gebben40a22942020-12-22 14:22:06 -07001355SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1356 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001357}
1358
Jeremy Gebben40a22942020-12-22 14:22:06 -07001359// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1360SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001361    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1362    // accesses (after factoring out common terms, the union of the per-stage stage/access intersections is the intersection
1363    // of the union of all stage/access types for all the stages with the same union for the access mask)...
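    // Illustrative sketch (hypothetical values): SyncStageAccess::AccessScope(VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR,
    // VK_ACCESS_2_SHADER_READ_BIT_KHR) yields only the fragment-stage shader-read stage/access bits; the same access
    // bits for other shader stages are masked off by the stage term.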
John Zulauf9cb530d2019-09-30 14:14:10 -06001364 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1365}
1366
1367template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001368void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001369    // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs. READ usages
1370    // that do incremental updates)
John Zulauf4a6105a2020-11-17 15:11:05 -07001371 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001372 auto pos = accesses->lower_bound(range);
1373 if (pos == accesses->end() || !pos->first.intersects(range)) {
1374 // The range is empty, fill it with a default value.
1375 pos = action.Infill(accesses, pos, range);
1376 } else if (range.begin < pos->first.begin) {
1377 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001378 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001379 } else if (pos->first.begin < range.begin) {
1380 // Trim the beginning if needed
1381 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1382 ++pos;
1383 }
1384
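    // Walk the entries overlapping the range: split any entry straddling range.end, apply the action, and infill any
    // gap before a disjoint successor so the whole range ends up covered.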
1385 const auto the_end = accesses->end();
1386 while ((pos != the_end) && pos->first.intersects(range)) {
1387 if (pos->first.end > range.end) {
1388 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1389 }
1390
1391 pos = action(accesses, pos);
1392 if (pos == the_end) break;
1393
1394 auto next = pos;
1395 ++next;
1396 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1397 // Need to infill if next is disjoint
1398 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001399 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001400 next = action.Infill(accesses, next, new_range);
1401 }
1402 pos = next;
1403 }
1404}
John Zulaufd5115702021-01-18 12:34:33 -07001405
1406// Give a comparable interface for range generators and ranges
1407template <typename Action>
1408inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
1409 assert(range);
1410 UpdateMemoryAccessState(accesses, *range, action);
1411}
1412
John Zulauf4a6105a2020-11-17 15:11:05 -07001413template <typename Action, typename RangeGen>
1414void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1415 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001416    RangeGen &range_gen = *range_gen_arg;  // Style requires non-const parameters be passed by pointer, but deref-ing a pointer-to-iterator everywhere is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001417 for (; range_gen->non_empty(); ++range_gen) {
1418 UpdateMemoryAccessState(accesses, *range_gen, action);
1419 }
1420}
John Zulauf9cb530d2019-09-30 14:14:10 -06001421
1422struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001423 using Iterator = ResourceAccessRangeMap::iterator;
1424 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001425 // this is only called on gaps, and never returns a gap.
1426 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001427 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001428 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001429 }
John Zulauf5f13a792020-03-10 07:31:21 -06001430
John Zulauf5c5e88d2019-12-26 11:22:02 -07001431 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001432 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001433 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001434 return pos;
1435 }
1436
John Zulauf43cc7462020-12-03 12:33:12 -07001437 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001438 SyncOrdering ordering_rule_, const ResourceUsageTag &tag_)
1439 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001440 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001441 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001442 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001443 const SyncOrdering ordering_rule;
John Zulauf9cb530d2019-09-30 14:14:10 -06001444 const ResourceUsageTag &tag;
1445};
1446
John Zulauf4a6105a2020-11-17 15:11:05 -07001447// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001448struct PipelineBarrierOp {
1449 SyncBarrier barrier;
1450 bool layout_transition;
1451 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1452 : barrier(barrier_), layout_transition(layout_transition_) {}
1453 PipelineBarrierOp() = default;
John Zulaufd5115702021-01-18 12:34:33 -07001454 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf1e331ec2020-12-04 18:29:38 -07001455 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1456};
John Zulauf4a6105a2020-11-17 15:11:05 -07001457// The barrier operation for wait events
1458struct WaitEventBarrierOp {
1459 const ResourceUsageTag *scope_tag;
1460 SyncBarrier barrier;
1461 bool layout_transition;
1462 WaitEventBarrierOp(const ResourceUsageTag &scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1463 : scope_tag(&scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
1464 WaitEventBarrierOp() = default;
1465 void operator()(ResourceAccessState *access_state) const {
1466        assert(scope_tag);  // Not valid to execute an op without a scope tag; default construction exists only for std::vector support
1467 access_state->ApplyBarrier(*scope_tag, barrier, layout_transition);
1468 }
1469};
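// Note the contrast with PipelineBarrierOp above: the event variant applies the barrier only to accesses recorded at
// or before the scope (SetEvent) tag, matching the "first scope" semantics of vkCmdWaitEvents.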
John Zulauf1e331ec2020-12-04 18:29:38 -07001470
John Zulauf4a6105a2020-11-17 15:11:05 -07001471// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1472// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1473// of a collection is known/present.
John Zulauf1e331ec2020-12-04 18:29:38 -07001474template <typename BarrierOp>
John Zulauf89311b42020-09-29 16:28:47 -06001475class ApplyBarrierOpsFunctor {
1476 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001477 using Iterator = ResourceAccessRangeMap::iterator;
1478 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -06001479
John Zulauf5c5e88d2019-12-26 11:22:02 -07001480 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001481 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001482 for (const auto &op : barrier_ops_) {
1483 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001484 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001485
John Zulauf89311b42020-09-29 16:28:47 -06001486 if (resolve_) {
1487 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1488 // another walk
1489 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001490 }
1491 return pos;
1492 }
1493
John Zulauf89311b42020-09-29 16:28:47 -06001494 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulaufd5115702021-01-18 12:34:33 -07001495 ApplyBarrierOpsFunctor(bool resolve, size_t size_hint, const ResourceUsageTag &tag)
1496 : resolve_(resolve), barrier_ops_(), tag_(tag) {
1497 barrier_ops_.reserve(size_hint);
1498 }
1499 void EmplaceBack(const BarrierOp &op) { barrier_ops_.emplace_back(op); }
John Zulauf89311b42020-09-29 16:28:47 -06001500
1501 private:
1502 bool resolve_;
John Zulaufd5115702021-01-18 12:34:33 -07001503 std::vector<BarrierOp> barrier_ops_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001504 const ResourceUsageTag &tag_;
1505};
1506
John Zulauf4a6105a2020-11-17 15:11:05 -07001507// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1508// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1509template <typename BarrierOp>
1510class ApplyBarrierFunctor {
1511 public:
1512 using Iterator = ResourceAccessRangeMap::iterator;
1513 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1514
1515 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1516 auto &access_state = pos->second;
1517 barrier_op_(&access_state);
1518 return pos;
1519 }
1520
1521 ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
1522
1523 private:
John Zulaufd5115702021-01-18 12:34:33 -07001524 BarrierOp barrier_op_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001525};
1526
John Zulauf1e331ec2020-12-04 18:29:38 -07001527// This functor resolves the pending state.
1528class ResolvePendingBarrierFunctor {
1529 public:
1530 using Iterator = ResourceAccessRangeMap::iterator;
1531 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1532
1533 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1534 auto &access_state = pos->second;
1535 access_state.ApplyPendingBarriers(tag_);
1536 return pos;
1537 }
1538
1539 ResolvePendingBarrierFunctor(const ResourceUsageTag &tag) : tag_(tag) {}
1540
1541 private:
John Zulauf89311b42020-09-29 16:28:47 -06001542 const ResourceUsageTag &tag_;
John Zulauf9cb530d2019-09-30 14:14:10 -06001543};
1544
John Zulauf8e3c3e92021-01-06 11:19:36 -07001545void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1546 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
1547 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001548 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001549}
1550
John Zulauf8e3c3e92021-01-06 11:19:36 -07001551void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001552 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001553 if (!SimpleBinding(buffer)) return;
1554 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001555 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001556}
John Zulauf355e49b2020-04-24 15:11:15 -06001557
John Zulauf8e3c3e92021-01-06 11:19:36 -07001558void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001559 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf540266b2020-04-06 18:54:53 -06001560 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001561 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001562 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001563 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1564 base_address);
1565 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001566 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf5f13a792020-03-10 07:31:21 -06001567 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001568 UpdateMemoryAccessState(&GetAccessStateMap(address_type), *range_gen, action);
John Zulauf5f13a792020-03-10 07:31:21 -06001569 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001570}
John Zulauf8e3c3e92021-01-06 11:19:36 -07001571void AccessContext::UpdateAccessState(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1572 const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask,
1573 const ResourceUsageTag &tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001574 if (view != nullptr) {
1575 const IMAGE_STATE *image = view->image_state.get();
1576 if (image != nullptr) {
1577 auto *update_range = &view->normalized_subresource_range;
1578 VkImageSubresourceRange masked_range;
1579 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1580 masked_range = view->normalized_subresource_range;
1581 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1582 update_range = &masked_range;
1583 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001584 UpdateAccessState(*image, current_usage, ordering_rule, *update_range, offset, extent, tag);
John Zulauf7635de32020-05-29 17:14:15 -06001585 }
1586 }
1587}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001588
John Zulauf8e3c3e92021-01-06 11:19:36 -07001589void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001590 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1591 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001592 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1593 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001594 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001595}
1596
John Zulauf540266b2020-04-06 18:54:53 -06001597template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001598void AccessContext::UpdateResourceAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001599 if (!SimpleBinding(buffer)) return;
1600 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf43cc7462020-12-03 12:33:12 -07001601 UpdateMemoryAccessState(&GetAccessStateMap(AccessAddressType::kLinear), (range + base_address), action);
John Zulauf540266b2020-04-06 18:54:53 -06001602}
1603
1604template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001605void AccessContext::UpdateResourceAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
1606 const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001607 if (!SimpleBinding(image)) return;
1608 const auto address_type = ImageAddressType(image);
1609 auto *accesses = &GetAccessStateMap(address_type);
John Zulauf540266b2020-04-06 18:54:53 -06001610
John Zulauf16adfc92020-04-08 10:28:33 -06001611 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001612 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
1613 image.createInfo.extent, base_address);
1614
John Zulauf540266b2020-04-06 18:54:53 -06001615 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001616 UpdateMemoryAccessState(accesses, *range_gen, action);
John Zulauf540266b2020-04-06 18:54:53 -06001617 }
1618}
1619
John Zulauf7635de32020-05-29 17:14:15 -06001620void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1621 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1622 const ResourceUsageTag &tag) {
1623 UpdateStateResolveAction update(*this, tag);
1624 ResolveOperation(update, rp_state, render_area, attachment_views, subpass);
1625}
1626
John Zulaufaff20662020-06-01 14:07:58 -06001627void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1628 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1629 const ResourceUsageTag &tag) {
1630 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1631 VkExtent3D extent = CastTo3D(render_area.extent);
1632 VkOffset3D offset = CastTo3D(render_area.offset);
1633
1634 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1635 if (rp_state.attachment_last_subpass[i] == subpass) {
1636 if (attachment_views[i] == nullptr) continue; // UNUSED
1637 const auto &view = *attachment_views[i];
1638 const IMAGE_STATE *image = view.image_state.get();
1639 if (image == nullptr) continue;
1640
1641 const auto &ci = attachment_ci[i];
1642 const bool has_depth = FormatHasDepth(ci.format);
1643 const bool has_stencil = FormatHasStencil(ci.format);
1644 const bool is_color = !(has_depth || has_stencil);
1645 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1646
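            // Store ops only write the aspects they actually store; depth and stencil writes are recorded independently.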
1647 if (is_color && store_op_stores) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001648 UpdateAccessState(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1649 view.normalized_subresource_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001650 } else {
1651 auto update_range = view.normalized_subresource_range;
1652 if (has_depth && store_op_stores) {
1653 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001654 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1655 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001656 }
1657 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1658 if (has_stencil && stencil_op_stores) {
1659 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001660 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1661 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001662 }
1663 }
1664 }
1665 }
1666}
1667
John Zulauf540266b2020-04-06 18:54:53 -06001668template <typename Action>
John Zulaufd5115702021-01-18 12:34:33 -07001669void AccessContext::ApplyToContext(const Action &barrier_action) {
John Zulauf540266b2020-04-06 18:54:53 -06001670    // Note: Barriers do *not* cross context boundaries, applying only to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001671 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001672 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001673 }
1674}
1675
1676void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001677 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1678 auto &context = contexts[subpass_index];
John Zulaufb02c1eb2020-10-06 16:33:36 -06001679 ApplyTrackbackBarriersAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001680 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001681 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001682 }
1683 }
1684}
1685
John Zulauf355e49b2020-04-24 15:11:15 -06001686// Suitable only for *subpass* access contexts
John Zulauf7635de32020-05-29 17:14:15 -06001687HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const IMAGE_VIEW_STATE *attach_view) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001688 if (!attach_view) return HazardResult();
1689 const auto image_state = attach_view->image_state.get();
1690 if (!image_state) return HazardResult();
1691
John Zulauf355e49b2020-04-24 15:11:15 -06001692 // We should never ask for a transition from a context we don't have
John Zulauf7635de32020-05-29 17:14:15 -06001693 assert(track_back.context);
John Zulauf355e49b2020-04-24 15:11:15 -06001694
1695 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001696 // Hazard detection for the transition can be against the merged of the barriers (it only uses src_...)
1697 const auto merged_barrier = MergeBarriers(track_back.barriers);
John Zulaufc523bf62021-02-16 08:20:34 -07001698 HazardResult hazard = track_back.context->DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope.exec_scope,
1699 merged_barrier.src_access_scope,
1700 attach_view->normalized_subresource_range, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001701 if (!hazard.hazard) {
1702 // The Async hazard check is against the current context's async set.
John Zulaufc523bf62021-02-16 08:20:34 -07001703 hazard = DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope.exec_scope, merged_barrier.src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001704 attach_view->normalized_subresource_range, kDetectAsync);
1705 }
John Zulaufa0a98292020-09-18 09:30:10 -06001706
John Zulauf355e49b2020-04-24 15:11:15 -06001707 return hazard;
1708}
1709
John Zulaufb02c1eb2020-10-06 16:33:36 -06001710void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
1711 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1712 const ResourceUsageTag &tag) {
1713 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001714 const ResourceAccessState empty_infill;
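    // Use a default-constructed (no prior access) state to infill ranges with nothing recorded, rather than leaving gaps.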
John Zulaufb02c1eb2020-10-06 16:33:36 -06001715 for (const auto &transition : transitions) {
1716 const auto prev_pass = transition.prev_pass;
1717 const auto attachment_view = attachment_views[transition.attachment];
1718 if (!attachment_view) continue;
1719 const auto *image = attachment_view->image_state.get();
1720 if (!image) continue;
1721 if (!SimpleBinding(*image)) continue;
1722
1723 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1724 assert(trackback);
1725
1726 // Import the attachments into the current context
1727 const auto *prev_context = trackback->context;
1728 assert(prev_context);
1729 const auto address_type = ImageAddressType(*image);
1730 auto &target_map = GetAccessStateMap(address_type);
1731 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
1732 prev_context->ResolveAccessRange(*image, attachment_view->normalized_subresource_range, barrier_action, address_type,
John Zulauf646cc292020-10-23 09:16:45 -06001733 &target_map, &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001734 }
1735
John Zulauf86356ca2020-10-19 11:46:41 -06001736 // If there were no transitions skip this global map walk
1737 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001738 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulaufd5115702021-01-18 12:34:33 -07001739 ApplyToContext(apply_pending_action);
John Zulauf86356ca2020-10-19 11:46:41 -06001740 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001741}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001742
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001743void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
1744 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
John Zulauf669dfd52021-01-27 17:15:28 -07001745
1746 auto *events_context = GetCurrentEventsContext();
1747 assert(events_context);
1748 for (auto &event_pair : *events_context) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001749 assert(event_pair.second); // Shouldn't be storing empty
1750 auto &sync_event = *event_pair.second;
1751        // Events don't happen at a stage, so we need to check for and store the unexpanded ALL_COMMANDS bit, if set, for later inter-event calls
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001752 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
1753 sync_event.barriers |= dst.exec_scope;
1754 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
John Zulauf4a6105a2020-11-17 15:11:05 -07001755 }
1756 }
1757}
1758
John Zulauf355e49b2020-04-24 15:11:15 -06001759
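// Validate the memory accesses implied by the descriptor sets bound at the given bind point: for each shader stage
// and each used binding, detect hazards against the image, texel-buffer, or buffer ranges the descriptors reference.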
locke-lunarg61870c22020-06-09 14:51:50 -06001760bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1761 const char *func_name) const {
1762 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001763 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001764 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001765 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
1766 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001767 return skip;
1768 }
1769
1770 using DescriptorClass = cvdescriptorset::DescriptorClass;
1771 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1772 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1773 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1774 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1775
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001776 for (const auto &stage_state : pipe->stage_state) {
1777 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1778 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001779 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001780 }
locke-lunarg61870c22020-06-09 14:51:50 -06001781 for (const auto &set_binding : stage_state.descriptor_uses) {
1782 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1783 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1784 set_binding.first.second);
1785 const auto descriptor_type = binding_it.GetType();
1786 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1787 auto array_idx = 0;
1788
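            // For variable-count bindings the layout's global index range is an upper bound; the end is replaced by
            // the descriptor count actually allocated for this set.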
1789 if (binding_it.IsVariableDescriptorCount()) {
1790 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1791 }
1792 SyncStageAccessIndex sync_index =
1793 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1794
1795 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1796 uint32_t index = i - index_range.start;
1797 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1798 switch (descriptor->GetClass()) {
1799 case DescriptorClass::ImageSampler:
1800 case DescriptorClass::Image: {
1801 const IMAGE_VIEW_STATE *img_view_state = nullptr;
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001802 VkImageLayout image_layout;
locke-lunarg61870c22020-06-09 14:51:50 -06001803 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001804 const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
1805 img_view_state = image_sampler_descriptor->GetImageViewState();
1806 image_layout = image_sampler_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001807 } else {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001808 const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1809 img_view_state = image_descriptor->GetImageViewState();
1810 image_layout = image_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001811 }
1812 if (!img_view_state) continue;
1813 const IMAGE_STATE *img_state = img_view_state->image_state.get();
1814 VkExtent3D extent = {};
1815 VkOffset3D offset = {};
1816 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1817 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1818 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
1819 } else {
1820 extent = img_state->createInfo.extent;
1821 }
John Zulauf361fb532020-07-22 10:45:39 -06001822 HazardResult hazard;
1823 const auto &subresource_range = img_view_state->normalized_subresource_range;
1824 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
1825 // Input attachments are subject to raster ordering rules
1826 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001827 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001828 } else {
1829 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range, offset, extent);
1830 }
John Zulauf33fc1d52020-07-17 11:01:10 -06001831 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001832 skip |= sync_state_->LogError(
1833 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001834 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1835 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001836 func_name, string_SyncHazard(hazard.hazard),
1837 sync_state_->report_data->FormatHandle(img_view_state->image_view).c_str(),
1838 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001839 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001840 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1841 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
John Zulauffaea0ee2021-01-14 14:01:32 -07001842 set_binding.first.second, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001843 }
1844 break;
1845 }
1846 case DescriptorClass::TexelBuffer: {
1847 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1848 if (!buf_view_state) continue;
1849 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001850 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001851 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001852 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001853 skip |= sync_state_->LogError(
1854 buf_view_state->buffer_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001855 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1856 func_name, string_SyncHazard(hazard.hazard),
locke-lunarg88dbb542020-06-23 22:05:42 -06001857 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view).c_str(),
1858 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001859 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001860 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1861 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001862 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001863 }
1864 break;
1865 }
1866 case DescriptorClass::GeneralBuffer: {
1867 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1868 auto buf_state = buffer_descriptor->GetBufferState();
1869 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001870 const ResourceAccessRange range =
1871 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001872 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001873 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001874 skip |= sync_state_->LogError(
1875 buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001876 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1877 func_name, string_SyncHazard(hazard.hazard),
1878 sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
locke-lunarg88dbb542020-06-23 22:05:42 -06001879 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001880 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001881 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1882 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001883 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001884 }
1885 break;
1886 }
1887 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1888 default:
1889 break;
1890 }
1891 }
1892 }
1893 }
1894 return skip;
1895}
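// Illustrative sketch only (hypothetical helper name; the VK_WHOLE_SIZE handling is an assumption):
// MakeRange(*buf_view_state) above is taken to reduce a texel-buffer view to the half-open byte
// interval it can touch, which is the range the hazard detector walks.
static inline ResourceAccessRange SketchTexelViewRange(const BUFFER_VIEW_STATE &view) {
    const VkDeviceSize offset = view.create_info.offset;
    const VkDeviceSize size = (view.create_info.range == VK_WHOLE_SIZE)
                                  ? (view.buffer_state->createInfo.size - offset)
                                  : view.create_info.range;
    return ResourceAccessRange(offset, offset + size);  // [begin, end)
}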
1896
1897void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1898 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001899 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001900 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001901 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
1902 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001903 return;
1904 }
1905
1906 using DescriptorClass = cvdescriptorset::DescriptorClass;
1907 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1908 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1909 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1910 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1911
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001912 for (const auto &stage_state : pipe->stage_state) {
1913 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1914 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001915 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001916 }
locke-lunarg61870c22020-06-09 14:51:50 -06001917 for (const auto &set_binding : stage_state.descriptor_uses) {
1918 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1919 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1920 set_binding.first.second);
1921 const auto descriptor_type = binding_it.GetType();
1922 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1923 auto array_idx = 0;
1924
1925 if (binding_it.IsVariableDescriptorCount()) {
1926 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1927 }
1928 SyncStageAccessIndex sync_index =
1929 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1930
1931 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1932 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1933 switch (descriptor->GetClass()) {
1934 case DescriptorClass::ImageSampler:
1935 case DescriptorClass::Image: {
1936 const IMAGE_VIEW_STATE *img_view_state = nullptr;
1937 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
1938 img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
1939 } else {
1940 img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
1941 }
1942 if (!img_view_state) continue;
1943 const IMAGE_STATE *img_state = img_view_state->image_state.get();
1944 VkExtent3D extent = {};
1945 VkOffset3D offset = {};
1946 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1947 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1948 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
1949 } else {
1950 extent = img_state->createInfo.extent;
1951 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001952 SyncOrdering ordering_rule = (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
1953 ? SyncOrdering::kRaster
1954 : SyncOrdering::kNonAttachment;
1955 current_context_->UpdateAccessState(*img_state, sync_index, ordering_rule,
1956 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001957 break;
1958 }
1959 case DescriptorClass::TexelBuffer: {
1960 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1961 if (!buf_view_state) continue;
1962 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001963 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001964 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001965 break;
1966 }
1967 case DescriptorClass::GeneralBuffer: {
1968 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1969 auto buf_state = buffer_descriptor->GetBufferState();
1970 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001971 const ResourceAccessRange range =
1972 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07001973 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001974 break;
1975 }
1976 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1977 default:
1978 break;
1979 }
1980 }
1981 }
1982 }
1983}
1984
1985bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
1986 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001987 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
1988 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06001989 return skip;
1990 }
1991
1992 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
1993 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001994 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06001995
1996 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001997 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06001998 if (binding_description.binding < binding_buffers_size) {
1999 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002000 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002001
locke-lunarg1ae57d62020-11-18 10:49:19 -07002002 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002003 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2004 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002005 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002006 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002007 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002008 buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002009 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002010 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002011 }
2012 }
2013 }
2014 return skip;
2015}
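// Illustrative sketch (hypothetical helper, not the actual GetBufferRange): the range checked in
// ValidateDrawVertex is assumed to start at offset + firstVertex * stride and to extend either
// vertexCount strides or, when vertexCount == UINT32_MAX, to the end of the bound buffer.
static inline ResourceAccessRange SketchVertexRange(VkDeviceSize binding_offset, VkDeviceSize buffer_size,
                                                    uint32_t first_vertex, uint32_t vertex_count, uint32_t stride) {
    const VkDeviceSize begin = binding_offset + first_vertex * static_cast<VkDeviceSize>(stride);
    VkDeviceSize end = buffer_size;  // UINT32_MAX sentinel: conservatively cover the rest of the buffer
    if (vertex_count != UINT32_MAX) {
        const VkDeviceSize sized_end = begin + vertex_count * static_cast<VkDeviceSize>(stride);
        end = (sized_end < buffer_size) ? sized_end : buffer_size;
    }
    return ResourceAccessRange(begin, end);
}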
2016
2017void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002018 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2019 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002020 return;
2021 }
2022 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2023 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002024 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002025
2026 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002027 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002028 if (binding_description.binding < binding_buffers_size) {
2029 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002030 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002031
locke-lunarg1ae57d62020-11-18 10:49:19 -07002032 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002033 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2034 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002035 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
2036 SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002037 }
2038 }
2039}
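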
2040
2041bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2042 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002043 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002044 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002045 }
locke-lunarg61870c22020-06-09 14:51:50 -06002046
locke-lunarg1ae57d62020-11-18 10:49:19 -07002047 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002048 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002049 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2050 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002051 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002052 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002053 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002054 index_buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002055 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002056 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002057 }
2058
2059 // TODO: For now, we check against the whole vertex buffer, since index buffer contents can change
2060 // at any time up to queue submission; a more accurate range will be detected in the future.
2061 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2062 return skip;
2063}
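// For reference (minimal sketch, assuming the obvious mapping): GetIndexAlignment above turns the
// bound index type into a byte width, which is what converts (firstIndex, indexCount) into a byte range.
static inline uint32_t SketchIndexSize(VkIndexType index_type) {
    switch (index_type) {
        case VK_INDEX_TYPE_UINT16:
            return 2;
        case VK_INDEX_TYPE_UINT32:
            return 4;
        case VK_INDEX_TYPE_UINT8_EXT:
            return 1;
        default:
            return 0;  // e.g. VK_INDEX_TYPE_NONE_KHR: no fetchable indices
    }
}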
2064
2065void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag &tag) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002066 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002067
locke-lunarg1ae57d62020-11-18 10:49:19 -07002068 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002069 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002070 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2071 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002072 current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002073
2074 // TODO: For now, we check against the whole vertex buffer, since index buffer contents can change
2075 // at any time up to queue submission; a more accurate range will be detected in the future.
2076 RecordDrawVertex(UINT32_MAX, 0, tag);
2077}
2078
2079bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002080 bool skip = false;
2081 if (!current_renderpass_context_) return skip;
John Zulauf64ffe552021-02-06 10:25:07 -07002082 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
locke-lunarg7077d502020-06-18 21:37:26 -06002083 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002084}
2085
2086void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002087 if (current_renderpass_context_) {
John Zulauf64ffe552021-02-06 10:25:07 -07002088 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002089 }
locke-lunarg61870c22020-06-09 14:51:50 -06002090}
2091
John Zulauf64ffe552021-02-06 10:25:07 -07002092void CommandBufferAccessContext::RecordBeginRenderPass(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2093 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2094 const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002095 // Create an access context for the current renderpass.
John Zulauf64ffe552021-02-06 10:25:07 -07002096 render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
John Zulauf16adfc92020-04-08 10:28:33 -06002097 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf64ffe552021-02-06 10:25:07 -07002098 current_renderpass_context_->RecordBeginRenderPass(tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002099 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf16adfc92020-04-08 10:28:33 -06002100}
2101
John Zulauf64ffe552021-02-06 10:25:07 -07002102void CommandBufferAccessContext::RecordNextSubpass(CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002103 assert(current_renderpass_context_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002104 auto prev_tag = NextCommandTag(command);
2105 auto next_tag = NextSubcommandTag(command);
John Zulauf64ffe552021-02-06 10:25:07 -07002106 current_renderpass_context_->RecordNextSubpass(prev_tag, next_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002107 current_context_ = &current_renderpass_context_->CurrentContext();
2108}
2109
John Zulauf64ffe552021-02-06 10:25:07 -07002110void CommandBufferAccessContext::RecordEndRenderPass(CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002111 assert(current_renderpass_context_);
2112 if (!current_renderpass_context_) return;
2113
John Zulauf64ffe552021-02-06 10:25:07 -07002114 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, NextCommandTag(command));
John Zulauf355e49b2020-04-24 15:11:15 -06002115 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002116 current_renderpass_context_ = nullptr;
2117}
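// Usage sketch (assumed caller sequence, simplified): the Record*RenderPass methods above mirror
// the render pass commands, swapping current_context_ between the command-buffer level context
// and the per-subpass contexts:
//   cb_access_context.RecordBeginRenderPass(rp_state, render_area, attachment_views, begin_tag);
//   /* draws record through CurrentContext() of subpass 0 */
//   cb_access_context.RecordNextSubpass(CMD_NEXTSUBPASS);     // resolve/store, then transition/load
//   cb_access_context.RecordEndRenderPass(CMD_ENDRENDERPASS); // resolves subpass accesses back out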
2118
John Zulauf4a6105a2020-11-17 15:11:05 -07002119void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
2120 // Erase is okay with the key not being in the map.
John Zulauf669dfd52021-01-27 17:15:28 -07002121 const auto *event_state = sync_state_->Get<EVENT_STATE>(event);
2122 if (event_state) {
2123 GetCurrentEventsContext()->Destroy(event_state);
John Zulaufd5115702021-01-18 12:34:33 -07002124 }
2125}
2126
John Zulauf64ffe552021-02-06 10:25:07 -07002127bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &ex_context, const CMD_BUFFER_STATE &cmd,
John Zulauffaea0ee2021-01-14 14:01:32 -07002128 const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002129 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002130 const auto &sync_state = ex_context.GetSyncState();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002131 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2132 if (!pipe ||
2133 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002134 return skip;
2135 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002136 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002137 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
John Zulauf64ffe552021-02-06 10:25:07 -07002138 VkExtent3D extent = CastTo3D(render_area_.extent);
2139 VkOffset3D offset = CastTo3D(render_area_.offset);
locke-lunarg37047832020-06-12 13:44:45 -06002140
John Zulauf1a224292020-06-30 14:52:13 -06002141 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002142 // The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002143 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2144 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002145 if (location >= subpass.colorAttachmentCount ||
2146 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002147 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002148 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002149 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf1a224292020-06-30 14:52:13 -06002150 HazardResult hazard = current_context.DetectHazard(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002151 SyncOrdering::kColorAttachment, offset, extent);
locke-lunarg96dc9632020-06-10 17:22:18 -06002152 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002153 skip |= sync_state.LogError(img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002154 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002155 func_name, string_SyncHazard(hazard.hazard),
2156 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2157 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002158 location, ex_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002159 }
2160 }
2161 }
locke-lunarg37047832020-06-12 13:44:45 -06002162
2163 // PHASE1 TODO: Add layout-based read vs. write selection.
2164 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002165 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002166 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002167 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002168 bool depth_write = false, stencil_write = false;
2169
2170 // PHASE1 TODO: These validations should be in core_checks.
2171 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002172 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2173 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002174 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2175 depth_write = true;
2176 }
2177 // PHASE1 TODO: This needs to check whether the stencil buffer is writable.
2178 // If failOp, passOp, or depthFailOp is not KEEP, and writeMask isn't 0, it's writable.
2179 // If the depth test is disabled, it is treated as passing, so depthFailOp doesn't run.
2180 // PHASE1 TODO: These validations should be in core_checks.
2181 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002182 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002183 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2184 stencil_write = true;
2185 }
2186
2187 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2188 if (depth_write) {
2189 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002190 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002191 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002192 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002193 skip |= sync_state.LogError(
2194 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002195 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002196 func_name, string_SyncHazard(hazard.hazard),
2197 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2198 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002199 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002200 }
2201 }
2202 if (stencil_write) {
2203 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002204 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002205 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002206 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002207 skip |= sync_state.LogError(
2208 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002209 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002210 func_name, string_SyncHazard(hazard.hazard),
2211 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2212 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002213 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002214 }
locke-lunarg61870c22020-06-09 14:51:50 -06002215 }
2216 }
2217 return skip;
2218}
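// Sketch of the stencil-writability refinement the PHASE1 TODO above describes but does not yet
// implement (hypothetical helper): a stencil face only writes if some op is not KEEP and the
// write mask is non-zero.
static inline bool SketchStencilFaceWrites(const VkStencilOpState &face) {
    const bool op_writes = (face.failOp != VK_STENCIL_OP_KEEP) || (face.passOp != VK_STENCIL_OP_KEEP) ||
                           (face.depthFailOp != VK_STENCIL_OP_KEEP);
    return op_writes && (face.writeMask != 0);
}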
2219
John Zulauf64ffe552021-02-06 10:25:07 -07002220void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002221 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2222 if (!pipe ||
2223 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002224 return;
2225 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002226 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002227 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
John Zulauf64ffe552021-02-06 10:25:07 -07002228 VkExtent3D extent = CastTo3D(render_area_.extent);
2229 VkOffset3D offset = CastTo3D(render_area_.offset);
locke-lunarg61870c22020-06-09 14:51:50 -06002230
John Zulauf1a224292020-06-30 14:52:13 -06002231 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002232 // The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002233 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2234 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002235 if (location >= subpass.colorAttachmentCount ||
2236 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002237 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002238 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002239 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf8e3c3e92021-01-06 11:19:36 -07002240 current_context.UpdateAccessState(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
2241 SyncOrdering::kColorAttachment, offset, extent, 0, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002242 }
2243 }
locke-lunarg37047832020-06-12 13:44:45 -06002244
2245 // PHASE1 TODO: Add layout-based read vs. write selection.
2246 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002247 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002248 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002249 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002250 bool depth_write = false, stencil_write = false;
2251
2252 // PHASE1 TODO: These validations should be in core_checks.
2253 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002254 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2255 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002256 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2257 depth_write = true;
2258 }
2259 // PHASE1 TODO: This needs to check whether the stencil buffer is writable.
2260 // If failOp, passOp, or depthFailOp is not KEEP, and writeMask isn't 0, it's writable.
2261 // If the depth test is disabled, it is treated as passing, so depthFailOp doesn't run.
2262 // PHASE1 TODO: These validations should be in core_checks.
2263 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002264 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002265 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2266 stencil_write = true;
2267 }
2268
2269 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2270 if (depth_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002271 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2272 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT,
2273 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002274 }
2275 if (stencil_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002276 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2277 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT,
2278 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002279 }
locke-lunarg61870c22020-06-09 14:51:50 -06002280 }
2281}
2282
John Zulauf64ffe552021-02-06 10:25:07 -07002283bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002284 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002285 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002286 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002287 current_subpass_);
John Zulauf64ffe552021-02-06 10:25:07 -07002288 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002289 func_name);
2290
John Zulauf355e49b2020-04-24 15:11:15 -06002291 const auto next_subpass = current_subpass_ + 1;
John Zulauf1507ee42020-05-18 11:33:09 -06002292 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002293 skip |=
2294 next_context.ValidateLayoutTransitions(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002295 if (!skip) {
2296 // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2297 // on a copy of the (empty) next context.
2298 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2299 AccessContext temp_context(next_context);
2300 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002301 skip |=
2302 temp_context.ValidateLoadOperation(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002303 }
John Zulauf7635de32020-05-29 17:14:15 -06002304 return skip;
2305}
John Zulauf64ffe552021-02-06 10:25:07 -07002306bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002307 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002308 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002309 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002310 current_subpass_);
John Zulauf64ffe552021-02-06 10:25:07 -07002311 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002312 func_name);
John Zulauf64ffe552021-02-06 10:25:07 -07002313 skip |= ValidateFinalSubpassLayoutTransitions(ex_context, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002314 return skip;
2315}
2316
John Zulauf64ffe552021-02-06 10:25:07 -07002317AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
2318 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, render_area_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002319}
2320
John Zulauf64ffe552021-02-06 10:25:07 -07002321bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &ex_context,
2322 const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002323 bool skip = false;
2324
John Zulauf7635de32020-05-29 17:14:15 -06002325 // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2326 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2327 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2328 // to apply and only copy then, if this proves a hot spot.
2329 std::unique_ptr<AccessContext> proxy_for_current;
2330
John Zulauf355e49b2020-04-24 15:11:15 -06002331 // Validate the "finalLayout" transitions to external
2332 // Get them from where we're hiding them in the extra entry.
2333 const auto &final_transitions = rp_state_->subpass_transitions.back();
2334 for (const auto &transition : final_transitions) {
2335 const auto &attach_view = attachment_views_[transition.attachment];
2336 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
2337 assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
John Zulauf7635de32020-05-29 17:14:15 -06002338 auto *context = trackback.context;
2339
2340 if (transition.prev_pass == current_subpass_) {
2341 if (!proxy_for_current) {
2342 // We haven't recorded the resolve for the current_subpass, so we need to copy current and update it *as if* the resolve and store operations had been recorded.
John Zulauf64ffe552021-02-06 10:25:07 -07002343 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002344 }
2345 context = proxy_for_current.get();
2346 }
2347
John Zulaufa0a98292020-09-18 09:30:10 -06002348 // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2349 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufc523bf62021-02-16 08:20:34 -07002350 auto hazard = context->DetectImageBarrierHazard(*attach_view->image_state, merged_barrier.src_exec_scope.exec_scope,
John Zulaufa0a98292020-09-18 09:30:10 -06002351 merged_barrier.src_access_scope, attach_view->normalized_subresource_range,
2352 AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002353 if (hazard.hazard) {
John Zulauf64ffe552021-02-06 10:25:07 -07002354 skip |= ex_context.GetSyncState().LogError(
John Zulauffaea0ee2021-01-14 14:01:32 -07002355 rp_state_->renderPass, string_SyncHazardVUID(hazard.hazard),
2356 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2357 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2358 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2359 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf64ffe552021-02-06 10:25:07 -07002360 ex_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06002361 }
2362 }
2363 return skip;
2364}
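// Sketch of the shape MergeBarriers (used above) is assumed to have: for detection only the union
// of the scopes matters, since the check reads just the src (first) scope. Assumes SyncBarrier is
// default-constructible with empty scopes; hypothetical helper, shown for illustration.
static inline SyncBarrier SketchMergeBarriers(const std::vector<SyncBarrier> &barriers) {
    SyncBarrier merged;
    for (const auto &barrier : barriers) {
        merged.src_exec_scope.exec_scope |= barrier.src_exec_scope.exec_scope;
        merged.src_access_scope |= barrier.src_access_scope;
        merged.dst_exec_scope.exec_scope |= barrier.dst_exec_scope.exec_scope;
        merged.dst_access_scope |= barrier.dst_access_scope;
    }
    return merged;
}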
2365
2366void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
2367 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002368 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002369}
2370
John Zulauf64ffe552021-02-06 10:25:07 -07002371void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag &tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002372 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2373 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf64ffe552021-02-06 10:25:07 -07002374 VkExtent3D extent = CastTo3D(render_area_.extent);
2375 VkOffset3D offset = CastTo3D(render_area_.offset);
John Zulauf1507ee42020-05-18 11:33:09 -06002376
2377 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2378 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
2379 if (attachment_views_[i] == nullptr) continue; // UNUSED
2380 const auto &view = *attachment_views_[i];
2381 const IMAGE_STATE *image = view.image_state.get();
2382 if (image == nullptr) continue;
2383
2384 const auto &ci = attachment_ci[i];
2385 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002386 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002387 const bool is_color = !(has_depth || has_stencil);
2388
2389 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002390 subpass_context.UpdateAccessState(*image, ColorLoadUsage(ci.loadOp), SyncOrdering::kColorAttachment,
2391 view.normalized_subresource_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002392 } else {
2393 auto update_range = view.normalized_subresource_range;
2394 if (has_depth) {
2395 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002396 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.loadOp),
2397 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002398 }
2399 if (has_stencil) {
2400 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002401 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.stencilLoadOp),
2402 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002403 }
2404 }
2405 }
2406 }
2407}
John Zulauf64ffe552021-02-06 10:25:07 -07002408RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2409 VkQueueFlags queue_flags,
2410 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2411 const AccessContext *external_context)
2412 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_(attachment_views) {
John Zulauf355e49b2020-04-24 15:11:15 -06002413 // Add contexts for all subpasses here so that they exist during next-subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002414 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002415 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002416 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulauf355e49b2020-04-24 15:11:15 -06002417 }
John Zulauf64ffe552021-02-06 10:25:07 -07002418}
2419void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
2420 assert(0 == current_subpass_);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002421 subpass_contexts_[current_subpass_].SetStartTag(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002422 RecordLayoutTransitions(tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002423 RecordLoadOperations(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002424}
John Zulauf1507ee42020-05-18 11:33:09 -06002425
John Zulauf64ffe552021-02-06 10:25:07 -07002426void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag &prev_subpass_tag,
John Zulauffaea0ee2021-01-14 14:01:32 -07002427 const ResourceUsageTag &next_subpass_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002428 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauf64ffe552021-02-06 10:25:07 -07002429 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, prev_subpass_tag);
2430 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, prev_subpass_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002431
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002432 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2433 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002434 current_subpass_++;
2435 assert(current_subpass_ < subpass_contexts_.size());
John Zulauffaea0ee2021-01-14 14:01:32 -07002436 subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
2437 RecordLayoutTransitions(next_subpass_tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002438 RecordLoadOperations(next_subpass_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002439}
2440
John Zulauf64ffe552021-02-06 10:25:07 -07002441void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag &tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002442 // Add the resolve and store accesses
John Zulauf64ffe552021-02-06 10:25:07 -07002443 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, tag);
2444 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, tag);
John Zulauf7635de32020-05-29 17:14:15 -06002445
John Zulauf355e49b2020-04-24 15:11:15 -06002446 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002447 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002448
2449 // Add the "finalLayout" transitions to external
2450 // Get them from where we're hiding them in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002451 // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2452 // TODO: For aliasing, we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2453 // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002454 const auto &final_transitions = rp_state_->subpass_transitions.back();
2455 for (const auto &transition : final_transitions) {
2456 const auto &attachment = attachment_views_[transition.attachment];
2457 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufaa97d8b2020-07-14 10:58:13 -06002458 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
John Zulaufd5115702021-01-18 12:34:33 -07002459 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002460 for (const auto &barrier : last_trackback.barriers) {
John Zulaufd5115702021-01-18 12:34:33 -07002461 barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002462 }
John Zulauf1e331ec2020-12-04 18:29:38 -07002463 external_context->UpdateResourceAccess(*attachment->image_state, attachment->normalized_subresource_range, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002464 }
2465}
2466
Jeremy Gebben40a22942020-12-22 14:22:06 -07002467SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002468 SyncExecScope result;
2469 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002470 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2471 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002472 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2473 return result;
2474}
2475
Jeremy Gebben40a22942020-12-22 14:22:06 -07002476SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002477 SyncExecScope result;
2478 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002479 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2480 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002481 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2482 return result;
2483}
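// Usage sketch (illustrative values): MakeSrc closes the source mask over all logically *earlier*
// stages and MakeDst over all logically *later* stages, matching the spec's execution-scope rules,
// so chained barriers compose correctly.
//   auto src = SyncExecScope::MakeSrc(VK_QUEUE_GRAPHICS_BIT, VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR);
//   auto dst = SyncExecScope::MakeDst(VK_QUEUE_GRAPHICS_BIT, VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR);
//   SyncBarrier exec_only(src, dst);  // the constructor below leaves both access scopes empty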
2484
2485SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002486 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002487 src_access_scope = 0;
John Zulaufc523bf62021-02-16 08:20:34 -07002488 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002489 dst_access_scope = 0;
2490}
2491
2492template <typename Barrier>
2493SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002494 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002495 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002496 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002497 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2498}
2499
2500SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002501 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2502 if (barrier) {
2503 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002504 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002505 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002506
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002507 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002508 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002509 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2510
2511 } else {
2512 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002513 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002514 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2515
2516 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002517 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002518 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2519 }
2520}
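// Example (assumed application-side setup): with VK_KHR_synchronization2, a VkMemoryBarrier2KHR
// chained to a VkSubpassDependency2 carries the stage/access masks, and the dependency's own
// masks are ignored, which is the branch taken above.
//   VkMemoryBarrier2KHR mb2 = {VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR, nullptr,
//                              VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR,
//                              VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR,
//                              VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR,
//                              VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR};
//   VkSubpassDependency2 dep = {...};  // the stage/access masks in dep itself are then unused
//   dep.pNext = &mb2;
//   SyncBarrier barrier(queue_flags, dep);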
2521
2522template <typename Barrier>
2523SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2524 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2525 src_exec_scope = src;
2526 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2527
2528 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002529 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002530 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002531}
2532
John Zulaufb02c1eb2020-10-06 16:33:36 -06002533// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2534void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2535 for (const auto &barrier : barriers) {
2536 ApplyBarrier(barrier, layout_transition);
2537 }
2538}
2539
John Zulauf89311b42020-09-29 16:28:47 -06002540// ApplyBarriers is design for *fully* inclusive barrier lists without layout tranistions. Designed use was for
2541// inter-subpass barriers for lazy-evaluation of parent context memory ranges. Subpass layout transistions are *not* done
2542// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufb02c1eb2020-10-06 16:33:36 -06002543void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag &tag) {
2544 assert(!pending_layout_transition); // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002545 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002546 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002547 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002548 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002549 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002550 ApplyPendingBarriers(tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002551}
John Zulauf9cb530d2019-09-30 14:14:10 -06002552HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2553 HazardResult hazard;
2554 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002555 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002556 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002557 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002558 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002559 }
2560 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002561 // Write operation:
2562 // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads).
2563 // If reads exist -- test only against them because either:
2564 // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
2565 // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
2566 // the current write happens after the reads, so just test the write against the reads
2567 // Otherwise test against last_write
2568 //
2569 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07002570 if (last_reads.size()) {
2571 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06002572 if (IsReadHazard(usage_stage, read_access)) {
2573 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2574 break;
2575 }
2576 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002577 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06002578 // Write-After-Write check -- if we have a previous write to test against
2579 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002580 }
2581 }
2582 return hazard;
2583}
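// Worked example (hypothetical tags; the updater name and usage enumerants are assumptions) of
// the simple check above: a transfer write followed by an unbarriered vertex-attribute read.
//   ResourceAccessState state;
//   state.Update(SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, copy_tag);
//   auto hazard = state.DetectHazard(SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ);
//   // hazard.hazard == SyncHazard::READ_AFTER_WRITE, reported against copy_tag, unless a barrier
//   // has chained the vertex-input stage into the write's synchronization scope.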
2584
John Zulauf8e3c3e92021-01-06 11:19:36 -07002585HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering &ordering_rule) const {
2586 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06002587 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
2588 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06002589 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002590 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002591 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
2592 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06002593 if (IsRead(usage_bit)) {
2594 // Exclude RAW if there is no write, or the write is not the "most recent" operation w.r.t. the usage;
2595 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
2596 if (is_raw_hazard) {
2597 // NOTE: we know last_write is non-zero
2598 // See if the ordering rules save us from the simple RAW check above
2599 // First check to see if the current usage is covered by the ordering rules
2600 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
2601 const bool usage_is_ordered =
2602 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
2603 if (usage_is_ordered) {
2604 // Now see if the most recent write (or a subsequent read) is ordered
2605 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
2606 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06002607 }
2608 }
John Zulauf4285ee92020-09-23 10:20:52 -06002609 if (is_raw_hazard) {
2610 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
2611 }
John Zulauf361fb532020-07-22 10:45:39 -06002612 } else {
2613 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002614 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07002615 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06002616 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07002617 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06002618 if (usage_write_is_ordered) {
2619 // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
2620 ordered_stages = GetOrderedStages(ordering);
2621 }
2622 // If we're tracking any reads that aren't ordered against the current write, got to check 'em all.
2623 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002624 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06002625 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
2626 if (IsReadHazard(usage_stage, read_access)) {
2627 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2628 break;
2629 }
John Zulaufd14743a2020-07-03 09:42:39 -06002630 }
2631 }
John Zulauf4285ee92020-09-23 10:20:52 -06002632 } else if (!(last_write_is_ordered && usage_write_is_ordered)) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002633 if (last_write.any() && IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002634 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06002635 }
John Zulauf69133422020-05-20 14:55:53 -06002636 }
2637 }
2638 return hazard;
2639}
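// Illustrative note on the ordering parameter above: two draws writing the same color attachment
// are WAW by the simple rules, but with SyncOrdering::kColorAttachment both writes fall within
// ordering.access_scope, so the last_write_is_ordered/usage_write_is_ordered test suppresses the
// false positive; rasterization order guarantees attachment writes occur in primitive order.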
2640
John Zulauf2f952d22020-02-10 11:34:51 -07002641// Asynchronous Hazards occur between subpasses with no connection through the DAG
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002642HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag &start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07002643 HazardResult hazard;
2644 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002645 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
2646 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
2647 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07002648 if (IsRead(usage)) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002649 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06002650 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07002651 }
2652 } else {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002653 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06002654 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07002655 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002656 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07002657 for (const auto &read_access : last_reads) {
2658 if (read_access.tag.index >= start_tag.index) {
2659 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002660 break;
2661 }
2662 }
John Zulauf2f952d22020-02-10 11:34:51 -07002663 }
2664 }
2665 return hazard;
2666}
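// Editorial sketch (illustrative, assuming hypothetical tags with t0.index < start.index <= t1.index):
//
//   ResourceAccessState state;
//   state.Update(SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, t0);  // precedes start_tag: ignored
//   state.DetectAsyncHazard(SYNC_COPY_TRANSFER_READ, start);                   // no racing hazard reported
//   state.Update(SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, t1);  // within the async window
//   state.DetectAsyncHazard(SYNC_COPY_TRANSFER_READ, start);                   // reports READ_RACING_WRITE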
2667
Jeremy Gebben40a22942020-12-22 14:22:06 -07002668HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002669 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07002670 // Only supporting image layout transitions for now
2671 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2672 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06002673 // Only test for WAW if there are no intervening read operations.
2674 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07002675 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06002676 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07002677 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002678 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06002679 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07002680 break;
2681 }
2682 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002683 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2684 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2685 }
2686
2687 return hazard;
2688}
2689
Jeremy Gebben40a22942020-12-22 14:22:06 -07002690HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07002691 const SyncStageAccessFlags &src_access_scope,
2692 const ResourceUsageTag &event_tag) const {
2693 // Only supporting image layout transitions for now
2694 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2695 HazardResult hazard;
2696 // Only test for WAW if there are no intervening read operations.
2697 // See DetectHazard(SyncStageAccessIndex) above for more details.
2698
John Zulaufab7756b2020-12-29 16:10:16 -07002699 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002700 // Look at the reads if any... if reads exist, they are either the reason the access is in the event
2701 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07002702 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002703 if (read_access.tag.IsBefore(event_tag)) {
2704 // The read is in the event's first synchronization scope, so we use a barrier hazard check
2705 // If the read stage is not in the src sync scope
2706 // *AND* not execution chained with an existing sync barrier (that's the or)
2707 // then the barrier access is unsafe (R/W after R)
2708 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
2709 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2710 break;
2711 }
2712 } else {
2713 // The read is not in the event's first sync scope, and so is a hazard vs. the layout transition
2714 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2715 }
2716 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002717 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002718 // If there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
2719 if (write_tag.IsBefore(event_tag)) {
2720 // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
2721 // So do a normal barrier hazard check
2722 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2723 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2724 }
2725 } else {
2726 // The write isn't in scope, and is thus a hazard to the layout transition for wait
John Zulauf361fb532020-07-22 10:45:39 -06002727 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2728 }
John Zulaufd14743a2020-07-03 09:42:39 -06002729 }
John Zulauf361fb532020-07-22 10:45:39 -06002730
John Zulauf0cb5be22020-01-23 12:18:22 -07002731 return hazard;
2732}
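// Editorial summary (sketch) of the event-scoped check above, keyed on each access's tag vs. event_tag:
//   read before event_tag  -> in the event's first scope; WAR only if not covered/chained by src_exec_scope
//   read after event_tag   -> outside the first scope; always WRITE_AFTER_READ vs. the layout transition
//   write before event_tag -> in the first scope; WAW only if IsWriteBarrierHazard() finds it unprotected
//   write after event_tag  -> outside the first scope; always WRITE_AFTER_WRITE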
2733
John Zulauf5f13a792020-03-10 07:31:21 -06002734 // The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
2735 // transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
2736// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
2737void ResourceAccessState::Resolve(const ResourceAccessState &other) {
2738 if (write_tag.IsBefore(other.write_tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002739 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
2740 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06002741 *this = other;
2742 } else if (!other.write_tag.IsBefore(write_tag)) {
2743 // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
2744 // dependency chaining logic or any stage expansion)
2745 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002746 pending_write_barriers |= other.pending_write_barriers;
2747 pending_layout_transition |= other.pending_layout_transition;
2748 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06002749
John Zulaufd14743a2020-07-03 09:42:39 -06002750 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07002751 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06002752 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07002753 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06002754 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06002755 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06002756 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06002757 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
2758 // but we should wait on profiling data for that.
2759 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06002760 auto &my_read = last_reads[my_read_index];
2761 if (other_read.stage == my_read.stage) {
2762 if (my_read.tag.IsBefore(other_read.tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002763 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06002764 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06002765 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002766 my_read.pending_dep_chain = other_read.pending_dep_chain;
2767 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
2768 // May require tracking more than one access per stage.
2769 my_read.barriers = other_read.barriers;
Jeremy Gebben40a22942020-12-22 14:22:06 -07002770 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06002771 // Since I'm overwriting the fragment stage read, also update the input attachment info
2772 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06002773 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06002774 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002775 } else if (other_read.tag.IsBefore(my_read.tag)) {
2776 // The read tags match so merge the barriers
2777 my_read.barriers |= other_read.barriers;
2778 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06002779 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002780
John Zulauf5f13a792020-03-10 07:31:21 -06002781 break;
2782 }
2783 }
2784 } else {
2785 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07002786 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06002787 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07002788 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06002789 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06002790 }
John Zulauf5f13a792020-03-10 07:31:21 -06002791 }
2792 }
John Zulauf361fb532020-07-22 10:45:39 -06002793 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06002794 } // the else clause would be that other write is before this write... in which case we supersede the other state and
2795 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07002796
2797 // Merge first access information by making a copy of this first_access and reconstructing with a shuffle
2798 // of the copy and other into this using the update first logic.
2799 // NOTE: All sorts of additional cleverness could be put into short circuits (for example, back is a write and is before the front
2800 // of the other first_accesses...)
2801 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
2802 FirstAccesses firsts(std::move(first_accesses_));
2803 first_accesses_.clear();
2804 first_read_stages_ = 0U;
2805 auto a = firsts.begin();
2806 auto a_end = firsts.end();
2807 for (auto &b : other.first_accesses_) {
2808 // TODO: Determine whether "IsBefore" or "IsGloballyBefore" is needed...
2809 while (a != a_end && a->tag.IsBefore(b.tag)) {
2810 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
2811 ++a;
2812 }
2813 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
2814 }
2815 for (; a != a_end; ++a) {
2816 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
2817 }
2818 }
John Zulauf5f13a792020-03-10 07:31:21 -06002819}
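// Editorial sketch (illustrative): Resolve() merges two access histories for the same address range, e.g.
// when combining parallel contexts. The newer write wins outright, equal write tags merge their barriers,
// and reads are merged per stage, keeping the most recent access for each:
//
//   ResourceAccessState merged = state_from_context_a;  // hypothetical states for one range
//   merged.Resolve(state_from_context_b);               // merged now reflects both histories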
2820
John Zulauf8e3c3e92021-01-06 11:19:36 -07002821void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06002822 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
2823 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06002824 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06002825 // Multiple outstanding reads may be of interest and do dependency chains independently
2826 // However, for purposes of barrier tracking, only one read per pipeline stage matters
2827 const auto usage_stage = PipelineStageBit(usage_index);
2828 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002829 for (auto &read_access : last_reads) {
2830 if (read_access.stage == usage_stage) {
2831 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002832 break;
2833 }
2834 }
2835 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07002836 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002837 last_read_stages |= usage_stage;
2838 }
John Zulauf4285ee92020-09-23 10:20:52 -06002839
2840 // Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07002841 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06002842 // TODO Revisit re: multiple reads for a given stage
2843 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06002844 }
John Zulauf9cb530d2019-09-30 14:14:10 -06002845 } else {
2846 // Assume write
2847 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06002848 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002849 }
John Zulauffaea0ee2021-01-14 14:01:32 -07002850 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06002851}
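// Editorial sketch (illustrative, assuming hypothetical tags t0 < t1 < t2): one read slot per stage,
// with writes clobbering all read state via SetWrite():
//
//   state.Update(SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, t0);   // new stage: read slot added
//   state.Update(SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, t1);   // same stage: slot updated in place
//   state.Update(SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, t2);  // SetWrite() clears all read slots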
John Zulauf5f13a792020-03-10 07:31:21 -06002852
John Zulauf89311b42020-09-29 16:28:47 -06002853// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
2854 // if the last_reads/last_write were unsafe, we've reported them; in either case the prior access is irrelevant.
2855// We can overwrite them as *this* write is now after them.
2856//
2857// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002858void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag &tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07002859 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06002860 last_read_stages = 0;
2861 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06002862 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06002863
2864 write_barriers = 0;
2865 write_dependency_chain = 0;
2866 write_tag = tag;
2867 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06002868}
2869
John Zulauf89311b42020-09-29 16:28:47 -06002870// Apply the memory barrier without updating the existing barriers. The execution barrier
2871// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
2872// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
2873// replace the current write barriers or add to them, so accumulate to pending as well.
2874void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
2875 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
2876 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06002877 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
2878 // transition, under the theory of "most recent access". If the read/write *isn't* safe
2879 // vs. this layout transition DetectBarrierHazard should report it. We treat the layout
2880 // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufc523bf62021-02-16 08:20:34 -07002881 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope.exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06002882 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07002883 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002884 }
John Zulauf89311b42020-09-29 16:28:47 -06002885 // Track layout transition as pending as we can't modify last_write until all barriers processed
2886 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06002887
John Zulauf89311b42020-09-29 16:28:47 -06002888 if (!pending_layout_transition) {
2889 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
2890 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07002891 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06002892 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufc523bf62021-02-16 08:20:34 -07002893 if (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers)) {
2894 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002895 }
2896 }
John Zulaufa0a98292020-09-18 09:30:10 -06002897 }
John Zulaufa0a98292020-09-18 09:30:10 -06002898}
2899
John Zulauf4a6105a2020-11-17 15:11:05 -07002900// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
2901 // changes the "chaining" state; to keep barriers independent, application is deferred. See discussion above.
2902void ResourceAccessState::ApplyBarrier(const ResourceUsageTag &scope_tag, const SyncBarrier &barrier, bool layout_transition) {
2903 // The scope logic for events is, if we're here, the resource usage was flagged as "in the first execution scope" at
2904 // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
2905 // in order to know if it's in the execution scope.
2906 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
2907 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
2908 // errors w.r.t. "most recent" accesses.
2909 if (layout_transition || ((write_tag.IsBefore(scope_tag)) && (barrier.src_access_scope & last_write).any())) {
2910 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07002911 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002912 }
2913 // Track layout transition as pending as we can't modify last_write until all barriers processed
2914 pending_layout_transition |= layout_transition;
2915
2916 if (!pending_layout_transition) {
2917 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
2918 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07002919 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002920 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
2921 // NOTE: That's not really correct... this read stage might *not* have been included in the setevent, and the barriers
2922 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
2923 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
2924 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
2925 // capture (the specific write and read stages that *were* in scope at the moment of SetEvents).
2926 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulaufc523bf62021-02-16 08:20:34 -07002927 if (read_access.tag.IsBefore(scope_tag) &&
2928 (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers))) {
2929 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002930 }
2931 }
2932 }
2933}
John Zulauf89311b42020-09-29 16:28:47 -06002934void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag &tag) {
2935 if (pending_layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06002936 // SetWrite clobbers the read count, and thus we don't have to clear the read_state out.
2937 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07002938 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf89311b42020-09-29 16:28:47 -06002939 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06002940 }
John Zulauf89311b42020-09-29 16:28:47 -06002941
2942 // Apply the accumulated execution barriers (and thus update chaining information).
2943 // For a layout transition, the read count is zeroed by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07002944 for (auto &read_access : last_reads) {
2945 read_access.barriers |= read_access.pending_dep_chain;
2946 read_execution_barriers |= read_access.barriers;
2947 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06002948 }
2949
2950 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
2951 write_dependency_chain |= pending_write_dep_chain;
2952 write_barriers |= pending_write_barriers;
2953 pending_write_dep_chain = 0;
2954 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06002955}
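// Editorial sketch (illustrative): the two-phase scheme keeps barriers within one batch independent, so no
// barrier chains off another applied in the same command ('batch' below is a hypothetical container):
//
//   for (const SyncBarrier &barrier : batch) {
//       access_state.ApplyBarrier(barrier, false);  // phase 1: accumulate into the pending_* fields only
//   }
//   access_state.ApplyPendingBarriers(tag);          // phase 2: commit chains and barriers exactly once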
2956
John Zulauf59e25072020-07-17 10:55:21 -06002957// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07002958VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
2959 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06002960
John Zulaufab7756b2020-12-29 16:10:16 -07002961 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002962 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06002963 barriers = read_access.barriers;
2964 break;
John Zulauf59e25072020-07-17 10:55:21 -06002965 }
2966 }
John Zulauf4285ee92020-09-23 10:20:52 -06002967
John Zulauf59e25072020-07-17 10:55:21 -06002968 return barriers;
2969}
2970
Jeremy Gebben40a22942020-12-22 14:22:06 -07002971inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06002972 assert(IsRead(usage));
2973 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
2974 // * the previous reads are not hazards, and thus last_write must be visible and available to
2975 // any reads that happen after.
2976 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
2977 // the current read will also not be a hazard; thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002978 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06002979}
2980
Jeremy Gebben40a22942020-12-22 14:22:06 -07002981VkPipelineStageFlags2KHR ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06002982 // Whether the stages are in the ordering scope only matters if the current write is ordered
Jeremy Gebben40a22942020-12-22 14:22:06 -07002983 VkPipelineStageFlags2KHR ordered_stages = last_read_stages & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06002984 // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002985 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06002986 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06002987 // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07002988 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06002989 }
2990
2991 return ordered_stages;
2992}
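// Editorial note (sketch): ordered_stages is last_read_stages masked by the ordering's exec_scope, with one
// special case: input attachment reads are gated by the access scope instead, so an outstanding input
// attachment read adds VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR whenever the ordering covers
// SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT.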
2993
John Zulauffaea0ee2021-01-14 14:01:32 -07002994void ResourceAccessState::UpdateFirst(const ResourceUsageTag &tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
2995 // Only record until we record a write.
2996 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07002997 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07002998 if (0 == (usage_stage & first_read_stages_)) {
2999 // If this is a read we haven't seen or a write, record.
3000 first_read_stages_ |= usage_stage;
3001 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3002 }
3003 }
3004}
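// Editorial sketch (illustrative): first_accesses_ records each stage's first read and stops at the first
// write. E.g. the sequence read@VERTEX, read@FRAGMENT, read@VERTEX, write, write records three entries;
// the repeated vertex read and the second write are dropped, preserving the "first use" picture.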
3005
John Zulaufd1f85d42020-04-15 12:23:15 -06003006void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003007 auto *access_context = GetAccessContextNoInsert(command_buffer);
3008 if (access_context) {
3009 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003010 }
3011}
3012
John Zulaufd1f85d42020-04-15 12:23:15 -06003013void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3014 auto access_found = cb_access_state.find(command_buffer);
3015 if (access_found != cb_access_state.end()) {
3016 access_found->second->Reset();
3017 cb_access_state.erase(access_found);
3018 }
3019}
3020
John Zulauf9cb530d2019-09-30 14:14:10 -06003021bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3022 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3023 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003024 const auto *cb_context = GetAccessContext(commandBuffer);
3025 assert(cb_context);
3026 if (!cb_context) return skip;
3027 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003028
John Zulauf3d84f1b2020-03-09 13:33:25 -06003029 // If we have no previous accesses, we have no hazards
John Zulauf3d84f1b2020-03-09 13:33:25 -06003030 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003031 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003032
3033 for (uint32_t region = 0; region < regionCount; region++) {
3034 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003035 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003036 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003037 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003038 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003039 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003040 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003041 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003042 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003043 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003044 }
John Zulauf16adfc92020-04-08 10:28:33 -06003045 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003046 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003047 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003048 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003049 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003050 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003051 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003052 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003053 }
3054 }
3055 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003056 }
3057 return skip;
3058}
3059
3060void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3061 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003062 auto *cb_context = GetAccessContext(commandBuffer);
3063 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003064 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003065 auto *context = cb_context->GetCurrentAccessContext();
3066
John Zulauf9cb530d2019-09-30 14:14:10 -06003067 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003068 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003069
3070 for (uint32_t region = 0; region < regionCount; region++) {
3071 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003072 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003073 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003074 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003075 }
John Zulauf16adfc92020-04-08 10:28:33 -06003076 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003077 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003078 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003079 }
3080 }
3081}
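// Editorial note (sketch): each command follows this Validate/Record pairing. PreCallValidate* detects
// hazards against the current access context without mutating it; PreCallRecord* replays the same ranges
// through UpdateAccessState() under a fresh command tag, so subsequent commands are checked against this
// copy's reads and writes.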
3082
John Zulauf4a6105a2020-11-17 15:11:05 -07003083void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3084 // Clear out events from the command buffer contexts
3085 for (auto &cb_context : cb_access_state) {
3086 cb_context.second->RecordDestroyEvent(event);
3087 }
3088}
3089
Jeff Leger178b1e52020-10-05 12:22:23 -04003090bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3091 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3092 bool skip = false;
3093 const auto *cb_context = GetAccessContext(commandBuffer);
3094 assert(cb_context);
3095 if (!cb_context) return skip;
3096 const auto *context = cb_context->GetCurrentAccessContext();
3097
3098 // If we have no previous accesses, we have no hazards
3099 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3100 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3101
3102 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3103 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3104 if (src_buffer) {
3105 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003106 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003107 if (hazard.hazard) {
3108 // TODO -- add tag information to log msg when useful.
3109 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
3110 "vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
3111 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003112 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003113 }
3114 }
3115 if (dst_buffer && !skip) {
3116 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003117 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003118 if (hazard.hazard) {
3119 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
3120 "vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
3121 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003122 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003123 }
3124 }
3125 if (skip) break;
3126 }
3127 return skip;
3128}
3129
3130void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3131 auto *cb_context = GetAccessContext(commandBuffer);
3132 assert(cb_context);
3133 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
3134 auto *context = cb_context->GetCurrentAccessContext();
3135
3136 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3137 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3138
3139 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3140 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3141 if (src_buffer) {
3142 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003143 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003144 }
3145 if (dst_buffer) {
3146 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003147 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003148 }
3149 }
3150}
3151
John Zulauf5c5e88d2019-12-26 11:22:02 -07003152bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3153 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3154 const VkImageCopy *pRegions) const {
3155 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003156 const auto *cb_access_context = GetAccessContext(commandBuffer);
3157 assert(cb_access_context);
3158 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003159
John Zulauf3d84f1b2020-03-09 13:33:25 -06003160 const auto *context = cb_access_context->GetCurrentAccessContext();
3161 assert(context);
3162 if (!context) return skip;
3163
3164 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3165 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003166 for (uint32_t region = 0; region < regionCount; region++) {
3167 const auto &copy_region = pRegions[region];
3168 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003169 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003170 copy_region.srcOffset, copy_region.extent);
3171 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003172 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003173 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003174 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003175 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003176 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003177 }
3178
3179 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003180 VkExtent3D dst_copy_extent =
3181 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003182 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07003183 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003184 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003185 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003186 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003187 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003188 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003189 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003190 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003191 }
3192 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003193
John Zulauf5c5e88d2019-12-26 11:22:02 -07003194 return skip;
3195}
3196
3197void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3198 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3199 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003200 auto *cb_access_context = GetAccessContext(commandBuffer);
3201 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003202 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003203 auto *context = cb_access_context->GetCurrentAccessContext();
3204 assert(context);
3205
John Zulauf5c5e88d2019-12-26 11:22:02 -07003206 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003207 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003208
3209 for (uint32_t region = 0; region < regionCount; region++) {
3210 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003211 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003212 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003213 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003214 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003215 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003216 VkExtent3D dst_copy_extent =
3217 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003218 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003219 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003220 }
3221 }
3222}
3223
Jeff Leger178b1e52020-10-05 12:22:23 -04003224bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3225 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3226 bool skip = false;
3227 const auto *cb_access_context = GetAccessContext(commandBuffer);
3228 assert(cb_access_context);
3229 if (!cb_access_context) return skip;
3230
3231 const auto *context = cb_access_context->GetCurrentAccessContext();
3232 assert(context);
3233 if (!context) return skip;
3234
3235 const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3236 const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3237 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3238 const auto &copy_region = pCopyImageInfo->pRegions[region];
3239 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003240 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003241 copy_region.srcOffset, copy_region.extent);
3242 if (hazard.hazard) {
3243 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
3244 "vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
3245 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003246 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003247 }
3248 }
3249
3250 if (dst_image) {
3251 VkExtent3D dst_copy_extent =
3252 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003253 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003254 copy_region.dstOffset, dst_copy_extent);
3255 if (hazard.hazard) {
3256 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
3257 "vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
3258 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003259 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003260 }
3261 if (skip) break;
3262 }
3263 }
3264
3265 return skip;
3266}
3267
3268void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3269 auto *cb_access_context = GetAccessContext(commandBuffer);
3270 assert(cb_access_context);
3271 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
3272 auto *context = cb_access_context->GetCurrentAccessContext();
3273 assert(context);
3274
3275 auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3276 auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3277
3278 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3279 const auto &copy_region = pCopyImageInfo->pRegions[region];
3280 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003281 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003282 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003283 }
3284 if (dst_image) {
3285 VkExtent3D dst_copy_extent =
3286 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003287 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003288 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003289 }
3290 }
3291}
3292
John Zulauf9cb530d2019-09-30 14:14:10 -06003293bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3294 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3295 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3296 uint32_t bufferMemoryBarrierCount,
3297 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3298 uint32_t imageMemoryBarrierCount,
3299 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3300 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003301 const auto *cb_access_context = GetAccessContext(commandBuffer);
3302 assert(cb_access_context);
3303 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003304
John Zulauf36ef9282021-02-02 11:47:24 -07003305 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3306 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3307 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3308 pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003309 skip = pipeline_barrier.Validate(*cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003310 return skip;
3311}
3312
3313void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3314 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3315 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3316 uint32_t bufferMemoryBarrierCount,
3317 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3318 uint32_t imageMemoryBarrierCount,
3319 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003320 auto *cb_access_context = GetAccessContext(commandBuffer);
3321 assert(cb_access_context);
3322 if (!cb_access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003323
John Zulauf36ef9282021-02-02 11:47:24 -07003324 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3325 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3326 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3327 pImageMemoryBarriers);
3328 pipeline_barrier.Record(cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003329}
3330
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003331bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
3332 const VkDependencyInfoKHR *pDependencyInfo) const {
3333 bool skip = false;
3334 const auto *cb_access_context = GetAccessContext(commandBuffer);
3335 assert(cb_access_context);
3336 if (!cb_access_context) return skip;
3337
3338 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3339 skip = pipeline_barrier.Validate(*cb_access_context);
3340 return skip;
3341}
3342
3343void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
3344 auto *cb_access_context = GetAccessContext(commandBuffer);
3345 assert(cb_access_context);
3346 if (!cb_access_context) return;
3347
3348 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3349 pipeline_barrier.Record(cb_access_context);
3350}
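// Editorial note (sketch): barrier commands are wrapped in SyncOp objects so one construction path feeds
// both Validate() (const hazard detection) and Record() (state update), keeping the two passes in lockstep;
// the synchronization2 entry points above appear to reuse SyncOpPipelineBarrier via its VkDependencyInfoKHR
// constructor rather than duplicating the barrier handling.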
3351
John Zulauf9cb530d2019-09-30 14:14:10 -06003352void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3353 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
3354 // The state tracker sets up the device state
3355 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
3356
John Zulauf5f13a792020-03-10 07:31:21 -06003357 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
3358 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06003359 // TODO: Find a good way to do this hooklessly.
3360 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3361 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
3362 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
3363
John Zulaufd1f85d42020-04-15 12:23:15 -06003364 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3365 sync_device_state->ResetCommandBufferCallback(command_buffer);
3366 });
3367 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3368 sync_device_state->FreeCommandBufferCallback(command_buffer);
3369 });
John Zulauf9cb530d2019-09-30 14:14:10 -06003370}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003371
John Zulauf355e49b2020-04-24 15:11:15 -06003372bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf64ffe552021-02-06 10:25:07 -07003373 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003374 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06003375 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003376 if (cb_context) {
3377 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
3378 skip = sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003379 }
John Zulauf355e49b2020-04-24 15:11:15 -06003380 return skip;
3381}
3382
3383bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3384 VkSubpassContents contents) const {
3385 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003386 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003387 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003388 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003389 return skip;
3390}
3391
3392bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003393 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003394 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003395 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003396 return skip;
3397}
3398
John Zulauf64ffe552021-02-06 10:25:07 -07003399static const char *kBeginRenderPass2KhrName = "vkCmdBeginRenderPass2KHR";
John Zulauf355e49b2020-04-24 15:11:15 -06003400bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3401 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003402 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003403 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003404 skip |=
3405 ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003406 return skip;
3407}
3408
John Zulauf3d84f1b2020-03-09 13:33:25 -06003409void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3410 VkResult result) {
3411 // The state tracker sets up the command buffer state
3412 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3413
3414 // Create/initialize the structure that trackers accesses at the command buffer scope.
3415 auto cb_access_context = GetAccessContext(commandBuffer);
3416 assert(cb_access_context);
3417 cb_access_context->Reset();
3418}
3419
3420void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf64ffe552021-02-06 10:25:07 -07003421 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003422 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003423 if (cb_context) {
John Zulauf64ffe552021-02-06 10:25:07 -07003424 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
3425 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003426 }
3427}
3428
3429void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3430 VkSubpassContents contents) {
3431 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003432 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003433 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003434 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003435}
3436
3437void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3438 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3439 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003440 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003441}
3442
3443void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3444 const VkRenderPassBeginInfo *pRenderPassBegin,
3445 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3446 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003447 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003448}
3449
Mike Schuchardt2df08912020-12-15 16:28:09 -08003450bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf64ffe552021-02-06 10:25:07 -07003451 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003452 bool skip = false;
3453
3454 auto cb_context = GetAccessContext(commandBuffer);
3455 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003456 if (!cb_context) return skip;
3457 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
3458 return sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003459}
3460
3461bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3462 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
John Zulauf64ffe552021-02-06 10:25:07 -07003463 // Convert to a NextSubpass2
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003464 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003465 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003466 auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
3467 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003468 return skip;
3469}
3470
John Zulauf64ffe552021-02-06 10:25:07 -07003471static const char *kNextSubpass2KhrName = "vkCmdNextSubpass2KHR";
Mike Schuchardt2df08912020-12-15 16:28:09 -08003472bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3473 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003474 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003475 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003476 return skip;
3477}
3478
3479bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3480 const VkSubpassEndInfo *pSubpassEndInfo) const {
3481 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003482 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003483 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003484}
3485
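// Record-phase counterpart to ValidateCmdNextSubpass: applies the SyncOpNextSubpass to the command
// buffer access context so later commands are checked against the new subpass's accesses.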
3486void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf64ffe552021-02-06 10:25:07 -07003487 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003488 auto cb_context = GetAccessContext(commandBuffer);
3489 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003490 if (!cb_context) return;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003491
John Zulauf64ffe552021-02-06 10:25:07 -07003492 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
3493 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003494}
3495
3496void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
3497 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003498 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003499 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003500 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003501}
3502
3503void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3504 const VkSubpassEndInfo *pSubpassEndInfo) {
3505 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003506 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003507}
3508
3509void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3510 const VkSubpassEndInfo *pSubpassEndInfo) {
3511 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003512 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003513}
3514
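// Common validation path for vkCmdEndRenderPass and its EndRenderPass2/2KHR variants, delegating the
// end-of-render-pass checks to SyncOpEndRenderPass.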
John Zulauf64ffe552021-02-06 10:25:07 -07003515bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
3516 const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003517 bool skip = false;
3518
3519 auto cb_context = GetAccessContext(commandBuffer);
3520 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003521 if (!cb_context) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06003522
John Zulauf64ffe552021-02-06 10:25:07 -07003523 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
3524 skip |= sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003525 return skip;
3526}
3527
3528bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
3529 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003530 skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003531 return skip;
3532}
3533
Mike Schuchardt2df08912020-12-15 16:28:09 -08003534bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003535 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003536 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003537 return skip;
3538}
3539
John Zulauf64ffe552021-02-06 10:25:07 -07003540static const char *kEndRenderPass2KhrName = "vkCmdEndRenderPass2KHR";
John Zulauf355e49b2020-04-24 15:11:15 -06003541bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003542 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003543 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003544 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003545 return skip;
3546}
3547
John Zulauf64ffe552021-02-06 10:25:07 -07003548void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
3549 const char *cmd_name) {
John Zulaufe5da6e52020-03-18 15:32:18 -06003550 // Resolve all the subpass contexts to the command buffer context
3551 auto cb_context = GetAccessContext(commandBuffer);
3552 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003553 if (!cb_context) return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003554
John Zulauf64ffe552021-02-06 10:25:07 -07003555 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
3556 sync_op.Record(cb_context);
3557 return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003558}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003559
John Zulauf33fc1d52020-07-17 11:01:10 -06003560// Simple heuristic rule to detect WAW operations representing algorithmically safe or increment-style
 3561// updates to a resource which do not conflict at the byte level.
 3562// TODO: Revisit this rule to see if it needs to be tighter or looser
 3563// TODO: Add programmatic control over suppression heuristics
3564bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
 3565 return (hazard.hazard == SyncHazard::WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
3566}
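// Illustrative suppressed case (a sketch, not an exhaustive description): two successive dispatches
// that each read-modify-write through the same bound storage descriptor report WRITE_AFTER_WRITE with
// prior_access equal to the current usage bit, which this heuristic treats as a safe update pattern.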
3567
John Zulauf3d84f1b2020-03-09 13:33:25 -06003568void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06003569 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06003570 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003571}
3572
3573void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06003574 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06003575 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003576}
3577
3578void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf64ffe552021-02-06 10:25:07 -07003579 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
John Zulauf5a1a5382020-06-22 17:23:25 -06003580 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003581}
locke-lunarga19c71d2020-03-02 18:17:04 -07003582
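// Shared validation for vkCmdCopyBufferToImage and vkCmdCopyBufferToImage2KHR: per region, the source
// buffer range is checked for copy-read hazards and the destination image subresource for copy-write
// hazards against the current access context.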
Jeff Leger178b1e52020-10-05 12:22:23 -04003583template <typename BufferImageCopyRegionType>
3584bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3585 VkImageLayout dstImageLayout, uint32_t regionCount,
3586 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003587 bool skip = false;
3588 const auto *cb_access_context = GetAccessContext(commandBuffer);
3589 assert(cb_access_context);
3590 if (!cb_access_context) return skip;
3591
Jeff Leger178b1e52020-10-05 12:22:23 -04003592 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3593 const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
3594
locke-lunarga19c71d2020-03-02 18:17:04 -07003595 const auto *context = cb_access_context->GetCurrentAccessContext();
3596 assert(context);
3597 if (!context) return skip;
3598
3599 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07003600 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3601
3602 for (uint32_t region = 0; region < regionCount; region++) {
3603 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07003604 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07003605 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003606 if (src_buffer) {
3607 ResourceAccessRange src_range =
3608 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003609 hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf477700e2021-01-06 11:41:49 -07003610 if (hazard.hazard) {
3611 // PHASE1 TODO -- add tag information to log msg when useful.
3612 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
3613 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3614 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003615 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003616 }
3617 }
3618
Jeremy Gebben40a22942020-12-22 14:22:06 -07003619 hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf477700e2021-01-06 11:41:49 -07003620 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003621 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003622 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003623 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003624 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003625 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003626 }
3627 if (skip) break;
3628 }
3629 if (skip) break;
3630 }
3631 return skip;
3632}
3633
Jeff Leger178b1e52020-10-05 12:22:23 -04003634bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3635 VkImageLayout dstImageLayout, uint32_t regionCount,
3636 const VkBufferImageCopy *pRegions) const {
3637 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
3638 COPY_COMMAND_VERSION_1);
3639}
3640
3641bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3642 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
3643 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3644 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3645 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3646}
3647
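// Record-phase counterpart: tags the copy command and updates the access state for the per-region
// buffer read and image write so that subsequent commands can detect hazards against this copy.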
3648template <typename BufferImageCopyRegionType>
3649void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3650 VkImageLayout dstImageLayout, uint32_t regionCount,
3651 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003652 auto *cb_access_context = GetAccessContext(commandBuffer);
3653 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003654
3655 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3656 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
3657
3658 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003659 auto *context = cb_access_context->GetCurrentAccessContext();
3660 assert(context);
3661
3662 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06003663 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003664
3665 for (uint32_t region = 0; region < regionCount; region++) {
3666 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07003667 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003668 if (src_buffer) {
3669 ResourceAccessRange src_range =
3670 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003671 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003672 }
Jeremy Gebben40a22942020-12-22 14:22:06 -07003673 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003674 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003675 }
3676 }
3677}
3678
Jeff Leger178b1e52020-10-05 12:22:23 -04003679void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3680 VkImageLayout dstImageLayout, uint32_t regionCount,
3681 const VkBufferImageCopy *pRegions) {
3682 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
3683 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
3684}
3685
3686void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3687 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
3688 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
3689 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3690 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3691 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3692}
3693
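// Shared validation for vkCmdCopyImageToBuffer and vkCmdCopyImageToBuffer2KHR: the mirror image of the
// buffer-to-image case, checking a copy-read on the source image and a copy-write on the destination
// buffer for each region.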
3694template <typename BufferImageCopyRegionType>
3695bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3696 VkBuffer dstBuffer, uint32_t regionCount,
3697 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003698 bool skip = false;
3699 const auto *cb_access_context = GetAccessContext(commandBuffer);
3700 assert(cb_access_context);
3701 if (!cb_access_context) return skip;
3702
Jeff Leger178b1e52020-10-05 12:22:23 -04003703 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3704 const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
3705
locke-lunarga19c71d2020-03-02 18:17:04 -07003706 const auto *context = cb_access_context->GetCurrentAccessContext();
3707 assert(context);
3708 if (!context) return skip;
3709
3710 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3711 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
 3712 const auto dst_mem = (dst_buffer && SimpleBinding(*dst_buffer)) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
3713 for (uint32_t region = 0; region < regionCount; region++) {
3714 const auto &copy_region = pRegions[region];
3715 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003716 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07003717 copy_region.imageOffset, copy_region.imageExtent);
3718 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003719 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003720 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003721 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003722 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003723 }
John Zulauf477700e2021-01-06 11:41:49 -07003724 if (dst_mem) {
3725 ResourceAccessRange dst_range =
3726 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003727 hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf477700e2021-01-06 11:41:49 -07003728 if (hazard.hazard) {
3729 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
3730 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3731 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003732 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003733 }
locke-lunarga19c71d2020-03-02 18:17:04 -07003734 }
3735 }
3736 if (skip) break;
3737 }
3738 return skip;
3739}
3740
Jeff Leger178b1e52020-10-05 12:22:23 -04003741bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
3742 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
3743 const VkBufferImageCopy *pRegions) const {
3744 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
3745 COPY_COMMAND_VERSION_1);
3746}
3747
3748bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
3749 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
3750 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
3751 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
3752 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
3753}
3754
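// Record-phase counterpart: updates access state for the per-region image read and buffer write.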
3755template <typename BufferImageCopyRegionType>
3756void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3757 VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
3758 CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003759 auto *cb_access_context = GetAccessContext(commandBuffer);
3760 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003761
3762 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3763 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
3764
3765 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003766 auto *context = cb_access_context->GetCurrentAccessContext();
3767 assert(context);
3768
3769 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003770 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07003773
3774 for (uint32_t region = 0; region < regionCount; region++) {
3775 const auto &copy_region = pRegions[region];
3776 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003777 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003778 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003779 if (dst_buffer) {
3780 ResourceAccessRange dst_range =
3781 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003782 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003783 }
locke-lunarga19c71d2020-03-02 18:17:04 -07003784 }
3785 }
3786}
3787
Jeff Leger178b1e52020-10-05 12:22:23 -04003788void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3789 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
3790 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
3791 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
3792}
3793
3794void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
3795 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
3796 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
3797 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
3798 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
3799 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
3800}
3801
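// Shared validation for vkCmdBlitImage and vkCmdBlitImage2KHR. Blit regions are given as corner pairs
// that may be reversed along any axis, so each pair is normalized to a minimum-corner offset plus an
// absolute extent before hazard detection.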
3802template <typename RegionType>
3803bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3804 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3805 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003806 bool skip = false;
3807 const auto *cb_access_context = GetAccessContext(commandBuffer);
3808 assert(cb_access_context);
3809 if (!cb_access_context) return skip;
3810
3811 const auto *context = cb_access_context->GetCurrentAccessContext();
3812 assert(context);
3813 if (!context) return skip;
3814
3815 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3816 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3817
3818 for (uint32_t region = 0; region < regionCount; region++) {
3819 const auto &blit_region = pRegions[region];
3820 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003821 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
3822 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
3823 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
3824 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
3825 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
3826 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003827 auto hazard = context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003828 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003829 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003830 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06003831 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003832 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003833 }
3834 }
3835
3836 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003837 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
3838 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
3839 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
3840 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
3841 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
3842 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003843 auto hazard = context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003844 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003845 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003846 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06003847 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003848 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003849 }
3850 if (skip) break;
3851 }
3852 }
3853
3854 return skip;
3855}
3856
Jeff Leger178b1e52020-10-05 12:22:23 -04003857bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3858 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3859 const VkImageBlit *pRegions, VkFilter filter) const {
3860 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
3861 "vkCmdBlitImage");
3862}
3863
3864bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
3865 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
3866 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
3867 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
3868 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
3869}
3870
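// Record-phase counterpart to ValidateCmdBlitImage, using the same offset/extent normalization when
// updating the access state for the source read and destination write of each blit region.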
3871template <typename RegionType>
3872void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3873 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3874 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003875 auto *cb_access_context = GetAccessContext(commandBuffer);
3876 assert(cb_access_context);
3877 auto *context = cb_access_context->GetCurrentAccessContext();
3878 assert(context);
3879
3880 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003881 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003882
3883 for (uint32_t region = 0; region < regionCount; region++) {
3884 const auto &blit_region = pRegions[region];
3885 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003886 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
3887 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
3888 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
3889 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
3890 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
3891 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003892 context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003893 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003894 }
3895 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003896 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
3897 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
3898 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
3899 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
3900 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
3901 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003902 context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003903 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003904 }
3905 }
3906}
locke-lunarg36ba2592020-04-03 09:42:04 -06003907
Jeff Leger178b1e52020-10-05 12:22:23 -04003908void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3909 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3910 const VkImageBlit *pRegions, VkFilter filter) {
3911 auto *cb_access_context = GetAccessContext(commandBuffer);
3912 assert(cb_access_context);
3913 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
3914 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
3915 pRegions, filter);
3916 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
3917}
3918
3919void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
3920 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
3921 auto *cb_access_context = GetAccessContext(commandBuffer);
3922 assert(cb_access_context);
3923 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
3924 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
3925 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
3926 pBlitImageInfo->filter, tag);
3927}
3928
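// Validates the indirect-parameter buffer reads for indirect dispatch/draw commands. When the records
// are tightly packed (a single draw, or stride == struct_size) one contiguous range is checked;
// otherwise each of the drawCount strided records is checked separately so that the padding bytes
// between records are not falsely flagged.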
John Zulauffaea0ee2021-01-14 14:01:32 -07003929bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
3930 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
3931 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
3932 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06003933 bool skip = false;
3934 if (drawCount == 0) return skip;
3935
3936 const auto *buf_state = Get<BUFFER_STATE>(buffer);
3937 VkDeviceSize size = struct_size;
3938 if (drawCount == 1 || stride == size) {
3939 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06003940 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06003941 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3942 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06003943 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003944 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06003945 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003946 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06003947 }
3948 } else {
3949 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003950 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06003951 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3952 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06003953 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003954 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
3955 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003956 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06003957 break;
3958 }
3959 }
3960 }
3961 return skip;
3962}
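// Worked example of the strided path: drawCount == 3, stride == 32, and struct_size ==
// sizeof(VkDrawIndirectCommand) (16 bytes) checks the ranges [offset, offset + 16),
// [offset + 32, offset + 48), and [offset + 64, offset + 80), skipping the 16 pad bytes per record.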
3963
locke-lunarg61870c22020-06-09 14:51:50 -06003964void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag &tag, const VkDeviceSize struct_size,
3965 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
3966 uint32_t stride) {
locke-lunargff255f92020-05-13 18:53:52 -06003967 const auto *buf_state = Get<BUFFER_STATE>(buffer);
3968 VkDeviceSize size = struct_size;
3969 if (drawCount == 1 || stride == size) {
3970 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06003971 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003972 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06003973 } else {
3974 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003975 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003976 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
3977 tag);
locke-lunargff255f92020-05-13 18:53:52 -06003978 }
3979 }
3980}
3981
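// Validates the single 4-byte (uint32_t) draw-count read performed by the *DrawIndirectCount commands.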
John Zulauffaea0ee2021-01-14 14:01:32 -07003982bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
3983 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
3984 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06003985 bool skip = false;
3986
3987 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06003988 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06003989 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3990 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06003991 skip |= LogError(count_buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003992 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06003993 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003994 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06003995 }
3996 return skip;
3997}
3998
locke-lunarg61870c22020-06-09 14:51:50 -06003999void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag &tag, VkBuffer buffer, VkDeviceSize offset) {
locke-lunargff255f92020-05-13 18:53:52 -06004000 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004001 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004002 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004003}
4004
locke-lunarg36ba2592020-04-03 09:42:04 -06004005bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004006 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004007 const auto *cb_access_context = GetAccessContext(commandBuffer);
4008 assert(cb_access_context);
4009 if (!cb_access_context) return skip;
4010
locke-lunarg61870c22020-06-09 14:51:50 -06004011 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004012 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004013}
4014
4015void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004016 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004017 auto *cb_access_context = GetAccessContext(commandBuffer);
4018 assert(cb_access_context);
4019 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004020
locke-lunarg61870c22020-06-09 14:51:50 -06004021 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004022}
locke-lunarge1a67022020-04-29 00:15:36 -06004023
4024bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004025 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004026 const auto *cb_access_context = GetAccessContext(commandBuffer);
4027 assert(cb_access_context);
4028 if (!cb_access_context) return skip;
4029
4030 const auto *context = cb_access_context->GetCurrentAccessContext();
4031 assert(context);
4032 if (!context) return skip;
4033
locke-lunarg61870c22020-06-09 14:51:50 -06004034 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004035 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4036 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004037 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004038}
4039
4040void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004041 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004042 auto *cb_access_context = GetAccessContext(commandBuffer);
4043 assert(cb_access_context);
4044 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4045 auto *context = cb_access_context->GetCurrentAccessContext();
4046 assert(context);
4047
locke-lunarg61870c22020-06-09 14:51:50 -06004048 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4049 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004050}
4051
4052bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4053 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004054 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004055 const auto *cb_access_context = GetAccessContext(commandBuffer);
4056 assert(cb_access_context);
4057 if (!cb_access_context) return skip;
4058
locke-lunarg61870c22020-06-09 14:51:50 -06004059 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4060 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4061 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004062 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004063}
4064
4065void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4066 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004067 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004068 auto *cb_access_context = GetAccessContext(commandBuffer);
4069 assert(cb_access_context);
4070 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004071
locke-lunarg61870c22020-06-09 14:51:50 -06004072 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4073 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4074 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004075}
4076
4077bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4078 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004079 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004080 const auto *cb_access_context = GetAccessContext(commandBuffer);
4081 assert(cb_access_context);
4082 if (!cb_access_context) return skip;
4083
locke-lunarg61870c22020-06-09 14:51:50 -06004084 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4085 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4086 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004087 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004088}
4089
4090void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4091 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004092 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004093 auto *cb_access_context = GetAccessContext(commandBuffer);
4094 assert(cb_access_context);
4095 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004096
locke-lunarg61870c22020-06-09 14:51:50 -06004097 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4098 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4099 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004100}
4101
4102bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4103 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004104 bool skip = false;
4105 if (drawCount == 0) return skip;
4106
locke-lunargff255f92020-05-13 18:53:52 -06004107 const auto *cb_access_context = GetAccessContext(commandBuffer);
4108 assert(cb_access_context);
4109 if (!cb_access_context) return skip;
4110
4111 const auto *context = cb_access_context->GetCurrentAccessContext();
4112 assert(context);
4113 if (!context) return skip;
4114
locke-lunarg61870c22020-06-09 14:51:50 -06004115 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4116 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004117 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4118 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004119
 4120 // TODO: For now, we validate the whole vertex buffer, which might cause some false positives.
 4121 // The VkDrawIndirectCommand buffer could still be changed before SubmitQueue.
 4122 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004123 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004124 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004125}
4126
4127void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4128 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004129 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004130 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004131 auto *cb_access_context = GetAccessContext(commandBuffer);
4132 assert(cb_access_context);
4133 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4134 auto *context = cb_access_context->GetCurrentAccessContext();
4135 assert(context);
4136
locke-lunarg61870c22020-06-09 14:51:50 -06004137 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4138 cb_access_context->RecordDrawSubpassAttachment(tag);
4139 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004140
 4141 // TODO: For now, we record the whole vertex buffer, which might cause some false positives.
 4142 // The VkDrawIndirectCommand buffer could still be changed before SubmitQueue.
 4143 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004144 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004145}
4146
4147bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4148 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004149 bool skip = false;
4150 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004151 const auto *cb_access_context = GetAccessContext(commandBuffer);
4152 assert(cb_access_context);
4153 if (!cb_access_context) return skip;
4154
4155 const auto *context = cb_access_context->GetCurrentAccessContext();
4156 assert(context);
4157 if (!context) return skip;
4158
locke-lunarg61870c22020-06-09 14:51:50 -06004159 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4160 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004161 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4162 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004163
 4164 // TODO: For now, we validate the whole index and vertex buffer, which might cause some false positives.
 4165 // The VkDrawIndexedIndirectCommand buffer could still be changed before SubmitQueue.
 4166 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004167 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004168 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004169}
4170
4171void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4172 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004173 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004174 auto *cb_access_context = GetAccessContext(commandBuffer);
4175 assert(cb_access_context);
4176 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4177 auto *context = cb_access_context->GetCurrentAccessContext();
4178 assert(context);
4179
locke-lunarg61870c22020-06-09 14:51:50 -06004180 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4181 cb_access_context->RecordDrawSubpassAttachment(tag);
4182 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004183
 4184 // TODO: For now, we record the whole index and vertex buffer, which might cause some false positives.
 4185 // The VkDrawIndexedIndirectCommand buffer could still be changed before SubmitQueue.
 4186 // We will record the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004187 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004188}
4189
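// Shared validation for the vkCmdDrawIndirectCount family (core, KHR, and AMD aliases). Because the
// actual draw count is read from countBuffer at execution time, the indirect buffer is validated for
// the worst case of maxDrawCount records.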
4190bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4191 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4192 uint32_t stride, const char *function) const {
4193 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004194 const auto *cb_access_context = GetAccessContext(commandBuffer);
4195 assert(cb_access_context);
4196 if (!cb_access_context) return skip;
4197
4198 const auto *context = cb_access_context->GetCurrentAccessContext();
4199 assert(context);
4200 if (!context) return skip;
4201
locke-lunarg61870c22020-06-09 14:51:50 -06004202 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4203 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004204 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4205 maxDrawCount, stride, function);
4206 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004207
 4208 // TODO: For now, we validate the whole vertex buffer, which might cause some false positives.
 4209 // The VkDrawIndirectCommand buffer could still be changed before SubmitQueue.
 4210 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004211 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004212 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004213}
4214
4215bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4216 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4217 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004218 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4219 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004220}
4221
4222void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4223 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4224 uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004225 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4226 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004227 auto *cb_access_context = GetAccessContext(commandBuffer);
4228 assert(cb_access_context);
4229 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECTCOUNT);
4230 auto *context = cb_access_context->GetCurrentAccessContext();
4231 assert(context);
4232
locke-lunarg61870c22020-06-09 14:51:50 -06004233 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4234 cb_access_context->RecordDrawSubpassAttachment(tag);
4235 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4236 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004237
 4238 // TODO: For now, we record the whole vertex buffer, which might cause some false positives.
 4239 // The VkDrawIndirectCommand buffer could still be changed before SubmitQueue.
 4240 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004241 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004242}
4243
4244bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4245 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4246 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004247 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4248 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004249}
4250
4251void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4252 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4253 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004254 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4255 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004256 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004257}
4258
4259bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4260 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4261 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004262 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4263 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004264}
4265
4266void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4267 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4268 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004269 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4270 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004271 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4272}
4273
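// Indexed variant of ValidateCmdDrawIndirectCount: same structure, sized for
// VkDrawIndexedIndirectCommand records.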
4274bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4275 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4276 uint32_t stride, const char *function) const {
4277 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004278 const auto *cb_access_context = GetAccessContext(commandBuffer);
4279 assert(cb_access_context);
4280 if (!cb_access_context) return skip;
4281
4282 const auto *context = cb_access_context->GetCurrentAccessContext();
4283 assert(context);
4284 if (!context) return skip;
4285
locke-lunarg61870c22020-06-09 14:51:50 -06004286 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4287 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004288 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4289 offset, maxDrawCount, stride, function);
4290 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004291
 4292 // TODO: For now, we validate the whole index and vertex buffer, which might cause some false positives.
 4293 // The VkDrawIndexedIndirectCommand buffer could still be changed before SubmitQueue.
 4294 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004295 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004296 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004297}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                               VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                               uint32_t maxDrawCount, uint32_t stride) const {
    return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                               "vkCmdDrawIndexedIndirectCount");
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                             VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                             uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                           maxDrawCount, stride);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECTCOUNT);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
    cb_access_context->RecordDrawSubpassAttachment(tag);
    RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
    RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);

    // TODO: For now, we record the whole index and vertex buffer, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can change up until queue submission.
    // We will update the index and vertex buffers at queue submit time in the future.
    cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                                  VkDeviceSize offset, VkBuffer countBuffer,
                                                                  VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                                  uint32_t stride) const {
    return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                               "vkCmdDrawIndexedIndirectCountKHR");
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                                VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                                uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                              maxDrawCount, stride);
    PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
}

bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                                  VkDeviceSize offset, VkBuffer countBuffer,
                                                                  VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                                  uint32_t stride) const {
    return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
                                               "vkCmdDrawIndexedIndirectCountAMD");
}

void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                                VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                                uint32_t maxDrawCount, uint32_t stride) {
    StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
                                                              maxDrawCount, stride);
    PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
}

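// vkCmdClearColorImage writes each cleared subresource range at the clear/transfer stage. Validation walks pRanges and
// reports any write hazard against prior accesses; the matching PreCallRecord hook updates the access state over the
// same ranges so later commands see the clear as a write.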
bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                      const VkClearColorValue *pColor, uint32_t rangeCount,
                                                      const VkImageSubresourceRange *pRanges) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            auto hazard =
                context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
            if (hazard.hazard) {
                skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                    const VkClearColorValue *pColor, uint32_t rangeCount,
                                                    const VkImageSubresourceRange *pRanges) {
    StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
                                       image_state->createInfo.extent, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
                                                             VkImageLayout imageLayout,
                                                             const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                             const VkImageSubresourceRange *pRanges) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            auto hazard =
                context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
            if (hazard.hazard) {
                skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                           const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                           const VkImageSubresourceRange *pRanges) {
    StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *image_state = Get<IMAGE_STATE>(image);

    for (uint32_t index = 0; index < rangeCount; index++) {
        const auto &range = pRanges[index];
        if (image_state) {
            context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
                                       image_state->createInfo.extent, tag);
        }
    }
}

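// vkCmdCopyQueryPoolResults: only the destination buffer write is modeled for now; the query pool read itself is not yet
// tracked (see the TODO below). The tracked range is approximated as stride * queryCount bytes starting at dstOffset.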
bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
                                                           uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
                                                           VkDeviceSize dstOffset, VkDeviceSize stride,
                                                           VkQueryResultFlags flags) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |=
                LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                         "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                         report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
        }
    }

    // TODO: Track VkQueryPool
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                         uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                         VkDeviceSize stride, VkQueryResultFlags flags) {
    StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                       stride, flags);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }

    // TODO: Track VkQueryPool
}

bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                 VkDeviceSize size, uint32_t data) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                             report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                               VkDeviceSize size, uint32_t data) {
    StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

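// vkCmdResolveImage reads each source region at the resolve/transfer stage and writes the matching destination region.
// Both directions are checked per region. vkCmdResolveImage2KHR below applies the same logic to the
// VkResolveImageInfo2KHR parameter structure.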
bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                   VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                   const VkImageResolve *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    const auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &resolve_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
                                                resolve_region.srcOffset, resolve_region.extent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
        }

        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
                                                resolve_region.dstOffset, resolve_region.extent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
                                 cb_access_context->FormatUsage(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                 const VkImageResolve *pRegions) {
    StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                               pRegions);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &resolve_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
                                                       const VkResolveImageInfo2KHR *pResolveImageInfo) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
    const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);

    for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
        const auto &resolve_region = pResolveImageInfo->pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
                                                resolve_region.srcOffset, resolve_region.extent);
            if (hazard.hazard) {
                skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
                                 region, cb_access_context->FormatUsage(hazard).c_str());
            }
        }

        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
                                                resolve_region.dstOffset, resolve_region.extent);
            if (hazard.hazard) {
                skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
                                 region, cb_access_context->FormatUsage(hazard).c_str());
            }
            if (skip) break;
        }
    }

    return skip;
}

void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
                                                     const VkResolveImageInfo2KHR *pResolveImageInfo) {
    StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
    auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);

    for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
        const auto &resolve_region = pResolveImageInfo->pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
                                       resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
                                       resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
        }
    }
}

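// vkCmdUpdateBuffer: dataSize is an explicit byte count (VK_WHOLE_SIZE is not permitted for this command), so the written
// range is exactly [dstOffset, dstOffset + dataSize).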
bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize dataSize, const void *pData) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        // VK_WHOLE_SIZE not allowed
        const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                             report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                 VkDeviceSize dataSize, const void *pData) {
    StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        // VK_WHOLE_SIZE not allowed
        const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

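// vkCmdWriteBufferMarkerAMD writes a single 32-bit marker, hence the fixed 4-byte range below.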
bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                           VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |=
                LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                         "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
                         report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                         VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
    StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}

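// The event commands (set/reset/wait) are implemented as SyncOp* objects, so a single dehandled representation of each
// command serves both validation and recording (and, later, queue-submit-time replay). Note that recording uses the
// PostCallRecord* hooks, after the state tracker has processed the command.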
bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
    return set_event_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;
    SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
    set_event_op.Record(cb_context);
}

bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
                                                 VkPipelineStageFlags stageMask) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
    return reset_event_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
    reset_event_op.Record(cb_context);
}

bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                                 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                 uint32_t bufferMemoryBarrierCount,
                                                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                 uint32_t imageMemoryBarrierCount,
                                                 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;

    SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
                                    dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                    pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return wait_events_op.Validate(*cb_context);
}

void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                                VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                                uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                uint32_t bufferMemoryBarrierCount,
                                                const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                uint32_t imageMemoryBarrierCount,
                                                const VkImageMemoryBarrier *pImageMemoryBarriers) {
    StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
                                              pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                              imageMemoryBarrierCount, pImageMemoryBarriers);

    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return;

    SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
                                    dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                    pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    wait_events_op.Record(cb_context);
}

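// SyncEventState tracks the most recent set/reset/barrier interactions with an event, so a subsequent wait can determine
// whether it must be ignored (set/reset races, or stage bits missing from srcStageMask).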
void SyncEventState::ResetFirstScope() {
    for (const auto address_type : kAddressTypes) {
        first_scope[static_cast<size_t>(address_type)].clear();
    }
    scope = SyncExecScope();
}

// Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(VkPipelineStageFlags2KHR srcStageMask) const {
    IgnoreReason reason = NotIgnored;

    if (last_command == CMD_RESETEVENT && !HasBarrier(0U, 0U)) {
        reason = ResetWaitRace;
    } else if (unsynchronized_set) {
        reason = SetRace;
    } else {
        const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
        if (missing_bits) reason = MissingStageBits;
    }

    return reason;
}

bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
    bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                       (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
    return has_barrier;
}

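// SyncOpBarriers captures everything a barrier command needs for validation and replay. The first constructor services
// the original vkCmdPipelineBarrier/vkCmdWaitEvents parameter lists; the second consumes the synchronization2
// VkDependencyInfoKHR form, in which the stage masks live inside the individual barrier structures.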
SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                               VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                               VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
                               const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                               const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                               const VkImageMemoryBarrier *pImageMemoryBarriers)
    : SyncOpBase(cmd),
      dependency_flags_(dependencyFlags),
      src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, srcStageMask)),
      dst_exec_scope_(SyncExecScope::MakeDst(queue_flags, dstStageMask)) {
    // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
    MakeMemoryBarriers(src_exec_scope_, dst_exec_scope_, dependencyFlags, memoryBarrierCount, pMemoryBarriers);
    MakeBufferMemoryBarriers(sync_state, src_exec_scope_, dst_exec_scope_, dependencyFlags, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers);
    MakeImageMemoryBarriers(sync_state, src_exec_scope_, dst_exec_scope_, dependencyFlags, imageMemoryBarrierCount,
                            pImageMemoryBarriers);
}

SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                               const VkDependencyInfoKHR &dep_info)
    : SyncOpBase(cmd), dependency_flags_(dep_info.dependencyFlags) {
    auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
    src_exec_scope_ = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
    dst_exec_scope_ = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
    // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
    MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount, dep_info.pMemoryBarriers);
    MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
                             dep_info.pBufferMemoryBarriers);
    MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
                            dep_info.pImageMemoryBarriers);
}

SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                             VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                             VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
                                             const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                                             const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                                             const VkImageMemoryBarrier *pImageMemoryBarriers)
    : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
                     bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}

SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                             const VkDependencyInfoKHR &dep_info)
    : SyncOpBarriers(cmd, sync_state, queue_flags, dep_info) {}

bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto *context = cb_context.GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;
    // Validate Image Layout transitions
    for (const auto &image_barrier : image_memory_barriers_) {
        if (image_barrier.new_layout == image_barrier.old_layout) continue;  // Only interested in layout transitions at this point.
        const auto *image_state = image_barrier.image.get();
        if (!image_state) continue;
        const auto hazard = context->DetectImageBarrierHazard(image_barrier);
        if (hazard.hazard) {
            // PHASE1 TODO -- add tag information to log msg when useful.
            const auto &sync_state = cb_context.GetSyncState();
            const auto image_handle = image_state->image;
            skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
                                        "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.",
                                        string_SyncHazard(hazard.hazard), image_barrier.index,
                                        sync_state.report_data->FormatHandle(image_handle).c_str(),
                                        cb_context.FormatUsage(hazard).c_str());
        }
    }

    return skip;
}

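// The barrier-application logic is shared between pipeline barriers and event waits; each SyncOp supplies a factory that
// builds the barrier functors and the address ranges they apply to. For pipeline barriers the ranges are the raw resource
// ranges (or the full address space for global barriers); the wait-events factory (SyncOpWaitEventsFunctorFactory, below)
// further restricts each barrier to the event's first synchronization scope.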
struct SyncOpPipelineBarrierFunctorFactory {
    using BarrierOpFunctor = PipelineBarrierOp;
    using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
    using GlobalBarrierOpFunctor = PipelineBarrierOp;
    using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
    using BufferRange = ResourceAccessRange;
    using ImageRange = subresource_adapter::ImageRangeGenerator;
    using GlobalRange = ResourceAccessRange;

    ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
        return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
    }
    GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
        return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
    }
    GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
        return GlobalBarrierOpFunctor(barrier, false);
    }

    BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
        if (!SimpleBinding(buffer)) return ResourceAccessRange();
        const auto base_address = ResourceBaseAddress(buffer);
        return (range + base_address);
    }
    ImageRange MakeRangeGen(const IMAGE_STATE &image, const SyncImageMemoryBarrier::SubImageRange &range) const {
        if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();

        const auto base_address = ResourceBaseAddress(image);
        subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), range.subresource_range, range.offset,
                                                           range.extent, base_address);
        return range_gen;
    }
    GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
};

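// Apply the per-resource (buffer and image) barriers, then the global barriers, to the access context. These are
// templated over the barrier list and factory types so the same traversal serves both SyncOpPipelineBarrier and
// SyncOpWaitEvents.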
template <typename Barriers, typename FunctorFactory>
void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
                                   AccessContext *context) {
    for (const auto &barrier : barriers) {
        const auto *state = barrier.GetState();
        if (state) {
            auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
            auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
            auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
            UpdateMemoryAccessState(accesses, update_action, &range_gen);
        }
    }
}

template <typename Barriers, typename FunctorFactory>
void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
                                         AccessContext *access_context) {
    auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
    for (const auto &barrier : barriers) {
        barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
    }
    for (const auto address_type : kAddressTypes) {
        auto range_gen = factory.MakeGlobalRangeGen(address_type);
        UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
    }
}

void SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
    SyncOpPipelineBarrierFunctorFactory factory;
    auto *access_context = cb_context->GetCurrentAccessContext();
    const auto tag = cb_context->NextCommandTag(cmd_);
    ApplyBarriers(buffer_memory_barriers_, factory, tag, access_context);
    ApplyBarriers(image_memory_barriers_, factory, tag, access_context);
    ApplyGlobalBarriers(memory_barriers_, factory, tag, access_context);

    cb_context->ApplyGlobalBarriersToEvents(src_exec_scope_, dst_exec_scope_);
}

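// The Make*Barriers helpers come in two flavors: the original forms use the command-wide source and destination execution
// scopes, while the synchronization2 (*2KHR) forms derive per-barrier scopes from the stage masks embedded in each barrier
// structure. In both cases handles are resolved to shared state pointers up front ("dehandled"), so replay does not depend
// on the raw Vulkan handles remaining valid.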
void SyncOpBarriers::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst, VkDependencyFlags dependency_flags,
                                        uint32_t memory_barrier_count, const VkMemoryBarrier *memory_barriers) {
    memory_barriers_.reserve(std::max<uint32_t>(1, memory_barrier_count));
    for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
        const auto &barrier = memory_barriers[barrier_index];
        SyncBarrier sync_barrier(barrier, src, dst);
        memory_barriers_.emplace_back(sync_barrier);
    }
    if (0 == memory_barrier_count) {
        // If there are no global memory barriers, force an exec barrier
        memory_barriers_.emplace_back(SyncBarrier(src, dst));
    }
}

void SyncOpBarriers::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src, const SyncExecScope &dst,
                                              VkDependencyFlags dependencyFlags, uint32_t barrier_count,
                                              const VkBufferMemoryBarrier *barriers) {
    buffer_memory_barriers_.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
        if (buffer) {
            const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
            const auto range = MakeRange(barrier.offset, barrier_size);
            const SyncBarrier sync_barrier(barrier, src, dst);
            buffer_memory_barriers_.emplace_back(buffer, sync_barrier, range);
        } else {
            buffer_memory_barriers_.emplace_back();
        }
    }
}

void SyncOpBarriers::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
                                        const VkMemoryBarrier2KHR *memory_barriers) {
    memory_barriers_.reserve(std::max<uint32_t>(1, memory_barrier_count));
    for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
        const auto &barrier = memory_barriers[barrier_index];
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
        auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
        SyncBarrier sync_barrier(barrier, src, dst);
        memory_barriers_.emplace_back(sync_barrier);
    }
    if (0 == memory_barrier_count) {
        // If there are no global memory barriers, force an exec barrier
        memory_barriers_.emplace_back(SyncBarrier(src_exec_scope_, dst_exec_scope_));
    }
}

void SyncOpBarriers::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                              VkDependencyFlags dependencyFlags, uint32_t barrier_count,
                                              const VkBufferMemoryBarrier2KHR *barriers) {
    buffer_memory_barriers_.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
        auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
        auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
        if (buffer) {
            const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
            const auto range = MakeRange(barrier.offset, barrier_size);
            const SyncBarrier sync_barrier(barrier, src, dst);
            buffer_memory_barriers_.emplace_back(buffer, sync_barrier, range);
        } else {
            buffer_memory_barriers_.emplace_back();
        }
    }
}

void SyncOpBarriers::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src, const SyncExecScope &dst,
                                             VkDependencyFlags dependencyFlags, uint32_t barrier_count,
                                             const VkImageMemoryBarrier *barriers) {
    image_memory_barriers_.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
        if (image) {
            auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
            const SyncBarrier sync_barrier(barrier, src, dst);
            image_memory_barriers_.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout,
                                                subresource_range);
        } else {
            image_memory_barriers_.emplace_back();
            image_memory_barriers_.back().index = index;  // Just in case we're interested in the ones we skipped.
        }
    }
}

void SyncOpBarriers::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
                                             VkDependencyFlags dependencyFlags, uint32_t barrier_count,
                                             const VkImageMemoryBarrier2KHR *barriers) {
    image_memory_barriers_.reserve(barrier_count);
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
        auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
        const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
        if (image) {
            auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
            const SyncBarrier sync_barrier(barrier, src, dst);
            image_memory_barriers_.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout,
                                                subresource_range);
        } else {
            image_memory_barriers_.emplace_back();
            image_memory_barriers_.back().index = index;  // Just in case we're interested in the ones we skipped.
        }
    }
}

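// SyncOpWaitEvents reuses the SyncOpBarriers machinery (with a zero dependency-flags value), then resolves the event
// handles so Validate/Record can consult each event's first synchronization scope.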
John Zulauf36ef9282021-02-02 11:47:24 -07005142SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
John Zulaufd5115702021-01-18 12:34:33 -07005143 const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5144 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5145 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5146 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005147 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005148 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
5149 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07005150 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07005151}
5152
5153bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005154 const char *const ignored = "Wait operation is ignored for this event.";
5155 bool skip = false;
5156 const auto &sync_state = cb_context.GetSyncState();
5157 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer;
5158
5159 if (src_exec_scope_.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
John Zulaufd5115702021-01-18 12:34:33 -07005160 const char *const vuid = "SYNC-vkCmdWaitEvents-hostevent-unsupported";
5161 skip = sync_state.LogInfo(command_buffer_handle, vuid,
John Zulauf36ef9282021-02-02 11:47:24 -07005162 "%s, srcStageMask includes %s, unsupported by synchronization validaton.", CmdName(),
Jeremy Gebben40a22942020-12-22 14:22:06 -07005163 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
John Zulaufd5115702021-01-18 12:34:33 -07005164 }
5165
Jeremy Gebben40a22942020-12-22 14:22:06 -07005166 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07005167 bool events_not_found = false;
John Zulauf669dfd52021-01-27 17:15:28 -07005168 const auto *events_context = cb_context.GetCurrentEventsContext();
5169 assert(events_context);
5170 for (const auto &sync_event_pair : *events_context) {
5171 const auto *sync_event = sync_event_pair.second.get();
John Zulaufd5115702021-01-18 12:34:33 -07005172 if (!sync_event) {
5173 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
John Zulauf669dfd52021-01-27 17:15:28 -07005174 // or solve this with replay creating the SyncEventState in the queue context... also this will be a
5175 // new validation error... wait without previously submitted set event...
5176 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
John Zulaufd5115702021-01-18 12:34:33 -07005177
5178 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
5179 }
5180 const auto event = sync_event->event->event;
5181 // TODO add "destroyed" checks
5182
5183 event_stage_masks |= sync_event->scope.mask_param;
5184 const auto ignore_reason = sync_event->IsIgnoredByWait(src_exec_scope_.mask_param);
5185 if (ignore_reason) {
5186 switch (ignore_reason) {
5187 case SyncEventState::ResetWaitRace: {
John Zulaufd5115702021-01-18 12:34:33 -07005188 const char *const vuid = "SYNC-vkCmdWaitEvents-missingbarrier-reset";
5189 const char *const message =
5190 "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
John Zulauf36ef9282021-02-02 11:47:24 -07005191 skip |=
5192 sync_state.LogError(event, vuid, message, CmdName(), sync_state.report_data->FormatHandle(event).c_str(),
5193 CmdName(), CommandTypeString(sync_event->last_command), ignored);
John Zulaufd5115702021-01-18 12:34:33 -07005194 break;
5195 }
5196 case SyncEventState::SetRace: {
5197 // Issue error message that Wait is waiting on an signal subject to race condition, and is thus ignored for this
5198 // event
John Zulaufd5115702021-01-18 12:34:33 -07005199 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
5200 const char *const message =
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07005201 "%s: %s Unsychronized %s calls result in race conditions w.r.t. event signalling, %s %s";
John Zulaufd5115702021-01-18 12:34:33 -07005202 const char *const reason = "First synchronization scope is undefined.";
John Zulauf36ef9282021-02-02 11:47:24 -07005203 skip |=
5204 sync_state.LogError(event, vuid, message, CmdName(), sync_state.report_data->FormatHandle(event).c_str(),
5205 CommandTypeString(sync_event->last_command), reason, ignored);
John Zulaufd5115702021-01-18 12:34:33 -07005206 break;
5207 }
5208 case SyncEventState::MissingStageBits: {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005209 const VkPipelineStageFlags2KHR missing_bits = sync_event->scope.mask_param & ~src_exec_scope_.mask_param;
John Zulaufd5115702021-01-18 12:34:33 -07005210 // Issue error message that event waited for is not in wait events scope
John Zulaufd5115702021-01-18 12:34:33 -07005211 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
5212 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07005213 "%s: %s stageMask %" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
John Zulaufd5115702021-01-18 12:34:33 -07005214 ". Bits missing from srcStageMask %s. %s";
John Zulauf36ef9282021-02-02 11:47:24 -07005215 skip |=
5216 sync_state.LogError(event, vuid, message, CmdName(), sync_state.report_data->FormatHandle(event).c_str(),
5217 sync_event->scope.mask_param, src_exec_scope_.mask_param,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005218 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), ignored);
John Zulaufd5115702021-01-18 12:34:33 -07005219 break;
5220 }
                default:
                    assert(ignore_reason == SyncEventState::NotIgnored);
            }
        } else if (image_memory_barriers_.size()) {
            const auto *context = cb_context.GetCurrentAccessContext();
            assert(context);
            for (const auto &image_memory_barrier : image_memory_barriers_) {
                if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
                const auto *image_state = image_memory_barrier.image.get();
                if (!image_state) continue;
                const auto &subresource_range = image_memory_barrier.range.subresource_range;
                const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
                const auto hazard =
                    context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
                                                      subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
                if (hazard.hazard) {
                    skip |= sync_state.LogError(image_state->image, string_SyncHazardVUID(hazard.hazard),
                                                "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
                                                string_SyncHazard(hazard.hazard), image_memory_barrier.index,
                                                sync_state.report_data->FormatHandle(image_state->image).c_str(),
                                                cb_context.FormatUsage(hazard).c_str());
                    break;
                }
            }
        }
    }

    // Note that we can't check for HOST in pEvents, as we don't track that event-set type (host-side vkSetEvent)
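    // A sketch of the check below (hypothetical application code): srcStageMask names a stage that no
    // vkCmdSetEvent in pEvents signaled, so that extra stage bit cannot be satisfied by any waited event.
    //
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    //     vkCmdWaitEvents(cb, 1, &event,
    //                     VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,  // VERTEX_SHADER is extra
    //                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);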
    const auto extra_stage_bits = (src_exec_scope_.mask_param & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
    if (extra_stage_bits) {
        // Issue an error message that srcStageMask contains stages not covered by any waited event's stageMask
        const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
        const char *const message =
            "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
        if (events_not_found) {
            skip |= sync_state.LogInfo(command_buffer_handle, vuid, message, CmdName(), src_exec_scope_.mask_param,
                                       sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
                                       " vkCmdSetEvent may be in previously submitted command buffer.");
        } else {
            skip |= sync_state.LogError(command_buffer_handle, vuid, message, CmdName(), src_exec_scope_.mask_param,
                                        sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
        }
    }
    return skip;
}

struct SyncOpWaitEventsFunctorFactory {
    using BarrierOpFunctor = WaitEventBarrierOp;
    using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
    using GlobalBarrierOpFunctor = WaitEventBarrierOp;
    using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
    using BufferRange = EventSimpleRangeGenerator;
    using ImageRange = EventImageRangeGenerator;
    using GlobalRange = EventSimpleRangeGenerator;

    // Need to restrict to only valid exec and access scope for this event
    // Pass by value is intentional to get a copy we can change without modifying the passed barrier
    SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
        barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
        barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
        return barrier;
    }
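    // A worked example of the restriction above (illustrative values only, assuming the event was set at the
    // TRANSFER stage): the intersection discards any scope the event never recorded.
    //
    //     sync_event->scope.exec_scope       = TRANSFER
    //     barrier.src_exec_scope.exec_scope  = TRANSFER | COMPUTE
    //     restricted src_exec_scope          = TRANSFER       // COMPUTE was never in the event's first scope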
    ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
        auto barrier = RestrictToEvent(barrier_arg);
        return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
    }
    GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
        return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
    }
    GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
        auto barrier = RestrictToEvent(barrier_arg);
        return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
    }

    BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
        const AccessAddressType address_type = GetAccessAddressType(buffer);
        const auto base_address = ResourceBaseAddress(buffer);
        ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
        EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
        return filtered_range_gen;
    }
    ImageRange MakeRangeGen(const IMAGE_STATE &image, const SyncImageMemoryBarrier::SubImageRange &range) const {
        if (!SimpleBinding(image)) return ImageRange();
        const auto address_type = GetAccessAddressType(image);
        const auto base_address = ResourceBaseAddress(image);
        subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), range.subresource_range,
                                                                 range.offset, range.extent, base_address);
        EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);

        return filtered_range_gen;
    }
    GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
        return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
    }
    SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
    SyncEventState *sync_event;
};

void SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
    const auto tag = cb_context->NextCommandTag(cmd_);
    auto *access_context = cb_context->GetCurrentAccessContext();
    assert(access_context);
    if (!access_context) return;
    auto *events_context = cb_context->GetCurrentEventsContext();
    assert(events_context);
    if (!events_context) return;

    // Unlike PipelineBarrier, WaitEvents is *not* limited to accesses within the current subpass (if any) and thus needs to
    // import all accesses. We could instead import only the events' first_scopes (or a union of them) if this becomes a
    // performance/memory issue, but with no data on the cost of the union, nor on whether it even matters, we take the
    // simplest approach here.
    access_context->ResolvePreviousAccesses();

    const auto &dst = dst_exec_scope_;
    // TODO: this needs to change the SyncEventContext it's using depending on whether this is a replay... the recorded
    // sync_event will be in the recorded context, but we need to update the sync_events in the current context....
    for (auto &event_shared : events_) {
        if (!event_shared.get()) continue;
        auto *sync_event = events_context->GetFromShared(event_shared);

        sync_event->last_command = CMD_WAITEVENTS;

        if (!sync_event->IsIgnoredByWait(src_exec_scope_.mask_param)) {
            // These apply the barriers one at a time, as each is restricted to the resource ranges specified per barrier,
            // but do not update the dependency chain information (only setting the "pending" state), s.t. the order
            // independence of the barriers is maintained.
            SyncOpWaitEventsFunctorFactory factory(sync_event);
            ApplyBarriers(buffer_memory_barriers_, factory, tag, access_context);
            ApplyBarriers(image_memory_barriers_, factory, tag, access_context);
            ApplyGlobalBarriers(memory_barriers_, factory, tag, access_context);

            // Apply the global barrier to the event itself (for race condition tracking)
            // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS bit, if set, for
            // inter-event calls
            sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
            sync_event->barriers |= dst.exec_scope;
        } else {
            // We ignored this wait, so we don't have any effective synchronization barriers for it.
            sync_event->barriers = 0U;
        }
    }

    // Apply the pending barriers
    ResolvePendingBarrierFunctor apply_pending_action(tag);
    access_context->ApplyToContext(apply_pending_action);
}

bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
                                                            VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
        if (hazard.hazard) {
            skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdWriteBufferMarker2AMD: Hazard %s for dstBuffer %s. Access info %s.",
                             string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
                             string_UsageTag(hazard.tag).c_str());
        }
    }
    return skip;
}
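
// A sketch of a hazard the check above catches (hypothetical application code): each marker write touches
// the 4 bytes at dstOffset, so two writes to the same offset without an intervening barrier form a
// write-after-write hazard.
//
//     vkCmdWriteBufferMarker2AMD(cb, VK_PIPELINE_STAGE_2_COPY_BIT_KHR, buffer, 0, 1);
//     vkCmdWriteBufferMarker2AMD(cb, VK_PIPELINE_STAGE_2_COPY_BIT_KHR, buffer, 0, 2);  // WRITE_AFTER_WRITE at offset 0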

void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
    events_.reserve(event_count);
    for (uint32_t event_index = 0; event_index < event_count; event_index++) {
        events_.emplace_back(sync_state.GetShared<EVENT_STATE>(events[event_index]));
    }
}

SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
                                   VkPipelineStageFlags stageMask)
    : SyncOpBase(cmd),
      event_(sync_state.GetShared<EVENT_STATE>(event)),
      exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}

bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
    auto *events_context = cb_context.GetCurrentEventsContext();
    assert(events_context);
    bool skip = false;
    if (!events_context) return skip;

    const auto &sync_state = cb_context.GetSyncState();
    const auto *sync_event = events_context->Get(event_);
    if (!sync_event) return skip;  // Core, Lifetimes, or Param check needs to catch invalid events.

    const char *const set_wait =
        "%s: %s %s operation following %s without intervening execution barrier is a race condition and may result in data "
        "hazards.";
    const char *message = set_wait;  // Only one message this call.
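    // A sketch of the race reported below and one way to fix it (hypothetical application code; the exact
    // barrier stages depend on the workload):
    //
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    //     vkCmdResetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);  // race: no intervening barrier
    //
    //     // Fixed: an execution barrier covering the set's stage orders the reset after the set
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    //     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
    //                          0, nullptr, 0, nullptr, 0, nullptr);
    //     vkCmdResetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);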
    if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
        const char *vuid = nullptr;
        switch (sync_event->last_command) {
            case CMD_SETEVENT:
                // Needs a barrier between set and reset
                vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
                break;
            case CMD_WAITEVENTS: {
                // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask)
                vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
                break;
            }
            default:
                // The only other valid last commands (CMD_NONE or CMD_RESETEVENT) need no barrier.
                assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT));
                break;
        }
        if (vuid) {
            skip |= sync_state.LogError(event_->event, vuid, message, CmdName(),
                                        sync_state.report_data->FormatHandle(event_->event).c_str(), CmdName(),
                                        CommandTypeString(sync_event->last_command));
        }
    }
    return skip;
}

void SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
    auto *events_context = cb_context->GetCurrentEventsContext();
    assert(events_context);
    if (!events_context) return;

    auto *sync_event = events_context->GetFromShared(event_);
    if (!sync_event) return;  // Core, Lifetimes, or Param check needs to catch invalid events.

    // Update the event state
    sync_event->last_command = cmd_;
    sync_event->unsynchronized_set = CMD_NONE;
    sync_event->ResetFirstScope();
    sync_event->barriers = 0U;
}

SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
                               VkPipelineStageFlags stageMask)
    : SyncOpBase(cmd),
      event_(sync_state.GetShared<EVENT_STATE>(event)),
      src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}

bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
    // I'll put this here just in case we need to pass this in for future extension support
    bool skip = false;

    const auto &sync_state = cb_context.GetSyncState();
    auto *events_context = cb_context.GetCurrentEventsContext();
    assert(events_context);
    if (!events_context) return skip;

    const auto *sync_event = events_context->Get(event_);
    if (!sync_event) return skip;  // Core, Lifetimes, or Param check needs to catch invalid events.

    const char *const reset_set =
        "%s: %s %s operation following %s without intervening execution barrier is a race condition and may result in data "
        "hazards.";
    const char *const wait =
        "%s: %s %s operation following %s without an intervening vkCmdResetEvent may result in a data hazard and is ignored.";
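
    // A sketch of the set-after-wait case (hypothetical application code): without an intervening
    // vkCmdResetEvent, the new set cannot be distinguished from the one already consumed by the wait.
    //
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    //     vkCmdWaitEvents(cb, 1, &event, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    //                     0, nullptr, 0, nullptr, 0, nullptr);
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);   // set following wait without reset: ignored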

    if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
        const char *vuid = nullptr;
        const char *message = nullptr;
        switch (sync_event->last_command) {
            case CMD_RESETEVENT:
                // Needs a barrier between reset and set
                vuid = "SYNC-vkCmdSetEvent-missingbarrier-reset";
                message = reset_set;
                break;
            case CMD_SETEVENT:
                // Needs a barrier between set and set
                vuid = "SYNC-vkCmdSetEvent-missingbarrier-set";
                message = reset_set;
                break;
            case CMD_WAITEVENTS:
                // Needs a barrier, or to be in the second execution scope of the wait
                vuid = "SYNC-vkCmdSetEvent-missingbarrier-wait";
                message = wait;
                break;
            default:
                // The only other valid last command (CMD_NONE) needs no barrier.
                assert(sync_event->last_command == CMD_NONE);
                break;
        }
        if (vuid) {
            assert(nullptr != message);
            skip |= sync_state.LogError(event_->event, vuid, message, CmdName(),
                                        sync_state.report_data->FormatHandle(event_->event).c_str(), CmdName(),
                                        CommandTypeString(sync_event->last_command));
        }
    }

    return skip;
}

void SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
    const auto tag = cb_context->NextCommandTag(cmd_);
    auto *events_context = cb_context->GetCurrentEventsContext();
    auto *access_context = cb_context->GetCurrentAccessContext();
    assert(events_context);
    if (!events_context) return;

    auto *sync_event = events_context->GetFromShared(event_);
    if (!sync_event) return;  // Core, Lifetimes, or Param check needs to catch invalid events.

    // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined,
    // and we're issuing errors re: missing barriers between event commands, which, if the user fixes them, would also fix
    // any issues caused by naive scope setting here.

    // What happens with two SetEvents is that one cannot know which group of operations will be waited for.
    // Given:
    //     Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
    // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
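    // The same ambiguity in (hypothetical) API terms:
    //
    //     vkCmdCopyBuffer(cb, src, dst, 1, &copy);                         // Stuff1
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    //     vkCmdDispatch(cb, x, y, z);                                      // Stuff2
    //     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);  // unsynchronized second set
    //     vkCmdWaitEvents(cb, 1, &event, ...);  // which scope? Here the first scope is discarded and SetRace is reported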

    if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
        sync_event->unsynchronized_set = sync_event->last_command;
        sync_event->ResetFirstScope();
    } else if (sync_event->scope.exec_scope == 0) {
        // We only set the scope if there isn't one
        sync_event->scope = src_exec_scope_;

        auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
            auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
            if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
                scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
            }
        };
        access_context->ForAll(set_scope);
        sync_event->unsynchronized_set = CMD_NONE;
        sync_event->first_scope_tag = tag;
    }
    sync_event->last_command = CMD_SETEVENT;
    sync_event->barriers = 0U;
}

SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
                                             const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo, const char *cmd_name)
    : SyncOpBase(cmd, cmd_name) {
    if (pRenderPassBegin) {
        rp_state_ = sync_state.GetShared<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
        renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
        const auto *fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
        if (fb_state) {
            shared_attachments_ = sync_state.GetSharedAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
            // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer
            // copy. Note that this is safe to persist as long as shared_attachments_ is not cleared.
            attachments_.reserve(shared_attachments_.size());
            for (const auto &attachment : shared_attachments_) {
                attachments_.emplace_back(attachment.get());
            }
        }
        if (pSubpassBeginInfo) {
            subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
        }
    }
}

bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
    // Check if any of the layout transitions are hazardous... but we don't have the renderpass context to work with, so we
    // build a temporary, validation-only access context from the current one below.
    bool skip = false;

    assert(rp_state_.get());
    if (nullptr == rp_state_.get()) return skip;
    auto &rp_state = *rp_state_.get();

    const uint32_t subpass = 0;

    // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
    // hasn't happened yet)
    const std::vector<AccessContext> empty_context_vector;
    AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
                               cb_context.GetCurrentAccessContext());

    // Validate attachment operations
    if (attachments_.size() == 0) return skip;
    const auto &render_area = renderpass_begin_info_.renderArea;
    skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, attachments_, CmdName());

    // Validate load operations if there were no layout transition hazards
    if (!skip) {
        temp_context.RecordLayoutTransitions(rp_state, subpass, attachments_, kCurrentCommandTag);
        skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, attachments_, CmdName());
    }

    return skip;
}

void SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
    // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
    assert(rp_state_.get());
    if (nullptr == rp_state_.get()) return;
    const auto tag = cb_context->NextCommandTag(cmd_);
    cb_context->RecordBeginRenderPass(*rp_state_.get(), renderpass_begin_info_.renderArea, attachments_, tag);
}

SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                     const VkSubpassEndInfo *pSubpassEndInfo, const char *name_override)
    : SyncOpBase(cmd, name_override) {
    if (pSubpassBeginInfo) {
        subpass_begin_info_.initialize(pSubpassBeginInfo);
    }
    if (pSubpassEndInfo) {
        subpass_end_info_.initialize(pSubpassEndInfo);
    }
}

bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
    if (!renderpass_context) return skip;

    skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
    return skip;
}

void SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
    // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
    cb_context->RecordNextSubpass(cmd_);
}

SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo,
                                         const char *name_override)
    : SyncOpBase(cmd, name_override) {
    if (pSubpassEndInfo) {
        subpass_end_info_.initialize(pSubpassEndInfo);
    }
}

bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
    bool skip = false;
    const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();

    if (!renderpass_context) return skip;
    skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
    return skip;
}

void SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
    // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
    cb_context->RecordEndRenderPass(cmd_);
}

void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
                                                          VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
    StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);

    if (dst_buffer) {
        const ResourceAccessRange range = MakeRange(dstOffset, 4);
        context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
    }
}