John Zulaufab7756b2020-12-29 16:10:16 -07001/* Copyright (c) 2019-2021 The Khronos Group Inc.
2 * Copyright (c) 2019-2021 Valve Corporation
3 * Copyright (c) 2019-2021 LunarG, Inc.
John Zulauf9cb530d2019-09-30 14:14:10 -06004 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 * Author: John Zulauf <jzulauf@lunarg.com>
John Zulaufab7756b2020-12-29 16:10:16 -070018 * Author: Locke Lin <locke@lunarg.com>
19 * Author: Jeremy Gebben <jeremyg@lunarg.com>
John Zulauf9cb530d2019-09-30 14:14:10 -060020 */
21
22#include <limits>
23#include <vector>
locke-lunarg296a3c92020-03-25 01:04:29 -060024#include <memory>
25#include <bitset>
John Zulauf9cb530d2019-09-30 14:14:10 -060026#include "synchronization_validation.h"
Jeremy Gebben5f585ae2021-02-02 09:03:06 -070027#include "sync_utils.h"
John Zulauf9cb530d2019-09-30 14:14:10 -060028
Jeremy Gebben6fbf8242021-06-21 09:14:46 -060029static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.Binding(); }
John Zulauf264cce02021-02-05 14:40:47 -070030
John Zulauf29d00532021-03-04 13:28:54 -070031static bool SimpleBinding(const IMAGE_STATE &image_state) {
Jeremy Gebben62c3bf42021-07-21 15:38:24 -060032 bool simple =
Jeremy Gebben82e11d52021-07-26 09:19:37 -060033 SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.IsSwapchainImage() || image_state.bind_swapchain;
John Zulauf29d00532021-03-04 13:28:54 -070034
35 // If it's not simple we must have an encoder.
36 assert(!simple || image_state.fragment_encoder.get());
37 return simple;
38}
39
John Zulauf43cc7462020-12-03 12:33:12 -070040const static std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
41 AccessAddressType::kLinear, AccessAddressType::kIdealized};
42
John Zulaufd5115702021-01-18 12:34:33 -070043static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; };
John Zulauf264cce02021-02-05 14:40:47 -070044static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
45 return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
46}
John Zulaufd5115702021-01-18 12:34:33 -070047
John Zulauf9cb530d2019-09-30 14:14:10 -060048static const char *string_SyncHazardVUID(SyncHazard hazard) {
49 switch (hazard) {
50 case SyncHazard::NONE:
John Zulauf2f952d22020-02-10 11:34:51 -070051 return "SYNC-HAZARD-NONE";
John Zulauf9cb530d2019-09-30 14:14:10 -060052 break;
53 case SyncHazard::READ_AFTER_WRITE:
54 return "SYNC-HAZARD-READ_AFTER_WRITE";
55 break;
56 case SyncHazard::WRITE_AFTER_READ:
57 return "SYNC-HAZARD-WRITE_AFTER_READ";
58 break;
59 case SyncHazard::WRITE_AFTER_WRITE:
60 return "SYNC-HAZARD-WRITE_AFTER_WRITE";
61 break;
John Zulauf2f952d22020-02-10 11:34:51 -070062 case SyncHazard::READ_RACING_WRITE:
63 return "SYNC-HAZARD-READ-RACING-WRITE";
64 break;
65 case SyncHazard::WRITE_RACING_WRITE:
66 return "SYNC-HAZARD-WRITE-RACING-WRITE";
67 break;
68 case SyncHazard::WRITE_RACING_READ:
69 return "SYNC-HAZARD-WRITE-RACING-READ";
70 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060071 default:
72 assert(0);
73 }
74 return "SYNC-HAZARD-INVALID";
75}
76
John Zulauf59e25072020-07-17 10:55:21 -060077static bool IsHazardVsRead(SyncHazard hazard) {
78 switch (hazard) {
79 case SyncHazard::NONE:
80 return false;
81 break;
82 case SyncHazard::READ_AFTER_WRITE:
83 return false;
84 break;
85 case SyncHazard::WRITE_AFTER_READ:
86 return true;
87 break;
88 case SyncHazard::WRITE_AFTER_WRITE:
89 return false;
90 break;
91 case SyncHazard::READ_RACING_WRITE:
92 return false;
93 break;
94 case SyncHazard::WRITE_RACING_WRITE:
95 return false;
96 break;
97 case SyncHazard::WRITE_RACING_READ:
98 return true;
99 break;
100 default:
101 assert(0);
102 }
103 return false;
104}
105
John Zulauf9cb530d2019-09-30 14:14:10 -0600106static const char *string_SyncHazard(SyncHazard hazard) {
107 switch (hazard) {
108 case SyncHazard::NONE:
109 return "NONR";
110 break;
111 case SyncHazard::READ_AFTER_WRITE:
112 return "READ_AFTER_WRITE";
113 break;
114 case SyncHazard::WRITE_AFTER_READ:
115 return "WRITE_AFTER_READ";
116 break;
117 case SyncHazard::WRITE_AFTER_WRITE:
118 return "WRITE_AFTER_WRITE";
119 break;
John Zulauf2f952d22020-02-10 11:34:51 -0700120 case SyncHazard::READ_RACING_WRITE:
121 return "READ_RACING_WRITE";
122 break;
123 case SyncHazard::WRITE_RACING_WRITE:
124 return "WRITE_RACING_WRITE";
125 break;
126 case SyncHazard::WRITE_RACING_READ:
127 return "WRITE_RACING_READ";
128 break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600129 default:
130 assert(0);
131 }
132 return "INVALID HAZARD";
133}
134
John Zulauf37ceaed2020-07-03 16:18:15 -0600135static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
136 // Return the info for the first bit found
137 const SyncStageAccessInfoType *info = nullptr;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700138 for (size_t i = 0; i < flags.size(); i++) {
139 if (flags.test(i)) {
140 info = &syncStageAccessInfoByStageAccessIndex[i];
141 break;
John Zulauf37ceaed2020-07-03 16:18:15 -0600142 }
143 }
144 return info;
145}
146
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700147static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
John Zulauf59e25072020-07-17 10:55:21 -0600148 std::string out_str;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700149 if (flags.none()) {
John Zulauf389c34b2020-07-28 11:19:35 -0600150 out_str = "0";
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700151 } else {
152 for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
153 const auto &info = syncStageAccessInfoByStageAccessIndex[i];
154 if ((flags & info.stage_access_bit).any()) {
155 if (!out_str.empty()) {
156 out_str.append(sep);
157 }
158 out_str.append(info.name);
John Zulauf59e25072020-07-17 10:55:21 -0600159 }
John Zulauf59e25072020-07-17 10:55:21 -0600160 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700161 if (out_str.length() == 0) {
162 out_str.append("Unhandled SyncStageAccess");
163 }
John Zulauf59e25072020-07-17 10:55:21 -0600164 }
165 return out_str;
166}
167
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700168static std::string string_UsageTag(const ResourceUsageTag &tag) {
169 std::stringstream out;
170
John Zulauffaea0ee2021-01-14 14:01:32 -0700171 out << "command: " << CommandTypeString(tag.command);
172 out << ", seq_no: " << tag.seq_num;
173 if (tag.sub_command != 0) {
174 out << ", subcmd: " << tag.sub_command;
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -0700175 }
176 return out.str();
177}
178
John Zulauffaea0ee2021-01-14 14:01:32 -0700179std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
John Zulauf37ceaed2020-07-03 16:18:15 -0600180 const auto &tag = hazard.tag;
John Zulauf59e25072020-07-17 10:55:21 -0600181 assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
182 const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
John Zulauf1dae9192020-06-16 15:46:44 -0600183 std::stringstream out;
John Zulauf37ceaed2020-07-03 16:18:15 -0600184 const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
185 const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
John Zulauf59e25072020-07-17 10:55:21 -0600186 out << "(usage: " << usage_info.name << ", prior_usage: " << stage_access_name;
187 if (IsHazardVsRead(hazard.hazard)) {
188 const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
Jeremy Gebben40a22942020-12-22 14:22:06 -0700189 out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
John Zulauf59e25072020-07-17 10:55:21 -0600190 } else {
191 SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
192 out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
193 }
194
John Zulauffaea0ee2021-01-14 14:01:32 -0700195 // PHASE2 TODO -- add command buffer and reset from secondary if applicable
ZaOniRinku56b86472021-03-23 20:25:05 +0100196 out << ", " << string_UsageTag(tag) << ", reset_no: " << reset_count_ << ")";
John Zulauf1dae9192020-06-16 15:46:44 -0600197 return out.str();
198}
199
John Zulaufd14743a2020-07-03 09:42:39 -0600200// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
201// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
202// also reflects this special case for read hazard detection (using access instead of exec scope)
Jeremy Gebben40a22942020-12-22 14:22:06 -0700203static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700204static const SyncStageAccessFlags kColorAttachmentAccessScope =
205 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
206 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
207 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
208 SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
Jeremy Gebben40a22942020-12-22 14:22:06 -0700209static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
210 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700211static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
212 SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
213 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
214 SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -0700215static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
John Zulauf8e3c3e92021-01-06 11:19:36 -0700216static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;
John Zulaufb027cdb2020-05-21 14:25:22 -0600217
John Zulauf8e3c3e92021-01-06 11:19:36 -0700218ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
Jeremy Gebben40a22942020-12-22 14:22:06 -0700219 {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
John Zulauf8e3c3e92021-01-06 11:19:36 -0700220 {kColorAttachmentExecScope, kColorAttachmentAccessScope},
221 {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
222 {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};
223
John Zulauf7635de32020-05-29 17:14:15 -0600224// Sometimes we have an internal access conflict, and we use the kCurrentCommandTag to set and detect in temporary/proxy contexts
John Zulauffaea0ee2021-01-14 14:01:32 -0700225static const ResourceUsageTag kCurrentCommandTag(ResourceUsageTag::kMaxIndex, ResourceUsageTag::kMaxCount,
226 ResourceUsageTag::kMaxCount, CMD_NONE);
John Zulaufb027cdb2020-05-21 14:25:22 -0600227
Jeremy Gebben62c3bf42021-07-21 15:38:24 -0600228static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) { return bindable.GetFakeBaseAddress(); }
John Zulaufb02c1eb2020-10-06 16:33:36 -0600229
locke-lunarg3c038002020-04-30 23:08:08 -0600230inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
231 if (size == VK_WHOLE_SIZE) {
232 return (whole_size - offset);
233 }
234 return size;
235}
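// Editor's illustration (not part of the original source, values are hypothetical): with offset = 16,
// size = VK_WHOLE_SIZE, and whole_size = 256, GetRealWholeSize returns 256 - 16 = 240; with an explicit
// size such as 64 it simply returns 64.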
236
John Zulauf3e86bf02020-09-12 10:47:57 -0600237static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
238 return GetRealWholeSize(offset, size, buf_state.createInfo.size);
239}
240
John Zulauf16adfc92020-04-08 10:28:33 -0600241template <typename T>
John Zulauf355e49b2020-04-24 15:11:15 -0600242static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
John Zulauf16adfc92020-04-08 10:28:33 -0600243 return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
244}
245
John Zulauf355e49b2020-04-24 15:11:15 -0600246static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }
John Zulauf16adfc92020-04-08 10:28:33 -0600247
John Zulauf3e86bf02020-09-12 10:47:57 -0600248static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
249 return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
250}
251
252static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
253 return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
254}
255
John Zulauf4a6105a2020-11-17 15:11:05 -0700256// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
257//
John Zulauf10f1f522020-12-18 12:00:35 -0700258// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
259//
John Zulauf4a6105a2020-11-17 15:11:05 -0700260// Usage:
261// Constructor() -- initializes the generator to point to the begin of the space declared.
262 // * -- the current range of the generator; an empty range signifies end
263// ++ -- advance to the next non-empty range (or end)
264
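//
// Editor's sketch of a typical consumption loop (mirrors ApplyOverImageRange further below; 'gen' stands for any of
// these generators, so the names here are illustrative only):
//   for (; gen->non_empty(); ++gen) {
//       const auto &range = *gen;  // current non-empty range
//       // ... apply an action over 'range' ...
//   }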
265// A wrapper for a single range with the same semantics as the actual generators below
266template <typename KeyType>
267class SingleRangeGenerator {
268 public:
269 SingleRangeGenerator(const KeyType &range) : current_(range) {}
John Zulaufd5115702021-01-18 12:34:33 -0700270 const KeyType &operator*() const { return current_; }
271 const KeyType *operator->() const { return &current_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700272 SingleRangeGenerator &operator++() {
273 current_ = KeyType(); // just one real range
274 return *this;
275 }
276
277 bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }
278
279 private:
280 SingleRangeGenerator() = default;
281 const KeyType range_;
282 KeyType current_;
283};
284
285// Generate the ranges that are the intersection of range and the entries in the FilterMap
286template <typename FilterMap, typename KeyType = typename FilterMap::key_type>
287class FilteredRangeGenerator {
288 public:
John Zulaufd5115702021-01-18 12:34:33 -0700289 // A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
290 FilteredRangeGenerator() : range_(), filter_(nullptr), filter_pos_(), current_() {
291 // Default construction for KeyType *must* be empty range
292 assert(current_.empty());
293 }
John Zulauf4a6105a2020-11-17 15:11:05 -0700294 FilteredRangeGenerator(const FilterMap &filter, const KeyType &range)
295 : range_(range), filter_(&filter), filter_pos_(), current_() {
296 SeekBegin();
297 }
John Zulaufd5115702021-01-18 12:34:33 -0700298 FilteredRangeGenerator(const FilteredRangeGenerator &from) = default;
299
John Zulauf4a6105a2020-11-17 15:11:05 -0700300 const KeyType &operator*() const { return current_; }
301 const KeyType *operator->() const { return &current_; }
302 FilteredRangeGenerator &operator++() {
303 ++filter_pos_;
304 UpdateCurrent();
305 return *this;
306 }
307
308 bool operator==(const FilteredRangeGenerator &other) const { return current_ == other.current_; }
309
310 private:
John Zulauf4a6105a2020-11-17 15:11:05 -0700311 void UpdateCurrent() {
312 if (filter_pos_ != filter_->cend()) {
313 current_ = range_ & filter_pos_->first;
314 } else {
315 current_ = KeyType();
316 }
317 }
318 void SeekBegin() {
319 filter_pos_ = filter_->lower_bound(range_);
320 UpdateCurrent();
321 }
322 const KeyType range_;
323 const FilterMap *filter_;
324 typename FilterMap::const_iterator filter_pos_;
325 KeyType current_;
326};
John Zulaufd5115702021-01-18 12:34:33 -0700327using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
John Zulauf4a6105a2020-11-17 15:11:05 -0700328using EventSimpleRangeGenerator = FilteredRangeGenerator<SyncEventState::ScopeMap>;
329
330// Templated to allow for different Range generators or map sources...
331
332// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
John Zulauf4a6105a2020-11-17 15:11:05 -0700333template <typename FilterMap, typename RangeGen, typename KeyType = typename FilterMap::key_type>
334class FilteredGeneratorGenerator {
335 public:
John Zulaufd5115702021-01-18 12:34:33 -0700336 // A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
337 FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
338 // Default construction for KeyType *must* be empty range
339 assert(current_.empty());
340 }
341 FilteredGeneratorGenerator(const FilterMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
John Zulauf4a6105a2020-11-17 15:11:05 -0700342 SeekBegin();
343 }
John Zulaufd5115702021-01-18 12:34:33 -0700344 FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
John Zulauf4a6105a2020-11-17 15:11:05 -0700345 const KeyType &operator*() const { return current_; }
346 const KeyType *operator->() const { return &current_; }
347 FilteredGeneratorGenerator &operator++() {
348 KeyType gen_range = GenRange();
349 KeyType filter_range = FilterRange();
350 current_ = KeyType();
351 while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
352 if (gen_range.end > filter_range.end) {
353 // if the generated range is beyond the filter_range, advance the filter range
354 filter_range = AdvanceFilter();
355 } else {
356 gen_range = AdvanceGen();
357 }
358 current_ = gen_range & filter_range;
359 }
360 return *this;
361 }
362
363 bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }
364
365 private:
366 KeyType AdvanceFilter() {
367 ++filter_pos_;
368 auto filter_range = FilterRange();
369 if (filter_range.valid()) {
370 FastForwardGen(filter_range);
371 }
372 return filter_range;
373 }
374 KeyType AdvanceGen() {
John Zulaufd5115702021-01-18 12:34:33 -0700375 ++gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700376 auto gen_range = GenRange();
377 if (gen_range.valid()) {
378 FastForwardFilter(gen_range);
379 }
380 return gen_range;
381 }
382
383 KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
John Zulaufd5115702021-01-18 12:34:33 -0700384 KeyType GenRange() const { return *gen_; }
John Zulauf4a6105a2020-11-17 15:11:05 -0700385
386 KeyType FastForwardFilter(const KeyType &range) {
387 auto filter_range = FilterRange();
388 int retry_count = 0;
John Zulauf10f1f522020-12-18 12:00:35 -0700389 const static int kRetryLimit = 2; // TODO -- determine whether this limit is optimal
John Zulauf4a6105a2020-11-17 15:11:05 -0700390 while (!filter_range.empty() && (filter_range.end <= range.begin)) {
391 if (retry_count < kRetryLimit) {
392 ++filter_pos_;
393 filter_range = FilterRange();
394 retry_count++;
395 } else {
396 // Okay we've tried walking, do a seek.
397 filter_pos_ = filter_->lower_bound(range);
398 break;
399 }
400 }
401 return FilterRange();
402 }
403
404 // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
405 // faster.
406 KeyType FastForwardGen(const KeyType &range) {
407 auto gen_range = GenRange();
408 while (!gen_range.empty() && (gen_range.end <= range.begin)) {
John Zulaufd5115702021-01-18 12:34:33 -0700409 ++gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700410 gen_range = GenRange();
411 }
412 return gen_range;
413 }
414
415 void SeekBegin() {
416 auto gen_range = GenRange();
417 if (gen_range.empty()) {
418 current_ = KeyType();
419 filter_pos_ = filter_->cend();
420 } else {
421 filter_pos_ = filter_->lower_bound(gen_range);
422 current_ = gen_range & FilterRange();
423 }
424 }
425
John Zulauf4a6105a2020-11-17 15:11:05 -0700426 const FilterMap *filter_;
John Zulaufd5115702021-01-18 12:34:33 -0700427 RangeGen gen_;
John Zulauf4a6105a2020-11-17 15:11:05 -0700428 typename FilterMap::const_iterator filter_pos_;
429 KeyType current_;
430};
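// Editor's illustration with hypothetical ranges (not part of the original source): if the wrapped generator yields
// [0, 10) and [20, 30) and the filter map holds an entry keyed on [5, 25), iteration visits the intersections
// [5, 10) and then [20, 25), after which the current range is empty, signalling end.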
431
432using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;
433
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -0700434static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
John Zulauf5c5e88d2019-12-26 11:22:02 -0700435
John Zulauf3e86bf02020-09-12 10:47:57 -0600436ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
437 VkDeviceSize stride) {
438 VkDeviceSize range_start = offset + first_index * stride;
439 VkDeviceSize range_size = 0;
locke-lunargff255f92020-05-13 18:53:52 -0600440 if (count == UINT32_MAX) {
441 range_size = buf_whole_size - range_start;
442 } else {
443 range_size = count * stride;
444 }
John Zulauf3e86bf02020-09-12 10:47:57 -0600445 return MakeRange(range_start, range_size);
locke-lunargff255f92020-05-13 18:53:52 -0600446}
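// Editor's worked example (not part of the original source, values are hypothetical): offset = 256, first_index = 2,
// count = 3, stride = 64 gives range_start = 256 + 2 * 64 = 384 and range_size = 3 * 64 = 192, i.e. the
// buffer range [384, 576).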
447
locke-lunarg654e3692020-06-04 17:19:15 -0600448SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
449 VkShaderStageFlagBits stage_flag) {
450 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
451 assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
452 return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
453 }
454 auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
455 if (stage_access == syncStageAccessMaskByShaderStage.end()) {
456 assert(0);
457 }
458 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
459 return stage_access->second.uniform_read;
460 }
461
462 // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
463 // Because if write hazard happens, read hazard might or might not happen.
464 // But if a write hazard doesn't happen, a read hazard cannot happen.
465 if (descriptor_data.is_writable) {
Jeremy Gebben40a22942020-12-22 14:22:06 -0700466 return stage_access->second.storage_write;
locke-lunarg654e3692020-06-04 17:19:15 -0600467 }
Jeremy Gebben40a22942020-12-22 14:22:06 -0700468 // TODO: sampled_read
469 return stage_access->second.storage_read;
locke-lunarg654e3692020-06-04 17:19:15 -0600470}
471
locke-lunarg37047832020-06-12 13:44:45 -0600472bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
473 return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
474 image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
475 image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
476 ? true
477 : false;
478}
479
480bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
481 return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
482 image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
483 image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
484 ? true
485 : false;
486}
487
John Zulauf355e49b2020-04-24 15:11:15 -0600488// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
John Zulaufb02c1eb2020-10-06 16:33:36 -0600489template <typename Action>
490static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
491 Action &action) {
492 // At this point the "apply over range" logic only supports a single memory binding
493 if (!SimpleBinding(image_state)) return;
494 auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600495 const auto base_address = ResourceBaseAddress(image_state);
John Zulauf150e5332020-12-03 08:52:52 -0700496 subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
497 image_state.createInfo.extent, base_address);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600498 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -0700499 action(*range_gen);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600500 }
501}
502
John Zulauf7635de32020-05-29 17:14:15 -0600503// Traverse the attachment resolves for a specific subpass, and apply action() to them.
504// Used by both validation and record operations
505//
506 // The signature for Action() reflects the needs of both uses.
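// For reference, the Action() callables defined below (ValidateResolveAction / UpdateStateResolveAction) are invoked as:
//   action(aspect_name, attachment_name, src_at, dst_at, view_gen, gen_type, current_usage, ordering_rule);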
507template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -0700508void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
509 uint32_t subpass) {
John Zulauf7635de32020-05-29 17:14:15 -0600510 const auto &rp_ci = rp_state.createInfo;
511 const auto *attachment_ci = rp_ci.pAttachments;
512 const auto &subpass_ci = rp_ci.pSubpasses[subpass];
513
514 // Color resolves -- require an inuse color attachment and a matching inuse resolve attachment
515 const auto *color_attachments = subpass_ci.pColorAttachments;
516 const auto *color_resolve = subpass_ci.pResolveAttachments;
517 if (color_resolve && color_attachments) {
518 for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
519 const auto &color_attach = color_attachments[i].attachment;
520 const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
521 if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
522 action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
John Zulaufd0ec59f2021-03-13 14:25:08 -0700523 AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ,
524 SyncOrdering::kColorAttachment);
John Zulauf7635de32020-05-29 17:14:15 -0600525 action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
John Zulaufd0ec59f2021-03-13 14:25:08 -0700526 AttachmentViewGen::Gen::kRenderArea, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
527 SyncOrdering::kColorAttachment);
John Zulauf7635de32020-05-29 17:14:15 -0600528 }
529 }
530 }
531
532 // Depth stencil resolve only if the extension is present
Mark Lobodzinski1f887d32020-12-30 15:31:33 -0700533 const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
John Zulauf7635de32020-05-29 17:14:15 -0600534 if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
535 (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
536 (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
537 const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
538 const auto src_ci = attachment_ci[src_at];
539 // The formats are required to match so we can pick either
540 const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
541 const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
542 const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
John Zulauf7635de32020-05-29 17:14:15 -0600543
544 // Figure out which aspects are actually touched during resolve operations
545 const char *aspect_string = nullptr;
John Zulaufd0ec59f2021-03-13 14:25:08 -0700546 AttachmentViewGen::Gen gen_type = AttachmentViewGen::Gen::kRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600547 if (resolve_depth && resolve_stencil) {
John Zulauf7635de32020-05-29 17:14:15 -0600548 aspect_string = "depth/stencil";
549 } else if (resolve_depth) {
550 // Validate depth only
John Zulaufd0ec59f2021-03-13 14:25:08 -0700551 gen_type = AttachmentViewGen::Gen::kDepthOnlyRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600552 aspect_string = "depth";
553 } else if (resolve_stencil) {
554 // Validate all stencil only
John Zulaufd0ec59f2021-03-13 14:25:08 -0700555 gen_type = AttachmentViewGen::Gen::kStencilOnlyRenderArea;
John Zulauf7635de32020-05-29 17:14:15 -0600556 aspect_string = "stencil";
557 }
558
John Zulaufd0ec59f2021-03-13 14:25:08 -0700559 if (aspect_string) {
560 action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at], gen_type,
561 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster);
562 action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at], gen_type,
563 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulauf7635de32020-05-29 17:14:15 -0600564 }
565 }
566}
567
568// Action for validating resolve operations
569class ValidateResolveAction {
570 public:
John Zulauffaea0ee2021-01-14 14:01:32 -0700571 ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
John Zulauf64ffe552021-02-06 10:25:07 -0700572 const CommandExecutionContext &ex_context, const char *func_name)
John Zulauf7635de32020-05-29 17:14:15 -0600573 : render_pass_(render_pass),
574 subpass_(subpass),
575 context_(context),
John Zulauf64ffe552021-02-06 10:25:07 -0700576 ex_context_(ex_context),
John Zulauf7635de32020-05-29 17:14:15 -0600577 func_name_(func_name),
578 skip_(false) {}
579 void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
John Zulaufd0ec59f2021-03-13 14:25:08 -0700580 const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage,
581 SyncOrdering ordering_rule) {
John Zulauf7635de32020-05-29 17:14:15 -0600582 HazardResult hazard;
John Zulaufd0ec59f2021-03-13 14:25:08 -0700583 hazard = context_.DetectHazard(view_gen, gen_type, current_usage, ordering_rule);
John Zulauf7635de32020-05-29 17:14:15 -0600584 if (hazard.hazard) {
John Zulauffaea0ee2021-01-14 14:01:32 -0700585 skip_ |=
John Zulauf64ffe552021-02-06 10:25:07 -0700586 ex_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -0700587 "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
588 " to resolve attachment %" PRIu32 ". Access info %s.",
589 func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
John Zulauf64ffe552021-02-06 10:25:07 -0700590 attachment_name, src_at, dst_at, ex_context_.FormatUsage(hazard).c_str());
John Zulauf7635de32020-05-29 17:14:15 -0600591 }
592 }
593 // Providing a mechanism for the constructing caller to get the result of the validation
594 bool GetSkip() const { return skip_; }
595
596 private:
597 VkRenderPass render_pass_;
598 const uint32_t subpass_;
599 const AccessContext &context_;
John Zulauf64ffe552021-02-06 10:25:07 -0700600 const CommandExecutionContext &ex_context_;
John Zulauf7635de32020-05-29 17:14:15 -0600601 const char *func_name_;
602 bool skip_;
603};
604
605// Update action for resolve operations
606class UpdateStateResolveAction {
607 public:
608 UpdateStateResolveAction(AccessContext &context, const ResourceUsageTag &tag) : context_(context), tag_(tag) {}
John Zulaufd0ec59f2021-03-13 14:25:08 -0700609 void operator()(const char *, const char *, uint32_t, uint32_t, const AttachmentViewGen &view_gen,
610 AttachmentViewGen::Gen gen_type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) {
John Zulauf7635de32020-05-29 17:14:15 -0600611 // Ignores validation only arguments...
John Zulaufd0ec59f2021-03-13 14:25:08 -0700612 context_.UpdateAccessState(view_gen, gen_type, current_usage, ordering_rule, tag_);
John Zulauf7635de32020-05-29 17:14:15 -0600613 }
614
615 private:
616 AccessContext &context_;
617 const ResourceUsageTag &tag_;
618};
619
John Zulauf59e25072020-07-17 10:55:21 -0600620void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -0700621 const SyncStageAccessFlags &prior_, const ResourceUsageTag &tag_) {
John Zulauf59e25072020-07-17 10:55:21 -0600622 access_state = std::unique_ptr<const ResourceAccessState>(new ResourceAccessState(*access_state_));
623 usage_index = usage_index_;
624 hazard = hazard_;
625 prior_access = prior_;
626 tag = tag_;
627}
628
John Zulauf540266b2020-04-06 18:54:53 -0600629AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
630 const std::vector<SubpassDependencyGraphNode> &dependencies,
John Zulauf1a224292020-06-30 14:52:13 -0600631 const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600632 Reset();
633 const auto &subpass_dep = dependencies[subpass];
John Zulauf22aefed2021-03-11 18:14:35 -0700634 bool has_barrier_from_external = subpass_dep.barrier_from_external.size() > 0U;
635 prev_.reserve(subpass_dep.prev.size() + (has_barrier_from_external ? 1U : 0U));
John Zulauf355e49b2020-04-24 15:11:15 -0600636 prev_by_subpass_.resize(subpass, nullptr); // Can't be more prevs than the subpass we're on
John Zulauf3d84f1b2020-03-09 13:33:25 -0600637 for (const auto &prev_dep : subpass_dep.prev) {
John Zulaufbaea94f2020-09-15 17:55:16 -0600638 const auto prev_pass = prev_dep.first->pass;
639 const auto &prev_barriers = prev_dep.second;
640 assert(prev_dep.second.size());
641 prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
642 prev_by_subpass_[prev_pass] = &prev_.back();
John Zulauf5c5e88d2019-12-26 11:22:02 -0700643 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600644
645 async_.reserve(subpass_dep.async.size());
646 for (const auto async_subpass : subpass_dep.async) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -0700647 async_.emplace_back(&contexts[async_subpass]);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600648 }
John Zulauf22aefed2021-03-11 18:14:35 -0700649 if (has_barrier_from_external) {
650 // Store the barrier from external with the rest, but save a pointer for "by subpass" lookups.
651 prev_.emplace_back(external_context, queue_flags, subpass_dep.barrier_from_external);
652 src_external_ = &prev_.back();
John Zulaufe5da6e52020-03-18 15:32:18 -0600653 }
John Zulaufbaea94f2020-09-15 17:55:16 -0600654 if (subpass_dep.barrier_to_external.size()) {
655 dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600656 }
John Zulauf5c5e88d2019-12-26 11:22:02 -0700657}
658
John Zulauf5f13a792020-03-10 07:31:21 -0600659template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700660HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
John Zulauf540266b2020-04-06 18:54:53 -0600661 const ResourceAccessRange &range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600662 ResourceAccessRangeMap descent_map;
John Zulauf69133422020-05-20 14:55:53 -0600663 ResolvePreviousAccess(type, range, &descent_map, nullptr);
John Zulauf5f13a792020-03-10 07:31:21 -0600664
665 HazardResult hazard;
666 for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
667 hazard = detector.Detect(prev);
668 }
669 return hazard;
670}
671
John Zulauf4a6105a2020-11-17 15:11:05 -0700672template <typename Action>
673void AccessContext::ForAll(Action &&action) {
674 for (const auto address_type : kAddressTypes) {
675 auto &accesses = GetAccessStateMap(address_type);
676 for (const auto &access : accesses) {
677 action(address_type, access);
678 }
679 }
680}
681
John Zulauf3d84f1b2020-03-09 13:33:25 -0600682// A recursive range walker for hazard detection, first for the current context and then recursively (DetectHazardRecur) to walk
683// the DAG of the contexts (for example subpasses)
684template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700685HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
John Zulauf355e49b2020-04-24 15:11:15 -0600686 DetectOptions options) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600687 HazardResult hazard;
John Zulauf5f13a792020-03-10 07:31:21 -0600688
John Zulauf1a224292020-06-30 14:52:13 -0600689 if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
John Zulauf355e49b2020-04-24 15:11:15 -0600690 // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
691 // so we'll check these first
692 for (const auto &async_context : async_) {
693 hazard = async_context->DetectAsyncHazard(type, detector, range);
694 if (hazard.hazard) return hazard;
695 }
John Zulauf5f13a792020-03-10 07:31:21 -0600696 }
697
John Zulauf1a224292020-06-30 14:52:13 -0600698 const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600699
John Zulauf69133422020-05-20 14:55:53 -0600700 const auto &accesses = GetAccessStateMap(type);
John Zulauf3cafbf72021-03-26 16:55:19 -0600701 const auto the_end = accesses.cend(); // End is not invalidated
702 auto pos = accesses.lower_bound(range);
John Zulauf69133422020-05-20 14:55:53 -0600703 ResourceAccessRange gap = {range.begin, range.begin};
John Zulauf5f13a792020-03-10 07:31:21 -0600704
John Zulauf3cafbf72021-03-26 16:55:19 -0600705 while (pos != the_end && pos->first.begin < range.end) {
John Zulauf69133422020-05-20 14:55:53 -0600706 // Cover any leading gap, or gap between entries
707 if (detect_prev) {
708 // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
709 // Cover any leading gap, or gap between entries
710 gap.end = pos->first.begin; // We know this begin is < range.end
John Zulauf355e49b2020-04-24 15:11:15 -0600711 if (gap.non_empty()) {
John Zulauf69133422020-05-20 14:55:53 -0600712 // Recur on all gaps
John Zulauf16adfc92020-04-08 10:28:33 -0600713 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf5f13a792020-03-10 07:31:21 -0600714 if (hazard.hazard) return hazard;
715 }
John Zulauf69133422020-05-20 14:55:53 -0600716 // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
717 gap.begin = pos->first.end;
718 }
719
720 hazard = detector.Detect(pos);
721 if (hazard.hazard) return hazard;
John Zulauf3cafbf72021-03-26 16:55:19 -0600722 ++pos;
John Zulauf69133422020-05-20 14:55:53 -0600723 }
724
725 if (detect_prev) {
726 // Detect in the trailing gap as needed
727 gap.end = range.end;
728 if (gap.non_empty()) {
729 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf16adfc92020-04-08 10:28:33 -0600730 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600731 }
732
733 return hazard;
734}
735
736// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
737template <typename Detector>
John Zulauf43cc7462020-12-03 12:33:12 -0700738HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
739 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -0600740 auto &accesses = GetAccessStateMap(type);
John Zulauf3cafbf72021-03-26 16:55:19 -0600741 auto pos = accesses.lower_bound(range);
742 const auto the_end = accesses.end();
John Zulauf16adfc92020-04-08 10:28:33 -0600743
John Zulauf3d84f1b2020-03-09 13:33:25 -0600744 HazardResult hazard;
John Zulauf3cafbf72021-03-26 16:55:19 -0600745 while (pos != the_end && pos->first.begin < range.end) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -0700746 hazard = detector.DetectAsync(pos, start_tag_);
John Zulauf3cafbf72021-03-26 16:55:19 -0600747 if (hazard.hazard) break;
748 ++pos;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600749 }
John Zulauf16adfc92020-04-08 10:28:33 -0600750
John Zulauf3d84f1b2020-03-09 13:33:25 -0600751 return hazard;
752}
753
John Zulaufb02c1eb2020-10-06 16:33:36 -0600754struct ApplySubpassTransitionBarriersAction {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -0700755 explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
John Zulaufb02c1eb2020-10-06 16:33:36 -0600756 void operator()(ResourceAccessState *access) const {
757 assert(access);
758 access->ApplyBarriers(barriers, true);
759 }
760 const std::vector<SyncBarrier> &barriers;
761};
762
John Zulauf22aefed2021-03-11 18:14:35 -0700763struct ApplyTrackbackStackAction {
764 explicit ApplyTrackbackStackAction(const std::vector<SyncBarrier> &barriers_,
765 const ResourceAccessStateFunction *previous_barrier_ = nullptr)
766 : barriers(barriers_), previous_barrier(previous_barrier_) {}
John Zulaufb02c1eb2020-10-06 16:33:36 -0600767 void operator()(ResourceAccessState *access) const {
768 assert(access);
769 assert(!access->HasPendingState());
770 access->ApplyBarriers(barriers, false);
771 access->ApplyPendingBarriers(kCurrentCommandTag);
John Zulauf22aefed2021-03-11 18:14:35 -0700772 if (previous_barrier) {
773 assert(bool(*previous_barrier));
774 (*previous_barrier)(access);
775 }
John Zulaufb02c1eb2020-10-06 16:33:36 -0600776 }
777 const std::vector<SyncBarrier> &barriers;
John Zulauf22aefed2021-03-11 18:14:35 -0700778 const ResourceAccessStateFunction *previous_barrier;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600779};
780
781 // Splits a single map entry into pieces matching the entries in [first, last). The total range over [first, last) must be
782 // contained within entry. Entry must be an iterator pointing to dest; first and last must be iterators pointing to a
783// *different* map from dest.
784// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
785// range [first, last)
786template <typename BarrierAction>
John Zulauf355e49b2020-04-24 15:11:15 -0600787static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
788 ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
John Zulaufb02c1eb2020-10-06 16:33:36 -0600789 BarrierAction &barrier_action) {
John Zulauf355e49b2020-04-24 15:11:15 -0600790 auto at = entry;
791 for (auto pos = first; pos != last; ++pos) {
792 // Every member of the input iterator range must fit within the remaining portion of entry
793 assert(at->first.includes(pos->first));
794 assert(at != dest->end());
795 // Trim up at to the same size as the entry to resolve
796 at = sparse_container::split(at, *dest, pos->first);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600797 auto access = pos->second; // intentional copy
798 barrier_action(&access);
John Zulauf355e49b2020-04-24 15:11:15 -0600799 at->second.Resolve(access);
800 ++at; // Go to the remaining unused section of entry
801 }
802}
803
John Zulaufa0a98292020-09-18 09:30:10 -0600804static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
805 SyncBarrier merged = {};
806 for (const auto &barrier : barriers) {
807 merged.Merge(barrier);
808 }
809 return merged;
810}
811
John Zulaufb02c1eb2020-10-06 16:33:36 -0600812template <typename BarrierAction>
John Zulauf43cc7462020-12-03 12:33:12 -0700813void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
John Zulauf355e49b2020-04-24 15:11:15 -0600814 ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
815 bool recur_to_infill) const {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600816 if (!range.non_empty()) return;
817
John Zulauf355e49b2020-04-24 15:11:15 -0600818 ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
819 while (current->range.non_empty() && range.includes(current->range.begin)) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600820 const auto current_range = current->range & range;
John Zulauf16adfc92020-04-08 10:28:33 -0600821 if (current->pos_B->valid) {
822 const auto &src_pos = current->pos_B->lower_bound;
John Zulaufb02c1eb2020-10-06 16:33:36 -0600823 auto access = src_pos->second; // intentional copy
824 barrier_action(&access);
825
John Zulauf16adfc92020-04-08 10:28:33 -0600826 if (current->pos_A->valid) {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600827 const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
828 trimmed->second.Resolve(access);
829 current.invalidate_A(trimmed);
John Zulauf5f13a792020-03-10 07:31:21 -0600830 } else {
John Zulauf3bcab5e2020-06-19 14:42:32 -0600831 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
John Zulauf355e49b2020-04-24 15:11:15 -0600832 current.invalidate_A(inserted); // Update the parallel iterator to point at the insert segment
John Zulauf5f13a792020-03-10 07:31:21 -0600833 }
John Zulauf16adfc92020-04-08 10:28:33 -0600834 } else {
835 // we have to descend to fill this gap
836 if (recur_to_infill) {
John Zulauf22aefed2021-03-11 18:14:35 -0700837 ResourceAccessRange recurrence_range = current_range;
838 // The current context is empty for the current range, so recur to fill the gap.
839 // Since we will be recurring back up the DAG, expand the gap descent to cover the full range for which B
840 // is not valid, to minimize that recurrence
841 if (current->pos_B.at_end()) {
842 // Do the remainder here....
843 recurrence_range.end = range.end;
John Zulauf355e49b2020-04-24 15:11:15 -0600844 } else {
John Zulauf22aefed2021-03-11 18:14:35 -0700845 // Recur only over the range until B becomes valid (within the limits of range).
846 recurrence_range.end = std::min(range.end, current->pos_B->lower_bound->first.begin);
John Zulauf355e49b2020-04-24 15:11:15 -0600847 }
John Zulauf22aefed2021-03-11 18:14:35 -0700848 ResolvePreviousAccessStack(type, recurrence_range, resolve_map, infill_state, barrier_action);
849
John Zulauf355e49b2020-04-24 15:11:15 -0600850 // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
851 // iteration of the outer while loop.
852
853 // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
854 // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
855 // we stepped on the dest map
John Zulauf22aefed2021-03-11 18:14:35 -0700856 const auto seek_to = recurrence_range.end - 1; // The subtraction is safe as range can't be empty (loop condition)
locke-lunarg88dbb542020-06-23 22:05:42 -0600857 current.invalidate_A(); // Changes current->range
John Zulauf355e49b2020-04-24 15:11:15 -0600858 current.seek(seek_to);
859 } else if (!current->pos_A->valid && infill_state) {
860 // If we didn't find anything in the current range, and we aren't reccuring... we infill if required
861 auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
862 current.invalidate_A(inserted); // Update the parallel iterator to point at the correct segment after insert
John Zulauf16adfc92020-04-08 10:28:33 -0600863 }
John Zulauf5f13a792020-03-10 07:31:21 -0600864 }
John Zulauf16adfc92020-04-08 10:28:33 -0600865 ++current;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600866 }
John Zulauf1a224292020-06-30 14:52:13 -0600867
868 // Infill if range goes passed both the current and resolve map prior contents
869 if (recur_to_infill && (current->range.end < range.end)) {
870 ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
John Zulauf22aefed2021-03-11 18:14:35 -0700871 ResolvePreviousAccessStack<BarrierAction>(type, trailing_fill_range, resolve_map, infill_state, barrier_action);
John Zulauf1a224292020-06-30 14:52:13 -0600872 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600873}
874
John Zulauf22aefed2021-03-11 18:14:35 -0700875template <typename BarrierAction>
876void AccessContext::ResolvePreviousAccessStack(AccessAddressType type, const ResourceAccessRange &range,
877 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
878 const BarrierAction &previous_barrier) const {
879 ResourceAccessStateFunction stacked_barrier(std::ref(previous_barrier));
880 ResolvePreviousAccess(type, range, descent_map, infill_state, &stacked_barrier);
881}
882
John Zulauf43cc7462020-12-03 12:33:12 -0700883void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
John Zulauf22aefed2021-03-11 18:14:35 -0700884 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state,
885 const ResourceAccessStateFunction *previous_barrier) const {
886 if (prev_.size() == 0) {
John Zulauf5f13a792020-03-10 07:31:21 -0600887 if (range.non_empty() && infill_state) {
John Zulauf22aefed2021-03-11 18:14:35 -0700888 // Fill the empty portions of descent_map with the default_state with the barrier function applied (if present)
889 ResourceAccessState state_copy;
890 if (previous_barrier) {
891 assert(bool(*previous_barrier));
892 state_copy = *infill_state;
893 (*previous_barrier)(&state_copy);
894 infill_state = &state_copy;
895 }
896 sparse_container::update_range_value(*descent_map, range, *infill_state,
897 sparse_container::value_precedence::prefer_dest);
John Zulauf5f13a792020-03-10 07:31:21 -0600898 }
899 } else {
900 // Look for something to fill the gap further along.
901 for (const auto &prev_dep : prev_) {
John Zulauf22aefed2021-03-11 18:14:35 -0700902 const ApplyTrackbackStackAction barrier_action(prev_dep.barriers, previous_barrier);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600903 prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600904 }
John Zulauf5f13a792020-03-10 07:31:21 -0600905 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600906}
907
John Zulauf4a6105a2020-11-17 15:11:05 -0700908// Non-lazy import of all accesses, WaitEvents needs this.
909void AccessContext::ResolvePreviousAccesses() {
910 ResourceAccessState default_state;
John Zulauf22aefed2021-03-11 18:14:35 -0700911 if (!prev_.size()) return; // If no previous contexts, nothing to do
912
John Zulauf4a6105a2020-11-17 15:11:05 -0700913 for (const auto address_type : kAddressTypes) {
914 ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
915 }
916}
917
John Zulauf43cc7462020-12-03 12:33:12 -0700918AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
919 return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
John Zulauf16adfc92020-04-08 10:28:33 -0600920}
921
John Zulauf1507ee42020-05-18 11:33:09 -0600922static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
923 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
924 : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE;
925 return stage_access;
926}
927static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
928 const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
929 : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE;
930 return stage_access;
931}
932
John Zulauf7635de32020-05-29 17:14:15 -0600933// Caller must manage returned pointer
934static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -0700935 uint32_t subpass, const AttachmentViewGenVector &attachment_views) {
John Zulauf7635de32020-05-29 17:14:15 -0600936 auto *proxy = new AccessContext(context);
John Zulaufd0ec59f2021-03-13 14:25:08 -0700937 proxy->UpdateAttachmentResolveAccess(rp_state, attachment_views, subpass, kCurrentCommandTag);
938 proxy->UpdateAttachmentStoreAccess(rp_state, attachment_views, subpass, kCurrentCommandTag);
John Zulauf7635de32020-05-29 17:14:15 -0600939 return proxy;
940}
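// Editor's note: callers typically hand the returned pointer to a std::unique_ptr, as ValidateLayoutTransitions does
// below, e.g.:
//   std::unique_ptr<AccessContext> proxy(CreateStoreResolveProxyContext(context, rp_state, subpass, attachment_views));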
941
John Zulaufb02c1eb2020-10-06 16:33:36 -0600942template <typename BarrierAction>
John Zulaufd0ec59f2021-03-13 14:25:08 -0700943void AccessContext::ResolveAccessRange(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
944 BarrierAction &barrier_action, ResourceAccessRangeMap *descent_map,
945 const ResourceAccessState *infill_state) const {
946 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
947 if (!attachment_gen) return;
948
949 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
950 const AccessAddressType address_type = view_gen.GetAddressType();
951 for (; range_gen->non_empty(); ++range_gen) {
952 ResolveAccessRange(address_type, *range_gen, barrier_action, descent_map, infill_state);
John Zulaufb02c1eb2020-10-06 16:33:36 -0600953 }
John Zulauf62f10592020-04-03 12:20:02 -0600954}
955
John Zulauf7635de32020-05-29 17:14:15 -0600956// Layout transitions are handled as if they were occurring at the beginning of the next subpass
John Zulauf64ffe552021-02-06 10:25:07 -0700957bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -0600958 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -0700959 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -0600960 bool skip = false;
John Zulauf7635de32020-05-29 17:14:15 -0600961 // As validation methods are const and precede the record/update phase, for any transitions from the immediately
962 // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
963 // those effects have not been recorded yet.
964 //
965 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
966 // to apply and only copy then, if this proves a hot spot.
967 std::unique_ptr<AccessContext> proxy_for_prev;
968 TrackBack proxy_track_back;
969
John Zulauf355e49b2020-04-24 15:11:15 -0600970 const auto &transitions = rp_state.subpass_transitions[subpass];
971 for (const auto &transition : transitions) {
John Zulauf7635de32020-05-29 17:14:15 -0600972 const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
973
974 const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
John Zulauf22aefed2021-03-11 18:14:35 -0700975 assert(track_back);
John Zulauf7635de32020-05-29 17:14:15 -0600976 if (prev_needs_proxy) {
977 if (!proxy_for_prev) {
John Zulaufd0ec59f2021-03-13 14:25:08 -0700978 proxy_for_prev.reset(
979 CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass, attachment_views));
John Zulauf7635de32020-05-29 17:14:15 -0600980 proxy_track_back = *track_back;
981 proxy_track_back.context = proxy_for_prev.get();
982 }
983 track_back = &proxy_track_back;
984 }
985 auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
John Zulauf355e49b2020-04-24 15:11:15 -0600986 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -0600987 skip |= ex_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -0700988 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
989 " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
990 func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
991 string_VkImageLayout(transition.old_layout),
992 string_VkImageLayout(transition.new_layout),
John Zulauf64ffe552021-02-06 10:25:07 -0700993 ex_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -0600994 }
995 }
996 return skip;
997}
998
John Zulauf64ffe552021-02-06 10:25:07 -0700999bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001000 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001001 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001002 bool skip = false;
1003 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufa0a98292020-09-18 09:30:10 -06001004
John Zulauf1507ee42020-05-18 11:33:09 -06001005 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1006 if (subpass == rp_state.attachment_first_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001007 const auto &view_gen = attachment_views[i];
1008 if (!view_gen.IsValid()) continue;
John Zulauf1507ee42020-05-18 11:33:09 -06001009 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001010
1011 // Need to check in the following way
1012 // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1013 // vs. transition
1014 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1015 // for each aspect loaded.
1016
1017 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001018 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001019 const bool is_color = !(has_depth || has_stencil);
1020
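            // Map the load ops to the stage/access usage they imply for the color or depth/stencil aspects checked below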
1021 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001022 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001023
John Zulaufaff20662020-06-01 14:07:58 -06001024 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001025 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001026
John Zulaufb02c1eb2020-10-06 16:33:36 -06001027 bool checked_stencil = false;
1028 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001029 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea, load_index, SyncOrdering::kColorAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001030 aspect = "color";
1031 } else {
1032 if (has_depth) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001033 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea, load_index,
1034 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001035 aspect = "depth";
1036 }
1037 if (!hazard.hazard && has_stencil) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001038 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea, stencil_load_index,
1039 SyncOrdering::kDepthStencilAttachment);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001040 aspect = "stencil";
1041 checked_stencil = true;
1042 }
1043 }
1044
1045 if (hazard.hazard) {
1046 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulauf64ffe552021-02-06 10:25:07 -07001047 const auto &sync_state = ex_context.GetSyncState();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001048 if (hazard.tag == kCurrentCommandTag) {
1049 // Hazard vs. ILT
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001050 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulaufb02c1eb2020-10-06 16:33:36 -06001051 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1052 " aspect %s during load with loadOp %s.",
1053 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1054 } else {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001055 skip |= sync_state.LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauf1507ee42020-05-18 11:33:09 -06001056 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001057 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001058 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauf64ffe552021-02-06 10:25:07 -07001059 ex_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001060 }
1061 }
1062 }
1063 }
1064 return skip;
1065}
1066
John Zulaufaff20662020-06-01 14:07:58 -06001067// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1068// because of the ordering guarantees w.r.t. sample access and that the resolve validation hasn't altered the state, because
1069// store is part of the same Next/End operation.
1070// The latter is handled in layout transition validation directly
John Zulauf64ffe552021-02-06 10:25:07 -07001071bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001072 const VkRect2D &render_area, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001073 const AttachmentViewGenVector &attachment_views, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06001074 bool skip = false;
1075 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001076
1077 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1078 if (subpass == rp_state.attachment_last_subpass[i]) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001079 const AttachmentViewGen &view_gen = attachment_views[i];
1080 if (!view_gen.IsValid()) continue;
John Zulaufaff20662020-06-01 14:07:58 -06001081 const auto &ci = attachment_ci[i];
1082
1083 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1084 // so we assume that an implementation is *free* to write in that case, meaning that for correctness
1085 // sake, we treat DONT_CARE as writing.
1086 const bool has_depth = FormatHasDepth(ci.format);
1087 const bool has_stencil = FormatHasStencil(ci.format);
1088 const bool is_color = !(has_depth || has_stencil);
1089 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1090 if (!has_stencil && !store_op_stores) continue;
1091
1092 HazardResult hazard;
1093 const char *aspect = nullptr;
1094 bool checked_stencil = false;
1095 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001096 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
1097 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001098 aspect = "color";
1099 } else {
1100 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
John Zulaufaff20662020-06-01 14:07:58 -06001101 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001102 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1103 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001104 aspect = "depth";
1105 }
1106 if (!hazard.hazard && has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001107 hazard = DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1108 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster);
John Zulaufaff20662020-06-01 14:07:58 -06001109 aspect = "stencil";
1110 checked_stencil = true;
1111 }
1112 }
1113
1114 if (hazard.hazard) {
1115 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1116 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001117 skip |= ex_context.GetSyncState().LogError(rp_state.renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07001118 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1119 " %s aspect during store with %s %s. Access info %s",
1120 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
John Zulauf64ffe552021-02-06 10:25:07 -07001121 op_type_string, store_op_string, ex_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001122 }
1123 }
1124 }
1125 return skip;
1126}
1127
John Zulauf64ffe552021-02-06 10:25:07 -07001128bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001129 const VkRect2D &render_area, const AttachmentViewGenVector &attachment_views,
1130 const char *func_name, uint32_t subpass) const {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001131 ValidateResolveAction validate_action(rp_state.renderPass(), subpass, *this, ex_context, func_name);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001132 ResolveOperation(validate_action, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001133 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001134}
1135
John Zulauf3d84f1b2020-03-09 13:33:25 -06001136class HazardDetector {
1137 SyncStageAccessIndex usage_index_;
1138
1139 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001140 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001141 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1142 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001143 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001144 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001145};
1146
John Zulauf69133422020-05-20 14:55:53 -06001147class HazardDetectorWithOrdering {
1148 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001149 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001150
1151 public:
1152 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001153 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001154 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001155 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1156 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001157 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001158 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001159};
1160
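// Illustrative sketch (not part of the layer, names hypothetical): anything with this shape can be plugged into the
// templated AccessContext::DetectHazard() overloads below. Detect() is called for each intersecting entry of the
// access state map; DetectAsync() is used when checking asynchronous (racing) contexts.
//
//   struct MyDetector {
//       HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const;
//       HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos,
//                                const ResourceUsageTag &start_tag) const;
//   };
//
// The range-based DetectHazard() walks the generated ranges and returns the first non-empty HazardResult.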
John Zulauf16adfc92020-04-08 10:28:33 -06001161HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001162 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001163 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001164 const auto base_address = ResourceBaseAddress(buffer);
1165 HazardDetector detector(usage_index);
1166 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001167}
1168
John Zulauf69133422020-05-20 14:55:53 -06001169template <typename Detector>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001170HazardResult AccessContext::DetectHazard(Detector &detector, const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1171 DetectOptions options) const {
1172 const auto *attachment_gen = view_gen.GetRangeGen(gen_type);
1173 if (!attachment_gen) return HazardResult();
1174
1175 subresource_adapter::ImageRangeGenerator range_gen(*attachment_gen);
1176 const auto address_type = view_gen.GetAddressType();
1177 for (; range_gen->non_empty(); ++range_gen) {
1178 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1179 if (hazard.hazard) return hazard;
1180 }
1181
1182 return HazardResult();
1183}
1184
1185template <typename Detector>
John Zulauf69133422020-05-20 14:55:53 -06001186HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1187 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1188 const VkExtent3D &extent, DetectOptions options) const {
1189 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001190 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001191 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1192 base_address);
1193 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001194 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001195 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001196 if (hazard.hazard) return hazard;
1197 }
1198 return HazardResult();
1199}
John Zulauf110413c2021-03-20 05:38:38 -06001200template <typename Detector>
1201HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1202 const VkImageSubresourceRange &subresource_range, DetectOptions options) const {
1203 if (!SimpleBinding(image)) return HazardResult();
1204 const auto base_address = ResourceBaseAddress(image);
1205 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1206 const auto address_type = ImageAddressType(image);
1207 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf110413c2021-03-20 05:38:38 -06001208 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
1209 if (hazard.hazard) return hazard;
1210 }
1211 return HazardResult();
1212}
John Zulauf69133422020-05-20 14:55:53 -06001213
John Zulauf540266b2020-04-06 18:54:53 -06001214HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1215 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1216 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001217 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1218 subresource.layerCount};
John Zulauf110413c2021-03-20 05:38:38 -06001219 HazardDetector detector(current_usage);
1220 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf1507ee42020-05-18 11:33:09 -06001221}
1222
1223HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf110413c2021-03-20 05:38:38 -06001224 const VkImageSubresourceRange &subresource_range) const {
John Zulauf69133422020-05-20 14:55:53 -06001225 HazardDetector detector(current_usage);
John Zulauf110413c2021-03-20 05:38:38 -06001226 return DetectHazard(detector, image, subresource_range, DetectOptions::kDetectAll);
John Zulauf69133422020-05-20 14:55:53 -06001227}
1228
John Zulaufd0ec59f2021-03-13 14:25:08 -07001229HazardResult AccessContext::DetectHazard(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1230 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule) const {
1231 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
1232 return DetectHazard(detector, view_gen, gen_type, DetectOptions::kDetectAll);
1233}
1234
John Zulauf69133422020-05-20 14:55:53 -06001235HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001236 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001237 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001238 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001239 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001240}
1241
John Zulauf3d84f1b2020-03-09 13:33:25 -06001242class BarrierHazardDetector {
1243 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001244 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001245 SyncStageAccessFlags src_access_scope)
1246 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1247
John Zulauf5f13a792020-03-10 07:31:21 -06001248 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1249 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001250 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001251 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001252 // Async barrier hazard detection can use the same path, as the usage index is a write (IsWrite), not a read
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001253 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001254 }
1255
1256 private:
1257 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001258 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001259 SyncStageAccessFlags src_access_scope_;
1260};
1261
John Zulauf4a6105a2020-11-17 15:11:05 -07001262class EventBarrierHazardDetector {
1263 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001264 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001265 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
1266 const ResourceUsageTag &scope_tag)
1267 : usage_index_(usage_index),
1268 src_exec_scope_(src_exec_scope),
1269 src_access_scope_(src_access_scope),
1270 event_scope_(event_scope),
1271 scope_pos_(event_scope.cbegin()),
1272 scope_end_(event_scope.cend()),
1273 scope_tag_(scope_tag) {}
1274
1275 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1276 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1277 // Need to find a more efficient sync, since we know pos->first is strictly increasing call to call
1278 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1279 if (scope_pos_ == scope_end_) return HazardResult();
1280 if (!scope_pos_->first.intersects(pos->first)) {
1281 event_scope_.lower_bound(pos->first);
1282 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1283 }
1284
1285 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1286 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1287 }
1288 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1289 // Async barrier hazard detection can use the same path, as the usage index is a write (IsWrite), not a read
1290 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1291 }
1292
1293 private:
1294 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001295 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001296 SyncStageAccessFlags src_access_scope_;
1297 const SyncEventState::ScopeMap &event_scope_;
1298 SyncEventState::ScopeMap::const_iterator scope_pos_;
1299 SyncEventState::ScopeMap::const_iterator scope_end_;
1300 const ResourceUsageTag &scope_tag_;
1301};
1302
Jeremy Gebben40a22942020-12-22 14:22:06 -07001303HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001304 const SyncStageAccessFlags &src_access_scope,
1305 const VkImageSubresourceRange &subresource_range,
1306 const SyncEventState &sync_event, DetectOptions options) const {
1307 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1308 // first access scope map to use, and there's no easy way to plumb it in below.
1309 const auto address_type = ImageAddressType(image);
1310 const auto &event_scope = sync_event.FirstScope(address_type);
1311
1312 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1313 event_scope, sync_event.first_scope_tag);
John Zulauf110413c2021-03-20 05:38:38 -06001314 return DetectHazard(detector, image, subresource_range, options);
John Zulauf4a6105a2020-11-17 15:11:05 -07001315}
1316
John Zulaufd0ec59f2021-03-13 14:25:08 -07001317HazardResult AccessContext::DetectImageBarrierHazard(const AttachmentViewGen &view_gen, const SyncBarrier &barrier,
1318 DetectOptions options) const {
1319 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, barrier.src_exec_scope.exec_scope,
1320 barrier.src_access_scope);
1321 return DetectHazard(detector, view_gen, AttachmentViewGen::Gen::kViewSubresource, options);
1322}
1323
Jeremy Gebben40a22942020-12-22 14:22:06 -07001324HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001325 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001326 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001327 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001328 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
John Zulauf110413c2021-03-20 05:38:38 -06001329 return DetectHazard(detector, image, subresource_range, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001330}
1331
Jeremy Gebben40a22942020-12-22 14:22:06 -07001332HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001333 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001334 const VkImageMemoryBarrier &barrier) const {
1335 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1336 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1337 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1338}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001339HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001340 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulauf110413c2021-03-20 05:38:38 -06001341 image_barrier.barrier.src_access_scope, image_barrier.range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001342}
John Zulauf355e49b2020-04-24 15:11:15 -06001343
John Zulauf9cb530d2019-09-30 14:14:10 -06001344template <typename Flags, typename Map>
1345SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1346 SyncStageAccessFlags scope = 0;
1347 for (const auto &bit_scope : map) {
1348 if (flag_mask < bit_scope.first) break;
1349
1350 if (flag_mask & bit_scope.first) {
1351 scope |= bit_scope.second;
1352 }
1353 }
1354 return scope;
1355}
1356
Jeremy Gebben40a22942020-12-22 14:22:06 -07001357SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001358 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1359}
1360
Jeremy Gebben40a22942020-12-22 14:22:06 -07001361SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1362 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001363}
1364
Jeremy Gebben40a22942020-12-22 14:22:06 -07001365// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1366SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001367 // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1368 // accesses. (After factoring out common terms, the union of the per-stage/per-access intersections equals the
1369 // intersection of the union of all stage/access types for all the stages with the same union for the access mask.)
John Zulauf9cb530d2019-09-30 14:14:10 -06001370 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1371}
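// Worked example (illustrative): for stages = FRAGMENT_SHADER and accesses = SHADER_READ, AccessScopeByStage()
// yields every stage/access bit usable from the fragment shader stage, AccessScopeByAccess() yields every
// stage/access bit reachable through a shader-read access at any stage, and their intersection is just the
// fragment-shader read bits, which is exactly the scope such a stage/access pair covers.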
1372
1373template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001374void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001375 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
1376 // that do incremental updates)
John Zulauf4a6105a2020-11-17 15:11:05 -07001377 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001378 auto pos = accesses->lower_bound(range);
1379 if (pos == accesses->end() || !pos->first.intersects(range)) {
1380 // The range is empty, fill it with a default value.
1381 pos = action.Infill(accesses, pos, range);
1382 } else if (range.begin < pos->first.begin) {
1383 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001384 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001385 } else if (pos->first.begin < range.begin) {
1386 // Trim the beginning if needed
1387 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1388 ++pos;
1389 }
1390
1391 const auto the_end = accesses->end();
1392 while ((pos != the_end) && pos->first.intersects(range)) {
1393 if (pos->first.end > range.end) {
1394 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1395 }
1396
1397 pos = action(accesses, pos);
1398 if (pos == the_end) break;
1399
1400 auto next = pos;
1401 ++next;
1402 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1403 // Need to infill if next is disjoint
1404 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001405 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001406 next = action.Infill(accesses, next, new_range);
1407 }
1408 pos = next;
1409 }
1410}
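// Illustrative walk (hypothetical ranges): given existing map entries [0,4) and [8,12) and an update over [2,10)
// with an infilling action such as UpdateMemoryAccessStateFunctor:
//   - [0,4) is split at 2, so only [2,4) receives the action
//   - the gap [4,8) is infilled from prior/default state and then receives the action
//   - [8,12) is split at 10, so only [8,10) receives the action
// leaving [0,2) and [10,12) untouched.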
John Zulaufd5115702021-01-18 12:34:33 -07001411
1412// Give a comparable interface for range generators and ranges
1413template <typename Action>
1414inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
1415 assert(range);
1416 UpdateMemoryAccessState(accesses, *range, action);
1417}
1418
John Zulauf4a6105a2020-11-17 15:11:05 -07001419template <typename Action, typename RangeGen>
1420void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1421 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001422 RangeGen &range_gen = *range_gen_arg; // Style requires non-const arguments be passed by *, but dereferencing the pointer for every iterator operation is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001423 for (; range_gen->non_empty(); ++range_gen) {
1424 UpdateMemoryAccessState(accesses, *range_gen, action);
1425 }
1426}
John Zulauf9cb530d2019-09-30 14:14:10 -06001427
John Zulaufd0ec59f2021-03-13 14:25:08 -07001428template <typename Action, typename RangeGen>
1429void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, const RangeGen &range_gen_prebuilt) {
1430 RangeGen range_gen(range_gen_prebuilt); // RangeGenerators can be expensive to create from scratch... initialize from built
1431 for (; range_gen->non_empty(); ++range_gen) {
1432 UpdateMemoryAccessState(accesses, *range_gen, action);
1433 }
1434}
John Zulauf9cb530d2019-09-30 14:14:10 -06001435struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001436 using Iterator = ResourceAccessRangeMap::iterator;
1437 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001438 // this is only called on gaps, and never returns a gap.
1439 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001440 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001441 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001442 }
John Zulauf5f13a792020-03-10 07:31:21 -06001443
John Zulauf5c5e88d2019-12-26 11:22:02 -07001444 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001445 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001446 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001447 return pos;
1448 }
1449
John Zulauf43cc7462020-12-03 12:33:12 -07001450 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001451 SyncOrdering ordering_rule_, const ResourceUsageTag &tag_)
1452 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001453 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001454 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001455 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001456 const SyncOrdering ordering_rule;
John Zulauf9cb530d2019-09-30 14:14:10 -06001457 const ResourceUsageTag &tag;
1458};
1459
John Zulauf4a6105a2020-11-17 15:11:05 -07001460// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001461struct PipelineBarrierOp {
1462 SyncBarrier barrier;
1463 bool layout_transition;
1464 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1465 : barrier(barrier_), layout_transition(layout_transition_) {}
1466 PipelineBarrierOp() = default;
John Zulaufd5115702021-01-18 12:34:33 -07001467 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf1e331ec2020-12-04 18:29:38 -07001468 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1469};
John Zulauf4a6105a2020-11-17 15:11:05 -07001470// The barrier operation for wait events
1471struct WaitEventBarrierOp {
1472 const ResourceUsageTag *scope_tag;
1473 SyncBarrier barrier;
1474 bool layout_transition;
1475 WaitEventBarrierOp(const ResourceUsageTag &scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1476 : scope_tag(&scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
1477 WaitEventBarrierOp() = default;
1478 void operator()(ResourceAccessState *access_state) const {
1479 assert(scope_tag); // Not valid to have a non-scope op executed, default construct included for std::vector support
1480 access_state->ApplyBarrier(*scope_tag, barrier, layout_transition);
1481 }
1482};
John Zulauf1e331ec2020-12-04 18:29:38 -07001483
John Zulauf4a6105a2020-11-17 15:11:05 -07001484// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1485// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1486// of a collection is known/present.
John Zulauf1e331ec2020-12-04 18:29:38 -07001487template <typename BarrierOp>
John Zulauf89311b42020-09-29 16:28:47 -06001488class ApplyBarrierOpsFunctor {
1489 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001490 using Iterator = ResourceAccessRangeMap::iterator;
1491 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -06001492
John Zulauf5c5e88d2019-12-26 11:22:02 -07001493 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001494 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001495 for (const auto &op : barrier_ops_) {
1496 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001497 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001498
John Zulauf89311b42020-09-29 16:28:47 -06001499 if (resolve_) {
1500 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1501 // another walk
1502 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001503 }
1504 return pos;
1505 }
1506
John Zulauf89311b42020-09-29 16:28:47 -06001507 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulaufd5115702021-01-18 12:34:33 -07001508 ApplyBarrierOpsFunctor(bool resolve, size_t size_hint, const ResourceUsageTag &tag)
1509 : resolve_(resolve), barrier_ops_(), tag_(tag) {
1510 barrier_ops_.reserve(size_hint);
1511 }
1512 void EmplaceBack(const BarrierOp &op) { barrier_ops_.emplace_back(op); }
John Zulauf89311b42020-09-29 16:28:47 -06001513
1514 private:
1515 bool resolve_;
John Zulaufd5115702021-01-18 12:34:33 -07001516 std::vector<BarrierOp> barrier_ops_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001517 const ResourceUsageTag &tag_;
1518};
1519
John Zulauf4a6105a2020-11-17 15:11:05 -07001520// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1521// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1522template <typename BarrierOp>
1523class ApplyBarrierFunctor {
1524 public:
1525 using Iterator = ResourceAccessRangeMap::iterator;
1526 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1527
1528 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1529 auto &access_state = pos->second;
1530 barrier_op_(&access_state);
1531 return pos;
1532 }
1533
1534 ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
1535
1536 private:
John Zulaufd5115702021-01-18 12:34:33 -07001537 BarrierOp barrier_op_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001538};
1539
John Zulauf1e331ec2020-12-04 18:29:38 -07001540// This functor resolves the pending state.
1541class ResolvePendingBarrierFunctor {
1542 public:
1543 using Iterator = ResourceAccessRangeMap::iterator;
1544 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1545
1546 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1547 auto &access_state = pos->second;
1548 access_state.ApplyPendingBarriers(tag_);
1549 return pos;
1550 }
1551
1552 ResolvePendingBarrierFunctor(const ResourceUsageTag &tag) : tag_(tag) {}
1553
1554 private:
John Zulauf89311b42020-09-29 16:28:47 -06001555 const ResourceUsageTag &tag_;
John Zulauf9cb530d2019-09-30 14:14:10 -06001556};
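// Typical composition of the functors above (illustrative sketch only; names such as sync_barriers and
// barrier_count are hypothetical, and the real call sites live in the pipeline-barrier and event-wait record paths):
//
//   ApplyBarrierOpsFunctor<PipelineBarrierOp> barriers_functor(true /* resolve */, barrier_count, tag);
//   for (const auto &sync_barrier : sync_barriers) {
//       barriers_functor.EmplaceBack(PipelineBarrierOp(sync_barrier, false /* no layout transition */));
//   }
//   access_context->ApplyToContext(barriers_functor);  // walks every address type over kFullRange
//
// Per-image/buffer barriers instead use ApplyBarrierFunctor over the resource's ranges, and the accumulated
// pending state is folded in later by a resolving pass (ApplyBarrierOpsFunctor with resolve == true, or
// ResolvePendingBarrierFunctor).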
1557
John Zulauf8e3c3e92021-01-06 11:19:36 -07001558void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1559 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
1560 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001561 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001562}
1563
John Zulauf8e3c3e92021-01-06 11:19:36 -07001564void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001565 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001566 if (!SimpleBinding(buffer)) return;
1567 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001568 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001569}
John Zulauf355e49b2020-04-24 15:11:15 -06001570
John Zulauf8e3c3e92021-01-06 11:19:36 -07001571void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf110413c2021-03-20 05:38:38 -06001572 const VkImageSubresourceRange &subresource_range, const ResourceUsageTag &tag) {
1573 if (!SimpleBinding(image)) return;
1574 const auto base_address = ResourceBaseAddress(image);
1575 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
1576 const auto address_type = ImageAddressType(image);
1577 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1578 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
1579}
1580void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001581 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf540266b2020-04-06 18:54:53 -06001582 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001583 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001584 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001585 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1586 base_address);
1587 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001588 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf110413c2021-03-20 05:38:38 -06001589 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, &range_gen);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001590}
John Zulaufd0ec59f2021-03-13 14:25:08 -07001591
1592void AccessContext::UpdateAccessState(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type,
1593 SyncStageAccessIndex current_usage, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
1594 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1595 if (!gen) return;
1596 subresource_adapter::ImageRangeGenerator range_gen(*gen);
1597 const auto address_type = view_gen.GetAddressType();
1598 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
1599 ApplyUpdateAction(address_type, action, &range_gen);
John Zulauf7635de32020-05-29 17:14:15 -06001600}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001601
John Zulauf8e3c3e92021-01-06 11:19:36 -07001602void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001603 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1604 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001605 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1606 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001607 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001608}
1609
John Zulaufd0ec59f2021-03-13 14:25:08 -07001610template <typename Action, typename RangeGen>
1611void AccessContext::ApplyUpdateAction(AccessAddressType address_type, const Action &action, RangeGen *range_gen_arg) {
1612 assert(range_gen_arg); // Old Google C++ style guide requires non-const objects be passed by *, not &, but this isn't an optional arg.
1613 UpdateMemoryAccessState(&GetAccessStateMap(address_type), action, range_gen_arg);
John Zulauf540266b2020-04-06 18:54:53 -06001614}
1615
1616template <typename Action>
John Zulaufd0ec59f2021-03-13 14:25:08 -07001617void AccessContext::ApplyUpdateAction(const AttachmentViewGen &view_gen, AttachmentViewGen::Gen gen_type, const Action &action) {
1618 const ImageRangeGen *gen = view_gen.GetRangeGen(gen_type);
1619 if (!gen) return;
1620 UpdateMemoryAccessState(&GetAccessStateMap(view_gen.GetAddressType()), action, *gen);
John Zulauf540266b2020-04-06 18:54:53 -06001621}
1622
John Zulaufd0ec59f2021-03-13 14:25:08 -07001623void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state,
1624 const AttachmentViewGenVector &attachment_views, uint32_t subpass,
John Zulauf7635de32020-05-29 17:14:15 -06001625 const ResourceUsageTag &tag) {
1626 UpdateStateResolveAction update(*this, tag);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001627 ResolveOperation(update, rp_state, attachment_views, subpass);
John Zulauf7635de32020-05-29 17:14:15 -06001628}
1629
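// Record the storeOp/stencilStoreOp writes for every attachment whose last use is this subpass, mirroring the
// checks in ValidateStoreOperation above.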
John Zulaufd0ec59f2021-03-13 14:25:08 -07001630void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const AttachmentViewGenVector &attachment_views,
1631 uint32_t subpass, const ResourceUsageTag &tag) {
John Zulaufaff20662020-06-01 14:07:58 -06001632 const auto *attachment_ci = rp_state.createInfo.pAttachments;
John Zulaufaff20662020-06-01 14:07:58 -06001633
1634 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1635 if (rp_state.attachment_last_subpass[i] == subpass) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001636 const auto &view_gen = attachment_views[i];
1637 if (!view_gen.IsValid()) continue; // UNUSED
John Zulaufaff20662020-06-01 14:07:58 -06001638
1639 const auto &ci = attachment_ci[i];
1640 const bool has_depth = FormatHasDepth(ci.format);
1641 const bool has_stencil = FormatHasStencil(ci.format);
1642 const bool is_color = !(has_depth || has_stencil);
1643 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1644
1645 if (is_color && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001646 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
1647 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001648 } else {
John Zulaufaff20662020-06-01 14:07:58 -06001649 if (has_depth && store_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001650 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
1651 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001652 }
1653 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1654 if (has_stencil && stencil_op_stores) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07001655 UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
1656 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001657 }
1658 }
1659 }
1660 }
1661}
1662
John Zulauf540266b2020-04-06 18:54:53 -06001663template <typename Action>
John Zulaufd5115702021-01-18 12:34:33 -07001664void AccessContext::ApplyToContext(const Action &barrier_action) {
John Zulauf540266b2020-04-06 18:54:53 -06001665 // Note: Barriers do *not* cross context boundaries, applying to accessess within.... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001666 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001667 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001668 }
1669}
1670
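// Import each child (subpass) context's final access state into this context, applying the subpass's external
// (dst) barriers as the ranges are resolved.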
1671void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001672 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1673 auto &context = contexts[subpass_index];
John Zulauf22aefed2021-03-11 18:14:35 -07001674 ApplyTrackbackStackAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001675 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001676 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001677 }
1678 }
1679}
1680
John Zulauf355e49b2020-04-24 15:11:15 -06001681// Suitable only for *subpass* access contexts
John Zulaufd0ec59f2021-03-13 14:25:08 -07001682HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const AttachmentViewGen &attach_view) const {
1683 if (!attach_view.IsValid()) return HazardResult();
John Zulauf355e49b2020-04-24 15:11:15 -06001684
John Zulauf355e49b2020-04-24 15:11:15 -06001685 // We should never ask for a transition from a context we don't have
John Zulauf7635de32020-05-29 17:14:15 -06001686 assert(track_back.context);
John Zulauf355e49b2020-04-24 15:11:15 -06001687
1688 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001689 // Hazard detection for the transition can be against the merged of the barriers (it only uses src_...)
1690 const auto merged_barrier = MergeBarriers(track_back.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001691 HazardResult hazard = track_back.context->DetectImageBarrierHazard(attach_view, merged_barrier, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001692 if (!hazard.hazard) {
1693 // The Async hazard check is against the current context's async set.
John Zulaufd0ec59f2021-03-13 14:25:08 -07001694 hazard = DetectImageBarrierHazard(attach_view, merged_barrier, kDetectAsync);
John Zulauf355e49b2020-04-24 15:11:15 -06001695 }
John Zulaufa0a98292020-09-18 09:30:10 -06001696
John Zulauf355e49b2020-04-24 15:11:15 -06001697 return hazard;
1698}
1699
John Zulaufb02c1eb2020-10-06 16:33:36 -06001700void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
John Zulaufd0ec59f2021-03-13 14:25:08 -07001701 const AttachmentViewGenVector &attachment_views, const ResourceUsageTag &tag) {
John Zulaufb02c1eb2020-10-06 16:33:36 -06001702 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001703 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001704 for (const auto &transition : transitions) {
1705 const auto prev_pass = transition.prev_pass;
John Zulaufd0ec59f2021-03-13 14:25:08 -07001706 const auto &view_gen = attachment_views[transition.attachment];
1707 if (!view_gen.IsValid()) continue;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001708
1709 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1710 assert(trackback);
1711
1712 // Import the attachments into the current context
1713 const auto *prev_context = trackback->context;
1714 assert(prev_context);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001715 const auto address_type = view_gen.GetAddressType();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001716 auto &target_map = GetAccessStateMap(address_type);
1717 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07001718 prev_context->ResolveAccessRange(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action, &target_map,
1719 &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001720 }
1721
John Zulauf86356ca2020-10-19 11:46:41 -06001722 // If there were no transitions skip this global map walk
1723 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001724 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulaufd5115702021-01-18 12:34:33 -07001725 ApplyToContext(apply_pending_action);
John Zulauf86356ca2020-10-19 11:46:41 -06001726 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001727}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001728
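// Chain a global barrier into the recorded events: if the barrier's source execution scope overlaps the scopes
// previously applied to an event (or ALL_COMMANDS is in the source mask), extend the event's barrier scope with
// the destination scope.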
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001729void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
1730 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
John Zulauf669dfd52021-01-27 17:15:28 -07001731
1732 auto *events_context = GetCurrentEventsContext();
1733 assert(events_context);
1734 for (auto &event_pair : *events_context) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001735 assert(event_pair.second); // Shouldn't be storing empty
1736 auto &sync_event = *event_pair.second;
1737 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001738 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
1739 sync_event.barriers |= dst.exec_scope;
1740 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
John Zulauf4a6105a2020-11-17 15:11:05 -07001741 }
1742 }
1743}
1744
John Zulauf355e49b2020-04-24 15:11:15 -06001745
locke-lunarg61870c22020-06-09 14:51:50 -06001746bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1747 const char *func_name) const {
1748 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001749 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001750 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001751 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001752 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001753 return skip;
1754 }
1755
1756 using DescriptorClass = cvdescriptorset::DescriptorClass;
1757 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1758 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1759 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1760 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1761
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001762 for (const auto &stage_state : pipe->stage_state) {
1763 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1764 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001765 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001766 }
locke-lunarg61870c22020-06-09 14:51:50 -06001767 for (const auto &set_binding : stage_state.descriptor_uses) {
1768 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1769 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1770 set_binding.first.second);
1771 const auto descriptor_type = binding_it.GetType();
1772 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1773 auto array_idx = 0;
1774
1775 if (binding_it.IsVariableDescriptorCount()) {
1776 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1777 }
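                // Map the descriptor type and shader stage to the stage/access index used for hazard detection below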
1778 SyncStageAccessIndex sync_index =
1779 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1780
1781 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1782 uint32_t index = i - index_range.start;
1783 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1784 switch (descriptor->GetClass()) {
1785 case DescriptorClass::ImageSampler:
1786 case DescriptorClass::Image: {
1787 const IMAGE_VIEW_STATE *img_view_state = nullptr;
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001788 VkImageLayout image_layout;
locke-lunarg61870c22020-06-09 14:51:50 -06001789 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001790 const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
1791 img_view_state = image_sampler_descriptor->GetImageViewState();
1792 image_layout = image_sampler_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001793 } else {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001794 const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1795 img_view_state = image_descriptor->GetImageViewState();
1796 image_layout = image_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001797 }
1798 if (!img_view_state) continue;
John Zulauf361fb532020-07-22 10:45:39 -06001799 HazardResult hazard;
John Zulauf110413c2021-03-20 05:38:38 -06001800 const IMAGE_STATE *img_state = img_view_state->image_state.get();
John Zulauf361fb532020-07-22 10:45:39 -06001801 const auto &subresource_range = img_view_state->normalized_subresource_range;
John Zulauf110413c2021-03-20 05:38:38 -06001802
1803 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1804 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1805 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
John Zulauf361fb532020-07-22 10:45:39 -06001806 // Input attachments are subject to raster ordering rules
1807 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001808 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001809 } else {
John Zulauf110413c2021-03-20 05:38:38 -06001810 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range);
John Zulauf361fb532020-07-22 10:45:39 -06001811 }
John Zulauf110413c2021-03-20 05:38:38 -06001812
John Zulauf33fc1d52020-07-17 11:01:10 -06001813 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001814 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001815 img_view_state->image_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001816 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1817 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001818 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001819 sync_state_->report_data->FormatHandle(img_view_state->image_view()).c_str(),
1820 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1821 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001822 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1823 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
John Zulauffaea0ee2021-01-14 14:01:32 -07001824 set_binding.first.second, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001825 }
1826 break;
1827 }
1828 case DescriptorClass::TexelBuffer: {
1829 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1830 if (!buf_view_state) continue;
1831 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001832 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001833 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001834 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001835 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001836 buf_view_state->buffer_view(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001837 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1838 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001839 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view()).c_str(),
1840 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1841 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001842 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1843 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001844 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001845 }
1846 break;
1847 }
1848 case DescriptorClass::GeneralBuffer: {
1849 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1850 auto buf_state = buffer_descriptor->GetBufferState();
1851 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001852 const ResourceAccessRange range =
1853 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001854 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001855 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001856 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001857 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001858 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1859 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001860 sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
1861 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(),
1862 sync_state_->report_data->FormatHandle(pipe->pipeline()).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001863 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1864 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001865 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001866 }
1867 break;
1868 }
1869 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1870 default:
1871 break;
1872 }
1873 }
1874 }
1875 }
1876 return skip;
1877}
1878
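// Record (rather than validate) the access state updates for every descriptor bound at the given pipeline bind point,
// mirroring the hazard checks performed in ValidateDispatchDrawDescriptorSet above.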
1879void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1880 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001881 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001882 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001883 cb_state_->GetCurrentPipelineAndDesriptorSets(pipelineBindPoint, &pipe, &per_sets);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001884 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001885 return;
1886 }
1887
1888 using DescriptorClass = cvdescriptorset::DescriptorClass;
1889 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1890 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1891 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1892 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1893
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001894 for (const auto &stage_state : pipe->stage_state) {
1895 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1896 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001897 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001898 }
locke-lunarg61870c22020-06-09 14:51:50 -06001899 for (const auto &set_binding : stage_state.descriptor_uses) {
1900 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1901 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1902 set_binding.first.second);
1903 const auto descriptor_type = binding_it.GetType();
1904 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1905 auto array_idx = 0;
1906
1907 if (binding_it.IsVariableDescriptorCount()) {
1908 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1909 }
1910 SyncStageAccessIndex sync_index =
1911 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1912
1913 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1914 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1915 switch (descriptor->GetClass()) {
1916 case DescriptorClass::ImageSampler:
1917 case DescriptorClass::Image: {
1918 const IMAGE_VIEW_STATE *img_view_state = nullptr;
1919 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
1920 img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
1921 } else {
1922 img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
1923 }
1924 if (!img_view_state) continue;
1925 const IMAGE_STATE *img_state = img_view_state->image_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06001926 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
John Zulauf110413c2021-03-20 05:38:38 -06001927 const VkExtent3D extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1928 const VkOffset3D offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
1929 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kRaster,
1930 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001931 } else {
John Zulauf110413c2021-03-20 05:38:38 -06001932 current_context_->UpdateAccessState(*img_state, sync_index, SyncOrdering::kNonAttachment,
1933 img_view_state->normalized_subresource_range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001934 }
locke-lunarg61870c22020-06-09 14:51:50 -06001935 break;
1936 }
1937 case DescriptorClass::TexelBuffer: {
1938 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1939 if (!buf_view_state) continue;
1940 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001941 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001942 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001943 break;
1944 }
1945 case DescriptorClass::GeneralBuffer: {
1946 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1947 auto buf_state = buffer_descriptor->GetBufferState();
1948 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001949 const ResourceAccessRange range =
1950 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07001951 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06001952 break;
1953 }
1954 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1955 default:
1956 break;
1957 }
1958 }
1959 }
1960 }
1961}
1962
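// Validate the bound vertex buffers against vertex attribute reads for the range implied by firstVertex/vertexCount.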
1963bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
1964 bool skip = false;
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001965 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001966 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06001967 return skip;
1968 }
1969
1970 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
1971 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001972 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06001973
1974 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001975 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06001976 if (binding_description.binding < binding_buffers_size) {
1977 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06001978 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06001979
locke-lunarg1ae57d62020-11-18 10:49:19 -07001980 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001981 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
1982 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07001983 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06001984 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001985 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06001986 buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
1987 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer()).c_str(),
1988 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001989 }
1990 }
1991 }
1992 return skip;
1993}
1994
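// Record the vertex attribute read accesses for the same vertex buffer ranges checked in ValidateDrawVertex.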
1995void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag &tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06001996 const auto *pipe = cb_state_->GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001997 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06001998 return;
1999 }
2000 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2001 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002002 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002003
2004 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002005 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002006 if (binding_description.binding < binding_buffers_size) {
2007 const auto &binding_buffer = binding_buffers[binding_description.binding];
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002008 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->Destroyed()) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002009
locke-lunarg1ae57d62020-11-18 10:49:19 -07002010 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002011 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2012 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002013 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
2014 SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002015 }
2016 }
2017}
2018
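// Validate the bound index buffer for index reads covering firstIndex/indexCount, then conservatively validate the
// whole vertex buffer (see the TODO below).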
2019bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2020 bool skip = false;
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002021 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002022 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002023 }
locke-lunarg61870c22020-06-09 14:51:50 -06002024
locke-lunarg1ae57d62020-11-18 10:49:19 -07002025 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002026 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002027 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2028 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002029 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002030 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002031 skip |= sync_state_->LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002032 index_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
2033 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer()).c_str(),
2034 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer()).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002035 }
2036
2037    // TODO: For now, we detect hazards against the whole vertex buffer. The index buffer contents could change up until queue submit.
2038    // We will detect a more accurate range in the future.
2039 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2040 return skip;
2041}
2042
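// Record the index read access for the bound index buffer, then conservatively record the whole vertex buffer.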
2043void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag &tag) {
Jeremy Gebben9efe1cf2021-05-15 20:05:09 -06002044 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->Destroyed()) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002045
locke-lunarg1ae57d62020-11-18 10:49:19 -07002046 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002047 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002048 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2049 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002050 current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002051
2052    // TODO: For now, we detect hazards against the whole vertex buffer. The index buffer contents could change up until queue submit.
2053    // We will detect a more accurate range in the future.
2054 RecordDrawVertex(UINT32_MAX, 0, tag);
2055}
2056
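// Draw-time attachment validation is delegated to the current render pass context, if one is active.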
2057bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002058 bool skip = false;
2059 if (!current_renderpass_context_) return skip;
John Zulauf64ffe552021-02-06 10:25:07 -07002060 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
locke-lunarg7077d502020-06-18 21:37:26 -06002061 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002062}
2063
2064void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002065 if (current_renderpass_context_) {
John Zulauf64ffe552021-02-06 10:25:07 -07002066 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002067 }
locke-lunarg61870c22020-06-09 14:51:50 -06002068}
2069
John Zulauf64ffe552021-02-06 10:25:07 -07002070void CommandBufferAccessContext::RecordBeginRenderPass(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2071 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2072 const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002073    // Create an access context for the current renderpass.
John Zulauf64ffe552021-02-06 10:25:07 -07002074 render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
John Zulauf16adfc92020-04-08 10:28:33 -06002075 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf64ffe552021-02-06 10:25:07 -07002076 current_renderpass_context_->RecordBeginRenderPass(tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002077 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf16adfc92020-04-08 10:28:33 -06002078}
2079
John Zulauf64ffe552021-02-06 10:25:07 -07002080void CommandBufferAccessContext::RecordNextSubpass(CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002081 assert(current_renderpass_context_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002082 auto prev_tag = NextCommandTag(command);
2083 auto next_tag = NextSubcommandTag(command);
John Zulauf64ffe552021-02-06 10:25:07 -07002084 current_renderpass_context_->RecordNextSubpass(prev_tag, next_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002085 current_context_ = &current_renderpass_context_->CurrentContext();
2086}
2087
John Zulauf64ffe552021-02-06 10:25:07 -07002088void CommandBufferAccessContext::RecordEndRenderPass(CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002089 assert(current_renderpass_context_);
2090 if (!current_renderpass_context_) return;
2091
John Zulauf64ffe552021-02-06 10:25:07 -07002092 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, NextCommandTag(command));
John Zulauf355e49b2020-04-24 15:11:15 -06002093 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002094 current_renderpass_context_ = nullptr;
2095}
2096
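// Drop the sync tracking state for an event that is being destroyed.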
John Zulauf4a6105a2020-11-17 15:11:05 -07002097void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
2098    // Erase is okay with the key not being in the map.
John Zulauf669dfd52021-01-27 17:15:28 -07002099 const auto *event_state = sync_state_->Get<EVENT_STATE>(event);
2100 if (event_state) {
2101 GetCurrentEventsContext()->Destroy(event_state);
John Zulaufd5115702021-01-18 12:34:33 -07002102 }
2103}
2104
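// Check a draw call's color and depth/stencil attachment writes for hazards within the current subpass.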
John Zulauf64ffe552021-02-06 10:25:07 -07002105bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &ex_context, const CMD_BUFFER_STATE &cmd,
John Zulauffaea0ee2021-01-14 14:01:32 -07002106 const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002107 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002108 const auto &sync_state = ex_context.GetSyncState();
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002109 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002110 if (!pipe ||
2111 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002112 return skip;
2113 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002114 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002115 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg37047832020-06-12 13:44:45 -06002116
John Zulauf1a224292020-06-30 14:52:13 -06002117 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002118    // The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002119 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2120 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002121 if (location >= subpass.colorAttachmentCount ||
2122 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002123 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002124 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002125 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2126 if (!view_gen.IsValid()) continue;
2127 HazardResult hazard =
2128 current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kRenderArea,
2129 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment);
locke-lunarg96dc9632020-06-10 17:22:18 -06002130 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002131 const VkImageView view_handle = view_gen.GetViewState()->image_view();
John Zulaufd0ec59f2021-03-13 14:25:08 -07002132 skip |= sync_state.LogError(view_handle, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002133 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002134 func_name, string_SyncHazard(hazard.hazard),
John Zulaufd0ec59f2021-03-13 14:25:08 -07002135 sync_state.report_data->FormatHandle(view_handle).c_str(),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002136 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002137 location, ex_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002138 }
2139 }
2140 }
locke-lunarg37047832020-06-12 13:44:45 -06002141
2142    // PHASE1 TODO: Add layout-based read vs. write selection.
2143 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002144 const uint32_t depth_stencil_attachment =
2145 GetSubpassDepthStencilAttachmentIndex(pipe->graphicsPipelineCI.pDepthStencilState, subpass.pDepthStencilAttachment);
2146
2147 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2148 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2149 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002150 bool depth_write = false, stencil_write = false;
2151
2152        // PHASE1 TODO: These validation checks should be in core_checks.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002153 if (!FormatIsStencilOnly(view_state.create_info.format) && pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002154 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002155 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2156 depth_write = true;
2157 }
2158        // PHASE1 TODO: It needs to check whether stencil is writable.
2159        // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2160        // If the depth test is disabled, it is treated as passing, and then depthFailOp doesn't run.
2161        // PHASE1 TODO: These validation checks should be in core_checks.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002162 if (!FormatIsDepthOnly(view_state.create_info.format) && pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002163 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2164 stencil_write = true;
2165 }
2166
2167 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2168 if (depth_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002169 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
2170 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2171 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002172 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002173 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002174 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002175 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002176 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002177 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2178 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002179 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002180 }
2181 }
2182 if (stencil_write) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002183 HazardResult hazard = current_context.DetectHazard(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
2184 SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2185 SyncOrdering::kDepthStencilAttachment);
locke-lunarg37047832020-06-12 13:44:45 -06002186 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002187 skip |= sync_state.LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002188 view_state.image_view(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002189 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002190 func_name, string_SyncHazard(hazard.hazard),
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002191 sync_state.report_data->FormatHandle(view_state.image_view()).c_str(),
2192 sync_state.report_data->FormatHandle(cmd.commandBuffer()).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002193 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002194 }
locke-lunarg61870c22020-06-09 14:51:50 -06002195 }
2196 }
2197 return skip;
2198}
2199
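// Record the color and depth/stencil attachment write accesses corresponding to the draw validated above.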
John Zulauf64ffe552021-02-06 10:25:07 -07002200void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag &tag) {
Jeremy Gebben159b3cc2021-06-03 09:09:03 -06002201 const auto *pipe = cmd.GetCurrentPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002202 if (!pipe ||
2203 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002204 return;
2205 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002206 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002207 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
locke-lunarg61870c22020-06-09 14:51:50 -06002208
John Zulauf1a224292020-06-30 14:52:13 -06002209 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002210    // The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002211 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2212 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002213 if (location >= subpass.colorAttachmentCount ||
2214 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002215 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002216 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002217 const AttachmentViewGen &view_gen = attachment_views_[subpass.pColorAttachments[location].attachment];
2218 current_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea,
2219 SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment,
2220 tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002221 }
2222 }
locke-lunarg37047832020-06-12 13:44:45 -06002223
2224    // PHASE1 TODO: Add layout-based read vs. write selection.
2225 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002226 const uint32_t depth_stencil_attachment =
2227 GetSubpassDepthStencilAttachmentIndex(pipe->graphicsPipelineCI.pDepthStencilState, subpass.pDepthStencilAttachment);
2228 if ((depth_stencil_attachment != VK_ATTACHMENT_UNUSED) && attachment_views_[depth_stencil_attachment].IsValid()) {
2229 const AttachmentViewGen &view_gen = attachment_views_[depth_stencil_attachment];
2230 const IMAGE_VIEW_STATE &view_state = *view_gen.GetViewState();
locke-lunarg37047832020-06-12 13:44:45 -06002231 bool depth_write = false, stencil_write = false;
John Zulaufd0ec59f2021-03-13 14:25:08 -07002232 const bool has_depth = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT);
2233 const bool has_stencil = 0 != (view_state.normalized_subresource_range.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002234
2235        // PHASE1 TODO: These validation checks should be in core_checks.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002236 if (has_depth && !FormatIsStencilOnly(view_state.create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002237 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2238 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002239 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2240 depth_write = true;
2241 }
2242        // PHASE1 TODO: It needs to check whether stencil is writable.
2243        // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2244        // If the depth test is disabled, it is treated as passing, and then depthFailOp doesn't run.
2245        // PHASE1 TODO: These validation checks should be in core_checks.
John Zulaufd0ec59f2021-03-13 14:25:08 -07002246 if (has_stencil && !FormatIsDepthOnly(view_state.create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002247 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002248 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2249 stencil_write = true;
2250 }
2251
John Zulaufd0ec59f2021-03-13 14:25:08 -07002252 if (depth_write || stencil_write) {
2253 const auto ds_gentype = view_gen.GetDepthStencilRenderAreaGenType(depth_write, stencil_write);
2254 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2255 current_context.UpdateAccessState(view_gen, ds_gentype, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2256 SyncOrdering::kDepthStencilAttachment, tag);
locke-lunarg37047832020-06-12 13:44:45 -06002257 }
locke-lunarg61870c22020-06-09 14:51:50 -06002258 }
2259}
2260
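// Validate the implicit operations at the subpass boundary: resolve and store operations for the current subpass,
// then layout transitions and load operations for the next subpass (the latter against a temporary proxy context).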
John Zulauf64ffe552021-02-06 10:25:07 -07002261bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002262 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002263 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002264 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002265 current_subpass_);
John Zulauf64ffe552021-02-06 10:25:07 -07002266 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002267 func_name);
2268
John Zulauf355e49b2020-04-24 15:11:15 -06002269 const auto next_subpass = current_subpass_ + 1;
John Zulauf1507ee42020-05-18 11:33:09 -06002270 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002271 skip |=
2272 next_context.ValidateLayoutTransitions(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002273 if (!skip) {
2274 // To avoid complex (and buggy) duplication of the affect of layout transitions on load operations, we'll record them
2275 // on a copy of the (empty) next context.
2276 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2277 AccessContext temp_context(next_context);
2278 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002279 skip |=
2280 temp_context.ValidateLoadOperation(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002281 }
John Zulauf7635de32020-05-29 17:14:15 -06002282 return skip;
2283}
John Zulauf64ffe552021-02-06 10:25:07 -07002284bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002285 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002286 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002287 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002288 current_subpass_);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002289 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_,
2290
2291 attachment_views_, func_name);
John Zulauf64ffe552021-02-06 10:25:07 -07002292 skip |= ValidateFinalSubpassLayoutTransitions(ex_context, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002293 return skip;
2294}
2295
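// Build a proxy copy of the current subpass context with store/resolve operations applied, used when validating
// final layout transitions sourced from the last subpass.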
John Zulauf64ffe552021-02-06 10:25:07 -07002296AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002297 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002298}
2299
John Zulauf64ffe552021-02-06 10:25:07 -07002300bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &ex_context,
2301 const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002302 bool skip = false;
2303
John Zulauf7635de32020-05-29 17:14:15 -06002304    // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2305 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2306 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2307 // to apply and only copy then, if this proves a hot spot.
2308 std::unique_ptr<AccessContext> proxy_for_current;
2309
John Zulauf355e49b2020-04-24 15:11:15 -06002310 // Validate the "finalLayout" transitions to external
2311    // Get them from where we're hiding them in the extra entry.
2312 const auto &final_transitions = rp_state_->subpass_transitions.back();
2313 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002314 const auto &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002315 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
2316 assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
John Zulauf7635de32020-05-29 17:14:15 -06002317 auto *context = trackback.context;
2318
2319 if (transition.prev_pass == current_subpass_) {
2320 if (!proxy_for_current) {
2321                // We haven't recorded resolve for the current_subpass, so we need to copy current and update it *as if*
John Zulauf64ffe552021-02-06 10:25:07 -07002322 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002323 }
2324 context = proxy_for_current.get();
2325 }
2326
John Zulaufa0a98292020-09-18 09:30:10 -06002327        // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2328 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufd0ec59f2021-03-13 14:25:08 -07002329 auto hazard = context->DetectImageBarrierHazard(view_gen, merged_barrier, AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002330 if (hazard.hazard) {
John Zulauf64ffe552021-02-06 10:25:07 -07002331 skip |= ex_context.GetSyncState().LogError(
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06002332 rp_state_->renderPass(), string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07002333 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2334 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2335 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2336 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf64ffe552021-02-06 10:25:07 -07002337 ex_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06002338 }
2339 }
2340 return skip;
2341}
2342
2343void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
2344 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002345 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002346}
2347
John Zulauf64ffe552021-02-06 10:25:07 -07002348void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag &tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002349 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2350 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf1507ee42020-05-18 11:33:09 -06002351
2352 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2353 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002354 const AttachmentViewGen &view_gen = attachment_views_[i];
2355 if (!view_gen.IsValid()) continue; // UNUSED
John Zulauf1507ee42020-05-18 11:33:09 -06002356
2357 const auto &ci = attachment_ci[i];
2358 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002359 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002360 const bool is_color = !(has_depth || has_stencil);
2361
2362 if (is_color) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002363 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kRenderArea, ColorLoadUsage(ci.loadOp),
2364 SyncOrdering::kColorAttachment, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002365 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06002366 if (has_depth) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002367 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kDepthOnlyRenderArea,
2368 DepthStencilLoadUsage(ci.loadOp), SyncOrdering::kDepthStencilAttachment, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002369 }
2370 if (has_stencil) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002371 subpass_context.UpdateAccessState(view_gen, AttachmentViewGen::Gen::kStencilOnlyRenderArea,
2372 DepthStencilLoadUsage(ci.stencilLoadOp),
2373 SyncOrdering::kDepthStencilAttachment, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002374 }
2375 }
2376 }
2377 }
2378}
John Zulaufd0ec59f2021-03-13 14:25:08 -07002379AttachmentViewGenVector RenderPassAccessContext::CreateAttachmentViewGen(
2380 const VkRect2D &render_area, const std::vector<const IMAGE_VIEW_STATE *> &attachment_views) {
2381 AttachmentViewGenVector view_gens;
2382 VkExtent3D extent = CastTo3D(render_area.extent);
2383 VkOffset3D offset = CastTo3D(render_area.offset);
2384 view_gens.reserve(attachment_views.size());
2385 for (const auto *view : attachment_views) {
2386 view_gens.emplace_back(view, offset, extent);
2387 }
2388 return view_gens;
2389}
John Zulauf64ffe552021-02-06 10:25:07 -07002390RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2391 VkQueueFlags queue_flags,
2392 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2393 const AccessContext *external_context)
John Zulaufd0ec59f2021-03-13 14:25:08 -07002394 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_() {
John Zulauf355e49b2020-04-24 15:11:15 -06002395    // Add this for all subpasses here so that they exist during next subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002396 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002397 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002398 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulauf355e49b2020-04-24 15:11:15 -06002399 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002400 attachment_views_ = CreateAttachmentViewGen(render_area, attachment_views);
John Zulauf64ffe552021-02-06 10:25:07 -07002401}
2402void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
2403 assert(0 == current_subpass_);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002404 subpass_contexts_[current_subpass_].SetStartTag(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002405 RecordLayoutTransitions(tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002406 RecordLoadOperations(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002407}
John Zulauf1507ee42020-05-18 11:33:09 -06002408
John Zulauf64ffe552021-02-06 10:25:07 -07002409void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag &prev_subpass_tag,
John Zulauffaea0ee2021-01-14 14:01:32 -07002410 const ResourceUsageTag &next_subpass_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002411 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulaufd0ec59f2021-03-13 14:25:08 -07002412 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, prev_subpass_tag);
2413 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, prev_subpass_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002414
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002415 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2416 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002417 current_subpass_++;
2418 assert(current_subpass_ < subpass_contexts_.size());
John Zulauffaea0ee2021-01-14 14:01:32 -07002419 subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
2420 RecordLayoutTransitions(next_subpass_tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002421 RecordLoadOperations(next_subpass_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002422}
2423
John Zulauf64ffe552021-02-06 10:25:07 -07002424void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag &tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002425 // Add the resolve and store accesses
John Zulaufd0ec59f2021-03-13 14:25:08 -07002426 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, attachment_views_, current_subpass_, tag);
2427 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, attachment_views_, current_subpass_, tag);
John Zulauf7635de32020-05-29 17:14:15 -06002428
John Zulauf355e49b2020-04-24 15:11:15 -06002429 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002430 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002431
2432 // Add the "finalLayout" transitions to external
2433    // Get them from where we're hiding them in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002434    // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2435    // TODO: Aliasing -- we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2436    // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002437 const auto &final_transitions = rp_state_->subpass_transitions.back();
2438 for (const auto &transition : final_transitions) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07002439 const AttachmentViewGen &view_gen = attachment_views_[transition.attachment];
John Zulauf355e49b2020-04-24 15:11:15 -06002440 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufaa97d8b2020-07-14 10:58:13 -06002441 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
John Zulaufd5115702021-01-18 12:34:33 -07002442 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002443 for (const auto &barrier : last_trackback.barriers) {
John Zulaufd5115702021-01-18 12:34:33 -07002444 barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002445 }
John Zulaufd0ec59f2021-03-13 14:25:08 -07002446 external_context->ApplyUpdateAction(view_gen, AttachmentViewGen::Gen::kViewSubresource, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002447 }
2448}
2449
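// Build a source execution scope: expand the stage mask for the queue's capabilities, fold in logically earlier
// stages, and derive the set of accesses valid for those stages.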
Jeremy Gebben40a22942020-12-22 14:22:06 -07002450SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002451 SyncExecScope result;
2452 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002453 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2454 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002455 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2456 return result;
2457}
2458
Jeremy Gebben40a22942020-12-22 14:22:06 -07002459SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002460 SyncExecScope result;
2461 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002462 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2463 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002464 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2465 return result;
2466}
2467
2468SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002469 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002470 src_access_scope = 0;
John Zulaufc523bf62021-02-16 08:20:34 -07002471 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002472 dst_access_scope = 0;
2473}
2474
2475template <typename Barrier>
2476SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002477 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002478 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002479 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002480 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2481}
2482
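// Build a barrier from a subpass dependency, preferring a VkMemoryBarrier2KHR chained on pNext (synchronization2)
// over the legacy stage/access masks in the dependency itself.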
2483SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002484 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2485 if (barrier) {
2486 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002487 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002488 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002489
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002490 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002491 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002492 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2493
2494 } else {
2495 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002496 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002497 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2498
2499 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002500 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002501 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2502 }
2503}
2504
2505template <typename Barrier>
2506SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2507 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2508 src_exec_scope = src.exec_scope;
2509 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2510
2511 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002512 dst_exec_scope = dst.exec_scope;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002513 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002514}
2515
John Zulaufb02c1eb2020-10-06 16:33:36 -06002516// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2517void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2518 for (const auto &barrier : barriers) {
2519 ApplyBarrier(barrier, layout_transition);
2520 }
2521}
2522
John Zulauf89311b42020-09-29 16:28:47 -06002523// ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. The designed use is for
2524// inter-subpass barriers for lazy evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2525// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufb02c1eb2020-10-06 16:33:36 -06002526void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag &tag) {
2527 assert(!pending_layout_transition); // This should never be call in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002528 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002529 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002530 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002531 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002532 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002533 ApplyPendingBarriers(tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002534}
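// Detect hazards for a single usage against this access state: reads are checked for RAW against the last write;
// writes are checked for WAR against any reads since the last write, otherwise for WAW against the last write.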
John Zulauf9cb530d2019-09-30 14:14:10 -06002535HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2536 HazardResult hazard;
2537 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002538 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002539 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002540 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002541 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002542 }
2543 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002544 // Write operation:
2545        // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads).
2546        // If reads exist -- test only against them because either:
2547        // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
2548        // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
2549        //   the current write happens after the reads, so just test the write against the reads
2550 // Otherwise test against last_write
2551 //
2552 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07002553 if (last_reads.size()) {
2554 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06002555 if (IsReadHazard(usage_stage, read_access)) {
2556 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2557 break;
2558 }
2559 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002560 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06002561 // Write-After-Write check -- if we have a previous write to test against
2562 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002563 }
2564 }
2565 return hazard;
2566}
2567
John Zulauf8e3c3e92021-01-06 11:19:36 -07002568HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering &ordering_rule) const {
2569 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06002570 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
2571 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06002572 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002573 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002574 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
2575 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06002576 if (IsRead(usage_bit)) {
2577 // Exclude RAW if no write, or write not most "most recent" operation w.r.t. usage;
2578 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
2579 if (is_raw_hazard) {
2580 // NOTE: we know last_write is non-zero
2581 // See if the ordering rules save us from the simple RAW check above
2582 // First check to see if the current usage is covered by the ordering rules
2583 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
2584 const bool usage_is_ordered =
2585 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
2586 if (usage_is_ordered) {
2587                // Now see if the most recent write (or a subsequent read) is ordered
2588 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
2589 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06002590 }
2591 }
John Zulauf4285ee92020-09-23 10:20:52 -06002592 if (is_raw_hazard) {
2593 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
2594 }
John Zulauf361fb532020-07-22 10:45:39 -06002595 } else {
2596 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002597 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07002598 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06002599 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07002600 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06002601 if (usage_write_is_ordered) {
2602                // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
2603 ordered_stages = GetOrderedStages(ordering);
2604 }
2605 // If we're tracking any reads that aren't ordered against the current write, we've got to check 'em all.
2606 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002607 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06002608 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
2609 if (IsReadHazard(usage_stage, read_access)) {
2610 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2611 break;
2612 }
John Zulaufd14743a2020-07-03 09:42:39 -06002613 }
2614 }
John Zulauf4285ee92020-09-23 10:20:52 -06002615 } else if (!(last_write_is_ordered && usage_write_is_ordered)) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002616 if (last_write.any() && IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002617 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06002618 }
John Zulauf69133422020-05-20 14:55:53 -06002619 }
2620 }
2621 return hazard;
2622}
2623
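// [Illustrative note, not part of the original source] The ordering rules above model the
// per-sample/raster-order guarantees for attachment accesses, which is why a nominal hazard between,
// e.g., a color-attachment write and a later input-attachment read in the same subpass is not
// reported. At the API level such feedback reads are typically expressed with a by-region
// self-dependency like the hypothetical one below (field values illustrative):
//
//     VkSubpassDependency self_dep{};
//     self_dep.srcSubpass      = 0;
//     self_dep.dstSubpass      = 0;   // self-dependency within the subpass
//     self_dep.srcStageMask    = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     self_dep.dstStageMask    = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     self_dep.srcAccessMask   = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     self_dep.dstAccessMask   = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
//     self_dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;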
John Zulauf2f952d22020-02-10 11:34:51 -07002624// Asynchronous Hazards occur between subpasses with no connection through the DAG
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002625HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag &start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07002626 HazardResult hazard;
2627 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002628 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
2629 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
2630 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07002631 if (IsRead(usage)) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002632 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06002633 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07002634 }
2635 } else {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002636 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06002637 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07002638 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002639 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07002640 for (const auto &read_access : last_reads) {
2641 if (read_access.tag.index >= start_tag.index) {
2642 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002643 break;
2644 }
2645 }
John Zulauf2f952d22020-02-10 11:34:51 -07002646 }
2647 }
2648 return hazard;
2649}
2650
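// [Illustrative sketch, not part of the original source] "Async" hazards are those between subpasses
// with no dependency path in the render pass DAG: their accesses may execute in any order, so no
// barrier inside either subpass can order them. For example, if subpass 0 writes an attachment that
// subpass 1 reads as an input attachment, omitting a dependency like the hypothetical one below
// leaves the two racing and READ_RACING_WRITE / WRITE_RACING_READ is reported here.
//
//     VkSubpassDependency dep{};
//     dep.srcSubpass    = 0;
//     dep.dstSubpass    = 1;
//     dep.srcStageMask  = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     dep.dstStageMask  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;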
Jeremy Gebben40a22942020-12-22 14:22:06 -07002651HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002652 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07002653 // Only supporting image layout transitions for now
2654 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2655 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06002656 // only test for WAW if there are no intervening read operations.
2657 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07002658 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06002659 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07002660 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002661 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06002662 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07002663 break;
2664 }
2665 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002666 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2667 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2668 }
2669
2670 return hazard;
2671}
2672
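// [Illustrative sketch, not part of the original source] The pattern this layout-transition check
// reports; handles and masks are hypothetical.
//
//     vkCmdCopyBufferToImage(cb, staging, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
//
//     VkImageMemoryBarrier imb{VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     imb.srcAccessMask = 0;                                     // transfer write NOT in src_access_scope
//     imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     imb.oldLayout     = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     imb.newLayout     = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     imb.image         = image;                                 // subresourceRange etc. omitted
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,  // src_exec_scope misses the copy
//                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &imb);
//
// Neither the source stage nor the source access scope covers the prior copy, so the transition
// (treated as a write) is reported as WRITE_AFTER_WRITE by IsWriteBarrierHazard().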
Jeremy Gebben40a22942020-12-22 14:22:06 -07002673HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07002674 const SyncStageAccessFlags &src_access_scope,
2675 const ResourceUsageTag &event_tag) const {
2676 // Only supporting image layout transitions for now
2677 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2678 HazardResult hazard;
2679 // only test for WAW if there are no intervening read operations.
2680 // See DetectHazard(SyncStageAccessIndex) above for more details.
2681
John Zulaufab7756b2020-12-29 16:10:16 -07002682 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002683 // Look at the reads if any... if reads exist, they are either the reason the access is in the event
2684 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07002685 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002686 if (read_access.tag.IsBefore(event_tag)) {
2687 // The read is in the event's first synchronization scope, so we use a barrier hazard check
2688 // If the read stage is not in the src sync scope
2689 // *AND* not execution chained with an existing sync barrier (that's the or)
2690 // then the barrier access is unsafe (R/W after R)
2691 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
2692 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2693 break;
2694 }
2695 } else {
2696 // The read is not in the event's first sync scope, and so it is a hazard vs. the layout transition
2697 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2698 }
2699 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002700 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002701 // if there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
2702 if (write_tag.IsBefore(event_tag)) {
2703 // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
2704 // So do a normal barrier hazard check
2705 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2706 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2707 }
2708 } else {
2709 // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06002710 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2711 }
John Zulaufd14743a2020-07-03 09:42:39 -06002712 }
John Zulauf361fb532020-07-22 10:45:39 -06002713
John Zulauf0cb5be22020-01-23 12:18:22 -07002714 return hazard;
2715}
2716
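// [Illustrative sketch, not part of the original source] What the event-tag comparison above
// distinguishes; handles are hypothetical.
//
//     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdCopyBufferToImage(cb, staging, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
//     vkCmdWaitEvents(cb, 1, &event, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                     0, nullptr, 0, nullptr, 1, &imb);   // imb transitions 'image'
//
// The copy is recorded *after* the SetEvent, so its tag is not before event_tag; it cannot be in the
// event's first scope, and the layout transition performed by the wait is reported as a hazard
// against it regardless of the masks supplied.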
John Zulauf5f13a792020-03-10 07:31:21 -06002717 // The logic behind resolve is the same as update: we assume that earlier hazards have been reported, and that no
2718 // transitive hazard can exist alongside a hazard between the earlier operations. Yes, an early hazard can mask that another
2719 // exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
2720void ResourceAccessState::Resolve(const ResourceAccessState &other) {
2721 if (write_tag.IsBefore(other.write_tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002722 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
2723 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06002724 *this = other;
2725 } else if (!other.write_tag.IsBefore(write_tag)) {
2726 // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
2727 // dependency chaining logic or any stage expansion)
2728 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002729 pending_write_barriers |= other.pending_write_barriers;
2730 pending_layout_transition |= other.pending_layout_transition;
2731 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06002732
John Zulaufd14743a2020-07-03 09:42:39 -06002733 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07002734 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06002735 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07002736 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06002737 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06002738 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06002739 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06002740 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
2741 // but we should wait on profiling data for that.
2742 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06002743 auto &my_read = last_reads[my_read_index];
2744 if (other_read.stage == my_read.stage) {
2745 if (my_read.tag.IsBefore(other_read.tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002746 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06002747 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06002748 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002749 my_read.pending_dep_chain = other_read.pending_dep_chain;
2750 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
2751 // May require tracking more than one access per stage.
2752 my_read.barriers = other_read.barriers;
Jeremy Gebben40a22942020-12-22 14:22:06 -07002753 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06002754 // Since I'm overwriting the fragment stage read, also update the input attachment info
2755 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06002756 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06002757 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002758 } else if (other_read.tag.IsBefore(my_read.tag)) {
2759 // The read tags match so merge the barriers
2760 my_read.barriers |= other_read.barriers;
2761 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06002762 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002763
John Zulauf5f13a792020-03-10 07:31:21 -06002764 break;
2765 }
2766 }
2767 } else {
2768 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07002769 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06002770 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07002771 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06002772 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06002773 }
John Zulauf5f13a792020-03-10 07:31:21 -06002774 }
2775 }
John Zulauf361fb532020-07-22 10:45:39 -06002776 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06002777 } // the else clause would be that the other write is before this write... in which case we supersede the other state and
2778 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07002779
2780 // Merge first access information by making a copy of this first_access and reconstructing with a shuffle
2781 // of the copy and other into this using the update first logic.
2782 // NOTE: All sorts of additional cleverness could be put into short circuits (for example, the back is a write and is before the front
2783 // of the other first_accesses...)
2784 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
2785 FirstAccesses firsts(std::move(first_accesses_));
2786 first_accesses_.clear();
2787 first_read_stages_ = 0U;
2788 auto a = firsts.begin();
2789 auto a_end = firsts.end();
2790 for (auto &b : other.first_accesses_) {
2791 // TODO: Determine whether "IsBefore" or "IsGloballyBefore" is needed...
2792 while (a != a_end && a->tag.IsBefore(b.tag)) {
2793 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
2794 ++a;
2795 }
2796 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
2797 }
2798 for (; a != a_end; ++a) {
2799 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
2800 }
2801 }
John Zulauf5f13a792020-03-10 07:31:21 -06002802}
2803
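// [Illustrative walk-through, not part of the original source] Resolve() is used when folding one
// access context into another (e.g. resolving subpass contexts), so the merged state must look as if
// the accesses had been recorded linearly. With hypothetical tags, given
//
//     this : write W @ tag 10, reads { FRAGMENT_SHADER @ tag 12 }
//     other: write W @ tag 10, reads { VERTEX_SHADER   @ tag 14 }
//
// the equal-write branch above ORs the write barriers and keeps both reads, while a strictly later
// write in 'other' would simply replace *this (first branch).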
John Zulauf8e3c3e92021-01-06 11:19:36 -07002804void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06002805 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
2806 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06002807 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06002808 // Multiple outstanding reads may be of interest, and each does dependency chaining independently
2809 // However, for purposes of barrier tracking, only one read per pipeline stage matters
2810 const auto usage_stage = PipelineStageBit(usage_index);
2811 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002812 for (auto &read_access : last_reads) {
2813 if (read_access.stage == usage_stage) {
2814 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002815 break;
2816 }
2817 }
2818 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07002819 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002820 last_read_stages |= usage_stage;
2821 }
John Zulauf4285ee92020-09-23 10:20:52 -06002822
2823 // Fragment shader reads come in two flavors, and we need to record whether the one we're tracking is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07002824 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06002825 // TODO Revisit re: multiple reads for a given stage
2826 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06002827 }
John Zulauf9cb530d2019-09-30 14:14:10 -06002828 } else {
2829 // Assume write
2830 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06002831 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002832 }
John Zulauffaea0ee2021-01-14 14:01:32 -07002833 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06002834}
John Zulauf5f13a792020-03-10 07:31:21 -06002835
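// [Illustrative walk-through, not part of the original source] How Update() folds a sequence of
// accesses to one range; tags are hypothetical and the access-index spellings are placeholders.
//
//     Update(<vertex shader storage read>,   SyncOrdering::kNonAttachment, tag1);  // new read, new stage
//     Update(<fragment shader storage read>, SyncOrdering::kNonAttachment, tag2);  // new read, new stage
//     Update(<vertex shader storage read>,   SyncOrdering::kNonAttachment, tag3);  // replaces the tag1 read
//                                                                                  // (one read per stage)
//     Update(<transfer write>,               SyncOrdering::kNonAttachment, tag4);  // SetWrite(): reads cleared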
John Zulauf89311b42020-09-29 16:28:47 -06002836// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
2837// if the last_reads/last_write were unsafe, we've reported them, in either case the prior access is irrelevant.
2838// We can overwrite them as *this* write is now after them.
2839//
2840// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002841void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag &tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07002842 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06002843 last_read_stages = 0;
2844 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06002845 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06002846
2847 write_barriers = 0;
2848 write_dependency_chain = 0;
2849 write_tag = tag;
2850 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06002851}
2852
John Zulauf89311b42020-09-29 16:28:47 -06002853// Apply the memory barrier without updating the existing barriers. The execution barrier
2854// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
2855// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
2856// replace the current write barriers or add to them, so accumulate to pending as well.
2857void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
2858 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
2859 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06002860 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
2861 // transition, under the theory of "most recent access". If the read/write *isn't* safe
2862 // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
2863 // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufc523bf62021-02-16 08:20:34 -07002864 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope.exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06002865 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07002866 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002867 }
John Zulauf89311b42020-09-29 16:28:47 -06002868 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
2869 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06002870
John Zulauf89311b42020-09-29 16:28:47 -06002871 if (!pending_layout_transition) {
2872 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
2873 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07002874 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06002875 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufc523bf62021-02-16 08:20:34 -07002876 if (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers)) {
2877 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002878 }
2879 }
John Zulaufa0a98292020-09-18 09:30:10 -06002880 }
John Zulaufa0a98292020-09-18 09:30:10 -06002881}
2882
John Zulauf4a6105a2020-11-17 15:11:05 -07002883// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
2884// changes the "chaining" state, but to keep barriers independent. See discussion above.
2885void ResourceAccessState::ApplyBarrier(const ResourceUsageTag &scope_tag, const SyncBarrier &barrier, bool layout_transition) {
2886 // The scope logic for events is: if we're here, the resource usage was flagged as "in the first execution scope" at
2887 // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
2888 // in order to know if it's in the execution scope
2889 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
2890 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
2891 // errors w.r.t. "most recent" accesses.
2892 if (layout_transition || ((write_tag.IsBefore(scope_tag)) && (barrier.src_access_scope & last_write).any())) {
2893 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07002894 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002895 }
2896 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
2897 pending_layout_transition |= layout_transition;
2898
2899 if (!pending_layout_transition) {
2900 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
2901 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07002902 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002903 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
2904 // NOTE: That's not really correct... this read stage might *not* have been included in the setevent, and the barriers
2905 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
2906 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
2907 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
2908 // capture (the specific write and read stages that *were* in scope at the moment of the SetEvent).
2909 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulaufc523bf62021-02-16 08:20:34 -07002910 if (read_access.tag.IsBefore(scope_tag) &&
2911 (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers))) {
2912 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002913 }
2914 }
2915 }
2916}
John Zulauf89311b42020-09-29 16:28:47 -06002917void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag &tag) {
2918 if (pending_layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06002919 // SetWrite clobbers the read count, and thus we don't have to clear the read_state out.
2920 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07002921 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf89311b42020-09-29 16:28:47 -06002922 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06002923 }
John Zulauf89311b42020-09-29 16:28:47 -06002924
2925 // Apply the accumulated execution barriers (and thus update chaining information)
2926 // For a layout transition, the read count is zeroed by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07002927 for (auto &read_access : last_reads) {
2928 read_access.barriers |= read_access.pending_dep_chain;
2929 read_execution_barriers |= read_access.barriers;
2930 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06002931 }
2932
2933 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
2934 write_dependency_chain |= pending_write_dep_chain;
2935 write_barriers |= pending_write_barriers;
2936 pending_write_dep_chain = 0;
2937 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06002938}
2939
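// [Illustrative walk-through, not part of the original source] The two-phase model used above:
// ApplyBarrier() only accumulates into the pending_* members so that the barriers of a single
// vkCmdPipelineBarrier/vkCmdWaitEvents batch stay independent of one another, and
// ApplyPendingBarriers() then commits the whole batch once.
//
//     access.ApplyBarrier(barrier_a, false);   // pending chains/barriers |= A's dst scopes
//     access.ApplyBarrier(barrier_b, false);   // pending |= B's dst scopes; B does not chain off A
//     access.ApplyPendingBarriers(tag);        // read/write barriers |= pending; pending cleared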
John Zulauf59e25072020-07-17 10:55:21 -06002940// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07002941VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
2942 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06002943
John Zulaufab7756b2020-12-29 16:10:16 -07002944 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002945 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06002946 barriers = read_access.barriers;
2947 break;
John Zulauf59e25072020-07-17 10:55:21 -06002948 }
2949 }
John Zulauf4285ee92020-09-23 10:20:52 -06002950
John Zulauf59e25072020-07-17 10:55:21 -06002951 return barriers;
2952}
2953
Jeremy Gebben40a22942020-12-22 14:22:06 -07002954inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06002955 assert(IsRead(usage));
2956 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
2957 // * the previous reads are not hazards, and thus last_write must be visible and available to
2958 // any reads that happen after.
2959 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
2960 // the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002961 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06002962}
2963
Jeremy Gebben40a22942020-12-22 14:22:06 -07002964VkPipelineStageFlags2KHR ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06002965 // Whether the stage are in the ordering scope only matters if the current write is ordered
Jeremy Gebben40a22942020-12-22 14:22:06 -07002966 VkPipelineStageFlags2KHR ordered_stages = last_read_stages & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06002967 // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002968 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06002969 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06002970 // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07002971 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06002972 }
2973
2974 return ordered_stages;
2975}
2976
John Zulauffaea0ee2021-01-14 14:01:32 -07002977void ResourceAccessState::UpdateFirst(const ResourceUsageTag &tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
2978 // Only record until we record a write.
2979 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07002980 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07002981 if (0 == (usage_stage & first_read_stages_)) {
2982 // If this is a read we haven't seen or a write, record.
2983 first_read_stages_ |= usage_stage;
2984 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
2985 }
2986 }
2987}
2988
John Zulaufd1f85d42020-04-15 12:23:15 -06002989void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06002990 auto *access_context = GetAccessContextNoInsert(command_buffer);
2991 if (access_context) {
2992 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06002993 }
2994}
2995
John Zulaufd1f85d42020-04-15 12:23:15 -06002996void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
2997 auto access_found = cb_access_state.find(command_buffer);
2998 if (access_found != cb_access_state.end()) {
2999 access_found->second->Reset();
3000 cb_access_state.erase(access_found);
3001 }
3002}
3003
John Zulauf9cb530d2019-09-30 14:14:10 -06003004bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3005 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3006 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003007 const auto *cb_context = GetAccessContext(commandBuffer);
3008 assert(cb_context);
3009 if (!cb_context) return skip;
3010 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003011
John Zulauf3d84f1b2020-03-09 13:33:25 -06003012 // If we have no previous accesses, we have no hazards
John Zulauf3d84f1b2020-03-09 13:33:25 -06003013 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003014 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003015
3016 for (uint32_t region = 0; region < regionCount; region++) {
3017 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003018 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003019 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003020 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003021 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003022 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003023 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003024 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003025 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003026 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003027 }
John Zulauf16adfc92020-04-08 10:28:33 -06003028 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003029 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003030 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003031 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003032 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003033 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003034 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003035 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003036 }
3037 }
3038 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003039 }
3040 return skip;
3041}
3042
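// [Illustrative repro, not part of the original source] A minimal sequence the validation above
// reports; handles and the region are hypothetical.
//
//     vkCmdCopyBuffer(cb, a, b, 1, &region);   // writes b
//     vkCmdCopyBuffer(cb, b, c, 1, &region);   // reads b -> READ_AFTER_WRITE on srcBuffer
//
// A vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, ...)
// carrying a VkMemoryBarrier with srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT and
// dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT between the two copies removes the report.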
3043void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3044 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003045 auto *cb_context = GetAccessContext(commandBuffer);
3046 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003047 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003048 auto *context = cb_context->GetCurrentAccessContext();
3049
John Zulauf9cb530d2019-09-30 14:14:10 -06003050 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003051 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003052
3053 for (uint32_t region = 0; region < regionCount; region++) {
3054 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003055 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003056 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003057 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003058 }
John Zulauf16adfc92020-04-08 10:28:33 -06003059 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003060 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003061 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003062 }
3063 }
3064}
3065
John Zulauf4a6105a2020-11-17 15:11:05 -07003066void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3067 // Clear out events from the command buffer contexts
3068 for (auto &cb_context : cb_access_state) {
3069 cb_context.second->RecordDestroyEvent(event);
3070 }
3071}
3072
Jeff Leger178b1e52020-10-05 12:22:23 -04003073bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3074 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3075 bool skip = false;
3076 const auto *cb_context = GetAccessContext(commandBuffer);
3077 assert(cb_context);
3078 if (!cb_context) return skip;
3079 const auto *context = cb_context->GetCurrentAccessContext();
3080
3081 // If we have no previous accesses, we have no hazards
3082 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3083 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3084
3085 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3086 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3087 if (src_buffer) {
3088 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003089 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003090 if (hazard.hazard) {
3091 // TODO -- add tag information to log msg when useful.
3092 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
3093 "vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
3094 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003095 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003096 }
3097 }
3098 if (dst_buffer && !skip) {
3099 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003100 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003101 if (hazard.hazard) {
3102 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
3103 "vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
3104 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003105 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003106 }
3107 }
3108 if (skip) break;
3109 }
3110 return skip;
3111}
3112
3113void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3114 auto *cb_context = GetAccessContext(commandBuffer);
3115 assert(cb_context);
3116 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
3117 auto *context = cb_context->GetCurrentAccessContext();
3118
3119 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3120 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3121
3122 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3123 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3124 if (src_buffer) {
3125 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003126 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003127 }
3128 if (dst_buffer) {
3129 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003130 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003131 }
3132 }
3133}
3134
John Zulauf5c5e88d2019-12-26 11:22:02 -07003135bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3136 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3137 const VkImageCopy *pRegions) const {
3138 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003139 const auto *cb_access_context = GetAccessContext(commandBuffer);
3140 assert(cb_access_context);
3141 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003142
John Zulauf3d84f1b2020-03-09 13:33:25 -06003143 const auto *context = cb_access_context->GetCurrentAccessContext();
3144 assert(context);
3145 if (!context) return skip;
3146
3147 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3148 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003149 for (uint32_t region = 0; region < regionCount; region++) {
3150 const auto &copy_region = pRegions[region];
3151 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003152 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003153 copy_region.srcOffset, copy_region.extent);
3154 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003155 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003156 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003157 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003158 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003159 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003160 }
3161
3162 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003163 VkExtent3D dst_copy_extent =
3164 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003165 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07003166 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003167 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003168 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003169 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003170 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003171 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003172 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003173 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003174 }
3175 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003176
John Zulauf5c5e88d2019-12-26 11:22:02 -07003177 return skip;
3178}
3179
3180void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3181 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3182 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003183 auto *cb_access_context = GetAccessContext(commandBuffer);
3184 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003185 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003186 auto *context = cb_access_context->GetCurrentAccessContext();
3187 assert(context);
3188
John Zulauf5c5e88d2019-12-26 11:22:02 -07003189 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003190 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003191
3192 for (uint32_t region = 0; region < regionCount; region++) {
3193 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003194 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003195 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003196 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003197 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003198 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003199 VkExtent3D dst_copy_extent =
3200 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003201 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003202 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003203 }
3204 }
3205}
3206
Jeff Leger178b1e52020-10-05 12:22:23 -04003207bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3208 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3209 bool skip = false;
3210 const auto *cb_access_context = GetAccessContext(commandBuffer);
3211 assert(cb_access_context);
3212 if (!cb_access_context) return skip;
3213
3214 const auto *context = cb_access_context->GetCurrentAccessContext();
3215 assert(context);
3216 if (!context) return skip;
3217
3218 const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3219 const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3220 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3221 const auto &copy_region = pCopyImageInfo->pRegions[region];
3222 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003223 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003224 copy_region.srcOffset, copy_region.extent);
3225 if (hazard.hazard) {
3226 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
3227 "vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
3228 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003229 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003230 }
3231 }
3232
3233 if (dst_image) {
3234 VkExtent3D dst_copy_extent =
3235 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003236 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003237 copy_region.dstOffset, dst_copy_extent);
3238 if (hazard.hazard) {
3239 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
3240 "vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
3241 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003242 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003243 }
3244 if (skip) break;
3245 }
3246 }
3247
3248 return skip;
3249}
3250
3251void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3252 auto *cb_access_context = GetAccessContext(commandBuffer);
3253 assert(cb_access_context);
3254 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
3255 auto *context = cb_access_context->GetCurrentAccessContext();
3256 assert(context);
3257
3258 auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3259 auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3260
3261 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3262 const auto &copy_region = pCopyImageInfo->pRegions[region];
3263 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003264 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003265 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003266 }
3267 if (dst_image) {
3268 VkExtent3D dst_copy_extent =
3269 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003270 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003271 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003272 }
3273 }
3274}
3275
John Zulauf9cb530d2019-09-30 14:14:10 -06003276bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3277 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3278 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3279 uint32_t bufferMemoryBarrierCount,
3280 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3281 uint32_t imageMemoryBarrierCount,
3282 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3283 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003284 const auto *cb_access_context = GetAccessContext(commandBuffer);
3285 assert(cb_access_context);
3286 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003287
John Zulauf36ef9282021-02-02 11:47:24 -07003288 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3289 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3290 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3291 pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003292 skip = pipeline_barrier.Validate(*cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003293 return skip;
3294}
3295
3296void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3297 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3298 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3299 uint32_t bufferMemoryBarrierCount,
3300 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3301 uint32_t imageMemoryBarrierCount,
3302 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003303 auto *cb_access_context = GetAccessContext(commandBuffer);
3304 assert(cb_access_context);
3305 if (!cb_access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003306
John Zulauf36ef9282021-02-02 11:47:24 -07003307 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3308 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3309 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3310 pImageMemoryBarriers);
3311 pipeline_barrier.Record(cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003312}
3313
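// [Illustrative note, not part of the original source] The synchronization2 entry points below drive
// the same SyncOpPipelineBarrier, just constructed from a VkDependencyInfoKHR. A hypothetical
// transfer-to-transfer barrier in that form:
//
//     VkMemoryBarrier2KHR mb2{VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR};
//     mb2.srcStageMask  = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR;
//     mb2.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR;
//     mb2.dstStageMask  = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR;
//     mb2.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT_KHR;
//
//     VkDependencyInfoKHR dep_info{VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR};
//     dep_info.memoryBarrierCount = 1;
//     dep_info.pMemoryBarriers    = &mb2;
//     vkCmdPipelineBarrier2KHR(cb, &dep_info);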
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003314bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
3315 const VkDependencyInfoKHR *pDependencyInfo) const {
3316 bool skip = false;
3317 const auto *cb_access_context = GetAccessContext(commandBuffer);
3318 assert(cb_access_context);
3319 if (!cb_access_context) return skip;
3320
3321 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3322 skip = pipeline_barrier.Validate(*cb_access_context);
3323 return skip;
3324}
3325
3326void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
3327 auto *cb_access_context = GetAccessContext(commandBuffer);
3328 assert(cb_access_context);
3329 if (!cb_access_context) return;
3330
3331 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3332 pipeline_barrier.Record(cb_access_context);
3333}
3334
John Zulauf9cb530d2019-09-30 14:14:10 -06003335void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3336 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
3337 // The state tracker sets up the device state
3338 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
3339
John Zulauf5f13a792020-03-10 07:31:21 -06003340 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
3341 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06003342 // TODO: Find a good way to do this hooklessly.
3343 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3344 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
3345 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
3346
John Zulaufd1f85d42020-04-15 12:23:15 -06003347 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3348 sync_device_state->ResetCommandBufferCallback(command_buffer);
3349 });
3350 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3351 sync_device_state->FreeCommandBufferCallback(command_buffer);
3352 });
John Zulauf9cb530d2019-09-30 14:14:10 -06003353}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003354
John Zulauf355e49b2020-04-24 15:11:15 -06003355bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf64ffe552021-02-06 10:25:07 -07003356 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003357 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06003358 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003359 if (cb_context) {
3360 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
3361 skip = sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003362 }
John Zulauf355e49b2020-04-24 15:11:15 -06003363 return skip;
3364}
3365
3366bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3367 VkSubpassContents contents) const {
3368 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003369 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003370 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003371 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003372 return skip;
3373}
3374
3375bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003376 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003377 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003378 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003379 return skip;
3380}
3381
John Zulauf64ffe552021-02-06 10:25:07 -07003382static const char *kBeginRenderPass2KhrName = "vkCmdBeginRenderPass2KHR";
John Zulauf355e49b2020-04-24 15:11:15 -06003383bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3384 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003385 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003386 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003387 skip |=
3388 ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003389 return skip;
3390}
3391
John Zulauf3d84f1b2020-03-09 13:33:25 -06003392void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3393 VkResult result) {
3394 // The state tracker sets up the command buffer state
3395 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3396
3397 // Create/initialize the structure that tracks accesses at the command buffer scope.
3398 auto cb_access_context = GetAccessContext(commandBuffer);
3399 assert(cb_access_context);
3400 cb_access_context->Reset();
3401}
3402
3403void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf64ffe552021-02-06 10:25:07 -07003404 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003405 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003406 if (cb_context) {
John Zulauf64ffe552021-02-06 10:25:07 -07003407 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
3408 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003409 }
3410}
3411
3412void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3413 VkSubpassContents contents) {
3414 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003415 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003416 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003417 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003418}
3419
3420void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3421 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3422 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003423 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003424}
3425
3426void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3427 const VkRenderPassBeginInfo *pRenderPassBegin,
3428 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3429 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003430 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003431}
3432
Mike Schuchardt2df08912020-12-15 16:28:09 -08003433bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf64ffe552021-02-06 10:25:07 -07003434 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003435 bool skip = false;
3436
3437 auto cb_context = GetAccessContext(commandBuffer);
3438 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003439 if (!cb_context) return skip;
3440 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
3441 return sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003442}
3443
3444bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3445 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
John Zulauf64ffe552021-02-06 10:25:07 -07003446 // Convert to a NextSubpass2
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003447 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003448 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003449 auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
3450 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003451 return skip;
3452}
3453
John Zulauf64ffe552021-02-06 10:25:07 -07003454static const char *kNextSubpass2KhrName = "vkCmdNextSubpass2KHR";
Mike Schuchardt2df08912020-12-15 16:28:09 -08003455bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3456 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003457 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003458 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003459 return skip;
3460}
3461
3462bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3463 const VkSubpassEndInfo *pSubpassEndInfo) const {
3464 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003465 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003466 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003467}
3468
3469void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf64ffe552021-02-06 10:25:07 -07003470 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003471 auto cb_context = GetAccessContext(commandBuffer);
3472 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003473 if (!cb_context) return;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003474
John Zulauf64ffe552021-02-06 10:25:07 -07003475 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
3476 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003477}
3478
3479void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
3480 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003481 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003482 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003483 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003484}
3485
3486void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3487 const VkSubpassEndInfo *pSubpassEndInfo) {
3488 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003489 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003490}
3491
3492void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3493 const VkSubpassEndInfo *pSubpassEndInfo) {
3494 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003495 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003496}
3497
John Zulauf64ffe552021-02-06 10:25:07 -07003498bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
3499 const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003500 bool skip = false;
3501
3502 auto cb_context = GetAccessContext(commandBuffer);
3503 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003504 if (!cb_context) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06003505
John Zulauf64ffe552021-02-06 10:25:07 -07003506 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
3507 skip |= sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003508 return skip;
3509}
3510
3511bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
3512 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003513 skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003514 return skip;
3515}
3516
Mike Schuchardt2df08912020-12-15 16:28:09 -08003517bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003518 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003519 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003520 return skip;
3521}
3522
John Zulauf64ffe552021-02-06 10:25:07 -07003523static const char *kEndRenderPass2KhrName = "vkCmdEndRenderPass2KHR";
John Zulauf355e49b2020-04-24 15:11:15 -06003524bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003525 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003526 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003527 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003528 return skip;
3529}
3530
John Zulauf64ffe552021-02-06 10:25:07 -07003531void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
3532 const char *cmd_name) {
John Zulaufe5da6e52020-03-18 15:32:18 -06003533 // Resolve all the subpass contexts to the command buffer contexts
3534 auto cb_context = GetAccessContext(commandBuffer);
3535 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003536 if (!cb_context) return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003537
John Zulauf64ffe552021-02-06 10:25:07 -07003538 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
3539 sync_op.Record(cb_context);
3540 return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003541}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003542
John Zulauf33fc1d52020-07-17 11:01:10 -06003543// Simple heuristic rule to detect WAW operations representing algorithmically safe or incremental
3544 // updates to a resource that do not conflict at the byte level.
3545// TODO: Revisit this rule to see if it needs to be tighter or looser
3546 // TODO: Add programmatic control over suppression heuristics
3547bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
3548 return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
3549}
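// Illustrative sketch (not part of the validator): the heuristic above suppresses only WRITE_AFTER_WRITE
// hazards where the new usage carries exactly the same stage/access bit as the prior write -- e.g. two
// successive draws storing through the same bound storage-buffer descriptor. Assuming a hypothetical call
// site, a caller might filter detected hazards like this:
//
//     if (hazard.hazard && !SupressedBoundDescriptorWAW(hazard)) {
//         skip |= LogError(...);  // report only hazards the heuristic does not suppress
//     }
//
// The call site and arguments shown are assumptions for illustration only.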
3550
John Zulauf3d84f1b2020-03-09 13:33:25 -06003551void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06003552 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06003553 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003554}
3555
3556void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06003557 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06003558 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003559}
3560
3561void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf64ffe552021-02-06 10:25:07 -07003562 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
John Zulauf5a1a5382020-06-22 17:23:25 -06003563 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003564}
locke-lunarga19c71d2020-03-02 18:17:04 -07003565
Jeff Leger178b1e52020-10-05 12:22:23 -04003566template <typename BufferImageCopyRegionType>
3567bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3568 VkImageLayout dstImageLayout, uint32_t regionCount,
3569 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003570 bool skip = false;
3571 const auto *cb_access_context = GetAccessContext(commandBuffer);
3572 assert(cb_access_context);
3573 if (!cb_access_context) return skip;
3574
Jeff Leger178b1e52020-10-05 12:22:23 -04003575 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3576 const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
3577
locke-lunarga19c71d2020-03-02 18:17:04 -07003578 const auto *context = cb_access_context->GetCurrentAccessContext();
3579 assert(context);
3580 if (!context) return skip;
3581
3582 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07003583 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3584
3585 for (uint32_t region = 0; region < regionCount; region++) {
3586 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07003587 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07003588 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003589 if (src_buffer) {
3590 ResourceAccessRange src_range =
3591 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003592 hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf477700e2021-01-06 11:41:49 -07003593 if (hazard.hazard) {
3594 // PHASE1 TODO -- add tag information to log msg when useful.
3595 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
3596 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3597 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003598 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003599 }
3600 }
3601
Jeremy Gebben40a22942020-12-22 14:22:06 -07003602 hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf477700e2021-01-06 11:41:49 -07003603 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003604 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003605 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003606 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003607 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003608 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003609 }
3610 if (skip) break;
3611 }
3612 if (skip) break;
3613 }
3614 return skip;
3615}
3616
Jeff Leger178b1e52020-10-05 12:22:23 -04003617bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3618 VkImageLayout dstImageLayout, uint32_t regionCount,
3619 const VkBufferImageCopy *pRegions) const {
3620 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
3621 COPY_COMMAND_VERSION_1);
3622}
3623
3624bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3625 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
3626 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3627 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3628 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3629}
3630
3631template <typename BufferImageCopyRegionType>
3632void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3633 VkImageLayout dstImageLayout, uint32_t regionCount,
3634 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003635 auto *cb_access_context = GetAccessContext(commandBuffer);
3636 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003637
3638 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3639 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
3640
3641 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003642 auto *context = cb_access_context->GetCurrentAccessContext();
3643 assert(context);
3644
3645 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06003646 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003647
3648 for (uint32_t region = 0; region < regionCount; region++) {
3649 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07003650 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003651 if (src_buffer) {
3652 ResourceAccessRange src_range =
3653 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003654 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003655 }
Jeremy Gebben40a22942020-12-22 14:22:06 -07003656 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003657 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003658 }
3659 }
3660}
3661
Jeff Leger178b1e52020-10-05 12:22:23 -04003662void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3663 VkImageLayout dstImageLayout, uint32_t regionCount,
3664 const VkBufferImageCopy *pRegions) {
3665 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
3666 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
3667}
3668
3669void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3670 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
3671 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
3672 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3673 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3674 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3675}
3676
3677template <typename BufferImageCopyRegionType>
3678bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3679 VkBuffer dstBuffer, uint32_t regionCount,
3680 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003681 bool skip = false;
3682 const auto *cb_access_context = GetAccessContext(commandBuffer);
3683 assert(cb_access_context);
3684 if (!cb_access_context) return skip;
3685
Jeff Leger178b1e52020-10-05 12:22:23 -04003686 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3687 const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
3688
locke-lunarga19c71d2020-03-02 18:17:04 -07003689 const auto *context = cb_access_context->GetCurrentAccessContext();
3690 assert(context);
3691 if (!context) return skip;
3692
3693 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3694 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebben6fbf8242021-06-21 09:14:46 -06003695 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
locke-lunarga19c71d2020-03-02 18:17:04 -07003696 for (uint32_t region = 0; region < regionCount; region++) {
3697 const auto &copy_region = pRegions[region];
3698 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003699 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07003700 copy_region.imageOffset, copy_region.imageExtent);
3701 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003702 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003703 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003704 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003705 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003706 }
John Zulauf477700e2021-01-06 11:41:49 -07003707 if (dst_mem) {
3708 ResourceAccessRange dst_range =
3709 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003710 hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf477700e2021-01-06 11:41:49 -07003711 if (hazard.hazard) {
3712 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
3713 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3714 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003715 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003716 }
locke-lunarga19c71d2020-03-02 18:17:04 -07003717 }
3718 }
3719 if (skip) break;
3720 }
3721 return skip;
3722}
3723
Jeff Leger178b1e52020-10-05 12:22:23 -04003724bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
3725 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
3726 const VkBufferImageCopy *pRegions) const {
3727 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
3728 COPY_COMMAND_VERSION_1);
3729}
3730
3731bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
3732 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
3733 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
3734 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
3735 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
3736}
3737
3738template <typename BufferImageCopyRegionType>
3739void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3740 VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
3741 CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003742 auto *cb_access_context = GetAccessContext(commandBuffer);
3743 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003744
3745 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3746 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
3747
3748 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003749 auto *context = cb_access_context->GetCurrentAccessContext();
3750 assert(context);
3751
3752 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003753 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
Jeremy Gebben6fbf8242021-06-21 09:14:46 -06003754 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->mem() : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06003755 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07003756
3757 for (uint32_t region = 0; region < regionCount; region++) {
3758 const auto &copy_region = pRegions[region];
3759 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003760 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003761 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003762 if (dst_buffer) {
3763 ResourceAccessRange dst_range =
3764 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003765 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003766 }
locke-lunarga19c71d2020-03-02 18:17:04 -07003767 }
3768 }
3769}
3770
Jeff Leger178b1e52020-10-05 12:22:23 -04003771void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3772 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
3773 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
3774 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
3775}
3776
3777void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
3778 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
3779 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
3780 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
3781 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
3782 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
3783}
3784
3785template <typename RegionType>
3786bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3787 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3788 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003789 bool skip = false;
3790 const auto *cb_access_context = GetAccessContext(commandBuffer);
3791 assert(cb_access_context);
3792 if (!cb_access_context) return skip;
3793
3794 const auto *context = cb_access_context->GetCurrentAccessContext();
3795 assert(context);
3796 if (!context) return skip;
3797
3798 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3799 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3800
3801 for (uint32_t region = 0; region < regionCount; region++) {
3802 const auto &blit_region = pRegions[region];
3803 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003804 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
3805 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
3806 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
3807 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
3808 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
3809 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003810 auto hazard = context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003811 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003812 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003813 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06003814 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003815 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003816 }
3817 }
3818
3819 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003820 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
3821 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
3822 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
3823 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
3824 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
3825 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003826 auto hazard = context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003827 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003828 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003829 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06003830 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003831 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003832 }
3833 if (skip) break;
3834 }
3835 }
3836
3837 return skip;
3838}
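// Worked example of the region normalization above: VkImageBlit offsets may be given in reverse order to
// express a mirrored blit, so the hazard check rebuilds an axis-aligned region from the component-wise
// minimum corner and the absolute differences as the extent. For instance, srcOffsets[0] = {64, 64, 1} and
// srcOffsets[1] = {0, 0, 0} normalize to offset = {0, 0, 0} and extent = {64, 64, 1}, which is the region
// passed to DetectHazard.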
3839
Jeff Leger178b1e52020-10-05 12:22:23 -04003840bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3841 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3842 const VkImageBlit *pRegions, VkFilter filter) const {
3843 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
3844 "vkCmdBlitImage");
3845}
3846
3847bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
3848 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
3849 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
3850 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
3851 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
3852}
3853
3854template <typename RegionType>
3855void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3856 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3857 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003858 auto *cb_access_context = GetAccessContext(commandBuffer);
3859 assert(cb_access_context);
3860 auto *context = cb_access_context->GetCurrentAccessContext();
3861 assert(context);
3862
3863 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003864 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003865
3866 for (uint32_t region = 0; region < regionCount; region++) {
3867 const auto &blit_region = pRegions[region];
3868 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003869 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
3870 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
3871 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
3872 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
3873 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
3874 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003875 context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003876 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003877 }
3878 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003879 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
3880 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
3881 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
3882 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
3883 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
3884 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003885 context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003886 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003887 }
3888 }
3889}
locke-lunarg36ba2592020-04-03 09:42:04 -06003890
Jeff Leger178b1e52020-10-05 12:22:23 -04003891void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3892 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3893 const VkImageBlit *pRegions, VkFilter filter) {
3894 auto *cb_access_context = GetAccessContext(commandBuffer);
3895 assert(cb_access_context);
3896 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
3897 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
3898 pRegions, filter);
3899 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
3900}
3901
3902void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
3903 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
3904 auto *cb_access_context = GetAccessContext(commandBuffer);
3905 assert(cb_access_context);
3906 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
3907 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
3908 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
3909 pBlitImageInfo->filter, tag);
3910}
3911
John Zulauffaea0ee2021-01-14 14:01:32 -07003912bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
3913 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
3914 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
3915 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06003916 bool skip = false;
3917 if (drawCount == 0) return skip;
3918
3919 const auto *buf_state = Get<BUFFER_STATE>(buffer);
3920 VkDeviceSize size = struct_size;
3921 if (drawCount == 1 || stride == size) {
3922 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06003923 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06003924 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3925 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06003926 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003927 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06003928 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003929 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06003930 }
3931 } else {
3932 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003933 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06003934 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3935 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06003936 skip |= LogError(buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003937 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
3938 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003939 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06003940 break;
3941 }
3942 }
3943 }
3944 return skip;
3945}
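// Worked example of the ranges checked above, using sizeof(VkDrawIndirectCommand) == 16 bytes and
// drawCount == 3:
//   - stride == 16 (tightly packed): a single range [offset, offset + 48) is checked in one call.
//   - stride == 32 (padded records): three ranges are checked, [offset, offset + 16),
//     [offset + 32, offset + 48), and [offset + 64, offset + 80), one per draw record.
// Only the struct-sized prefix of each strided record is validated; the padding bytes between records are
// not inspected.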
3946
locke-lunarg61870c22020-06-09 14:51:50 -06003947void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag &tag, const VkDeviceSize struct_size,
3948 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
3949 uint32_t stride) {
locke-lunargff255f92020-05-13 18:53:52 -06003950 const auto *buf_state = Get<BUFFER_STATE>(buffer);
3951 VkDeviceSize size = struct_size;
3952 if (drawCount == 1 || stride == size) {
3953 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06003954 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003955 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06003956 } else {
3957 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003958 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003959 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
3960 tag);
locke-lunargff255f92020-05-13 18:53:52 -06003961 }
3962 }
3963}
3964
John Zulauffaea0ee2021-01-14 14:01:32 -07003965bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
3966 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
3967 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06003968 bool skip = false;
3969
3970 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06003971 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06003972 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3973 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06003974 skip |= LogError(count_buf_state->buffer(), string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003975 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06003976 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003977 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06003978 }
3979 return skip;
3980}
3981
locke-lunarg61870c22020-06-09 14:51:50 -06003982void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag &tag, VkBuffer buffer, VkDeviceSize offset) {
locke-lunargff255f92020-05-13 18:53:52 -06003983 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06003984 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003985 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06003986}
3987
locke-lunarg36ba2592020-04-03 09:42:04 -06003988bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06003989 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06003990 const auto *cb_access_context = GetAccessContext(commandBuffer);
3991 assert(cb_access_context);
3992 if (!cb_access_context) return skip;
3993
locke-lunarg61870c22020-06-09 14:51:50 -06003994 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06003995 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06003996}
3997
3998void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06003999 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004000 auto *cb_access_context = GetAccessContext(commandBuffer);
4001 assert(cb_access_context);
4002 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004003
locke-lunarg61870c22020-06-09 14:51:50 -06004004 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004005}
locke-lunarge1a67022020-04-29 00:15:36 -06004006
4007bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004008 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004009 const auto *cb_access_context = GetAccessContext(commandBuffer);
4010 assert(cb_access_context);
4011 if (!cb_access_context) return skip;
4012
4013 const auto *context = cb_access_context->GetCurrentAccessContext();
4014 assert(context);
4015 if (!context) return skip;
4016
locke-lunarg61870c22020-06-09 14:51:50 -06004017 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004018 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4019 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004020 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004021}
4022
4023void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004024 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004025 auto *cb_access_context = GetAccessContext(commandBuffer);
4026 assert(cb_access_context);
4027 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4028 auto *context = cb_access_context->GetCurrentAccessContext();
4029 assert(context);
4030
locke-lunarg61870c22020-06-09 14:51:50 -06004031 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4032 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004033}
4034
4035bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4036 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004037 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004038 const auto *cb_access_context = GetAccessContext(commandBuffer);
4039 assert(cb_access_context);
4040 if (!cb_access_context) return skip;
4041
locke-lunarg61870c22020-06-09 14:51:50 -06004042 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4043 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4044 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004045 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004046}
4047
4048void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4049 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004050 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004051 auto *cb_access_context = GetAccessContext(commandBuffer);
4052 assert(cb_access_context);
4053 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004054
locke-lunarg61870c22020-06-09 14:51:50 -06004055 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4056 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4057 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004058}
4059
4060bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4061 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004062 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004063 const auto *cb_access_context = GetAccessContext(commandBuffer);
4064 assert(cb_access_context);
4065 if (!cb_access_context) return skip;
4066
locke-lunarg61870c22020-06-09 14:51:50 -06004067 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4068 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4069 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004070 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004071}
4072
4073void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4074 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004075 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004076 auto *cb_access_context = GetAccessContext(commandBuffer);
4077 assert(cb_access_context);
4078 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004079
locke-lunarg61870c22020-06-09 14:51:50 -06004080 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4081 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4082 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004083}
4084
4085bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4086 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004087 bool skip = false;
4088 if (drawCount == 0) return skip;
4089
locke-lunargff255f92020-05-13 18:53:52 -06004090 const auto *cb_access_context = GetAccessContext(commandBuffer);
4091 assert(cb_access_context);
4092 if (!cb_access_context) return skip;
4093
4094 const auto *context = cb_access_context->GetCurrentAccessContext();
4095 assert(context);
4096 if (!context) return skip;
4097
locke-lunarg61870c22020-06-09 14:51:50 -06004098 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4099 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004100 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4101 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004102
4103 // TODO: For now, we validate the whole vertex buffer. This might cause some false positives.
4104 // The VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4105 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004106 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004107 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004108}
4109
4110void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4111 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004112 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004113 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004114 auto *cb_access_context = GetAccessContext(commandBuffer);
4115 assert(cb_access_context);
4116 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4117 auto *context = cb_access_context->GetCurrentAccessContext();
4118 assert(context);
4119
locke-lunarg61870c22020-06-09 14:51:50 -06004120 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4121 cb_access_context->RecordDrawSubpassAttachment(tag);
4122 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004123
4124 // TODO: For now, we record the whole vertex buffer. This might cause some false positives.
4125 // The VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4126 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004127 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004128}
4129
4130bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4131 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004132 bool skip = false;
4133 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004134 const auto *cb_access_context = GetAccessContext(commandBuffer);
4135 assert(cb_access_context);
4136 if (!cb_access_context) return skip;
4137
4138 const auto *context = cb_access_context->GetCurrentAccessContext();
4139 assert(context);
4140 if (!context) return skip;
4141
locke-lunarg61870c22020-06-09 14:51:50 -06004142 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4143 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004144 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4145 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004146
4147 // TODO: For now, we validate the whole index and vertex buffer. This might cause some false positives.
4148 // The VkDrawIndexedIndirectCommand buffer could be changed until SubmitQueue.
4149 // We will validate the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004150 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004151 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004152}
4153
4154void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4155 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004156 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004157 auto *cb_access_context = GetAccessContext(commandBuffer);
4158 assert(cb_access_context);
4159 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4160 auto *context = cb_access_context->GetCurrentAccessContext();
4161 assert(context);
4162
locke-lunarg61870c22020-06-09 14:51:50 -06004163 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4164 cb_access_context->RecordDrawSubpassAttachment(tag);
4165 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004166
4167 // TODO: For now, we record the whole index and vertex buffer. This might cause some false positives.
4168 // The VkDrawIndexedIndirectCommand buffer could be changed until SubmitQueue.
4169 // We will record the index and vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004170 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004171}
4172
4173bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4174 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4175 uint32_t stride, const char *function) const {
4176 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004177 const auto *cb_access_context = GetAccessContext(commandBuffer);
4178 assert(cb_access_context);
4179 if (!cb_access_context) return skip;
4180
4181 const auto *context = cb_access_context->GetCurrentAccessContext();
4182 assert(context);
4183 if (!context) return skip;
4184
locke-lunarg61870c22020-06-09 14:51:50 -06004185 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4186 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004187 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4188 maxDrawCount, stride, function);
4189 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004190
4191 // TODO: For now, we validate the whole vertex buffer. This might cause some false positives.
4192 // The VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4193 // We will validate the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004194 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004195 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004196}
4197
4198bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4199 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4200 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004201 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4202 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004203}
4204
4205void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4206 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4207 uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004208 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4209 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004210 auto *cb_access_context = GetAccessContext(commandBuffer);
4211 assert(cb_access_context);
4212 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECTCOUNT);
4213 auto *context = cb_access_context->GetCurrentAccessContext();
4214 assert(context);
4215
locke-lunarg61870c22020-06-09 14:51:50 -06004216 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4217 cb_access_context->RecordDrawSubpassAttachment(tag);
4218 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4219 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004220
4221 // TODO: For now, we record the whole vertex buffer. This might cause some false positives.
4222 // The VkDrawIndirectCommand buffer could be changed until SubmitQueue.
4223 // We will record the vertex buffer in SubmitQueue in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004224 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004225}
4226
4227bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4228 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4229 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004230 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4231 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004232}
4233
4234void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4235 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4236 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004237 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4238 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004239 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004240}
4241
4242bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4243 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4244 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004245 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4246 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004247}
4248
4249void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4250 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4251 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004252 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4253 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004254 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4255}
4256
4257bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4258 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4259 uint32_t stride, const char *function) const {
4260 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004261 const auto *cb_access_context = GetAccessContext(commandBuffer);
4262 assert(cb_access_context);
4263 if (!cb_access_context) return skip;
4264
4265 const auto *context = cb_access_context->GetCurrentAccessContext();
4266 assert(context);
4267 if (!context) return skip;
4268
locke-lunarg61870c22020-06-09 14:51:50 -06004269 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4270 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004271 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4272 offset, maxDrawCount, stride, function);
4273 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004274
 4275    // TODO: For now, we validate the whole index and vertex buffers, which may cause false positives.
 4276    // The contents of the VkDrawIndexedIndirectCommand buffer can change up until queue submission.
 4277    // In the future we will validate the index and vertex buffers at queue submission instead.
locke-lunarg61870c22020-06-09 14:51:50 -06004278 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004279 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004280}
4281
4282bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4283 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4284 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004285 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4286 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004287}
4288
4289void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4290 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4291 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004292 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4293 maxDrawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004294 auto *cb_access_context = GetAccessContext(commandBuffer);
4295 assert(cb_access_context);
4296 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECTCOUNT);
4297 auto *context = cb_access_context->GetCurrentAccessContext();
4298 assert(context);
4299
locke-lunarg61870c22020-06-09 14:51:50 -06004300 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4301 cb_access_context->RecordDrawSubpassAttachment(tag);
4302 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4303 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004304
 4305    // TODO: For now, we record the whole index and vertex buffers, which may cause false positives.
 4306    // The contents of the VkDrawIndexedIndirectCommand buffer can change up until queue submission.
locke-lunarg61870c22020-06-09 14:51:50 -06004307    // In the future we will record the index and vertex buffers at queue submission instead.
4308 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004309}
4310
4311bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4312 VkDeviceSize offset, VkBuffer countBuffer,
4313 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4314 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004315 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4316 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004317}
4318
4319void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4320 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4321 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004322 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4323 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004324 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4325}
4326
4327bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4328 VkDeviceSize offset, VkBuffer countBuffer,
4329 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4330 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004331 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4332 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004333}
4334
4335void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4336 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4337 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004338 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4339 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004340 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4341}
4342
4343bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4344 const VkClearColorValue *pColor, uint32_t rangeCount,
4345 const VkImageSubresourceRange *pRanges) const {
4346 bool skip = false;
4347 const auto *cb_access_context = GetAccessContext(commandBuffer);
4348 assert(cb_access_context);
4349 if (!cb_access_context) return skip;
4350
4351 const auto *context = cb_access_context->GetCurrentAccessContext();
4352 assert(context);
4353 if (!context) return skip;
4354
4355 const auto *image_state = Get<IMAGE_STATE>(image);
4356
4357 for (uint32_t index = 0; index < rangeCount; index++) {
4358 const auto &range = pRanges[index];
4359 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004360 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004361 if (hazard.hazard) {
4362 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004363 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004364 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004365 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004366 }
4367 }
4368 }
4369 return skip;
4370}
4371
4372void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4373 const VkClearColorValue *pColor, uint32_t rangeCount,
4374 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004375 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004376 auto *cb_access_context = GetAccessContext(commandBuffer);
4377 assert(cb_access_context);
4378 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4379 auto *context = cb_access_context->GetCurrentAccessContext();
4380 assert(context);
4381
4382 const auto *image_state = Get<IMAGE_STATE>(image);
4383
4384 for (uint32_t index = 0; index < rangeCount; index++) {
4385 const auto &range = pRanges[index];
4386 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004387 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004388 }
4389 }
4390}
4391
4392bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4393 VkImageLayout imageLayout,
4394 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4395 const VkImageSubresourceRange *pRanges) const {
4396 bool skip = false;
4397 const auto *cb_access_context = GetAccessContext(commandBuffer);
4398 assert(cb_access_context);
4399 if (!cb_access_context) return skip;
4400
4401 const auto *context = cb_access_context->GetCurrentAccessContext();
4402 assert(context);
4403 if (!context) return skip;
4404
4405 const auto *image_state = Get<IMAGE_STATE>(image);
4406
4407 for (uint32_t index = 0; index < rangeCount; index++) {
4408 const auto &range = pRanges[index];
4409 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004410 auto hazard = context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004411 if (hazard.hazard) {
4412 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004413 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004414 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004415 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004416 }
4417 }
4418 }
4419 return skip;
4420}
4421
4422void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4423 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4424 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004425 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004426 auto *cb_access_context = GetAccessContext(commandBuffer);
4427 assert(cb_access_context);
4428 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
4429 auto *context = cb_access_context->GetCurrentAccessContext();
4430 assert(context);
4431
4432 const auto *image_state = Get<IMAGE_STATE>(image);
4433
4434 for (uint32_t index = 0; index < rangeCount; index++) {
4435 const auto &range = pRanges[index];
4436 if (image_state) {
John Zulauf110413c2021-03-20 05:38:38 -06004437 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004438 }
4439 }
4440}
4441
4442bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
4443 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
4444 VkDeviceSize dstOffset, VkDeviceSize stride,
4445 VkQueryResultFlags flags) const {
4446 bool skip = false;
4447 const auto *cb_access_context = GetAccessContext(commandBuffer);
4448 assert(cb_access_context);
4449 if (!cb_access_context) return skip;
4450
4451 const auto *context = cb_access_context->GetCurrentAccessContext();
4452 assert(context);
4453 if (!context) return skip;
4454
4455 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4456
4457 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004458 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004459 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004460 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004461 skip |=
4462 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4463 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004464 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004465 }
4466 }
locke-lunargff255f92020-05-13 18:53:52 -06004467
 4468    // TODO: Track accesses to the VkQueryPool.
locke-lunarge1a67022020-04-29 00:15:36 -06004469 return skip;
4470}
4471
4472void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
4473 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4474 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004475 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
4476 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06004477 auto *cb_access_context = GetAccessContext(commandBuffer);
4478 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06004479 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06004480 auto *context = cb_access_context->GetCurrentAccessContext();
4481 assert(context);
4482
4483 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4484
4485 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004486 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004487 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004488 }
locke-lunargff255f92020-05-13 18:53:52 -06004489
 4490    // TODO: Track accesses to the VkQueryPool.
locke-lunarge1a67022020-04-29 00:15:36 -06004491}
4492
4493bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4494 VkDeviceSize size, uint32_t data) const {
4495 bool skip = false;
4496 const auto *cb_access_context = GetAccessContext(commandBuffer);
4497 assert(cb_access_context);
4498 if (!cb_access_context) return skip;
4499
4500 const auto *context = cb_access_context->GetCurrentAccessContext();
4501 assert(context);
4502 if (!context) return skip;
4503
4504 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4505
4506 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004507 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004508 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004509 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004510 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004511 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004512 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004513 }
4514 }
4515 return skip;
4516}
4517
4518void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4519 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004520 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06004521 auto *cb_access_context = GetAccessContext(commandBuffer);
4522 assert(cb_access_context);
4523 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
4524 auto *context = cb_access_context->GetCurrentAccessContext();
4525 assert(context);
4526
4527 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4528
4529 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004530 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004531 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004532 }
4533}
4534
4535bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4536 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4537 const VkImageResolve *pRegions) const {
4538 bool skip = false;
4539 const auto *cb_access_context = GetAccessContext(commandBuffer);
4540 assert(cb_access_context);
4541 if (!cb_access_context) return skip;
4542
4543 const auto *context = cb_access_context->GetCurrentAccessContext();
4544 assert(context);
4545 if (!context) return skip;
4546
4547 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4548 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4549
4550 for (uint32_t region = 0; region < regionCount; region++) {
4551 const auto &resolve_region = pRegions[region];
4552 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004553 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06004554 resolve_region.srcOffset, resolve_region.extent);
4555 if (hazard.hazard) {
4556 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004557 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004558 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004559 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004560 }
4561 }
4562
4563 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004564 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06004565 resolve_region.dstOffset, resolve_region.extent);
4566 if (hazard.hazard) {
4567 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004568 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004569 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004570 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004571 }
4572 if (skip) break;
4573 }
4574 }
4575
4576 return skip;
4577}
4578
4579void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4580 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4581 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004582 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4583 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06004584 auto *cb_access_context = GetAccessContext(commandBuffer);
4585 assert(cb_access_context);
4586 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
4587 auto *context = cb_access_context->GetCurrentAccessContext();
4588 assert(context);
4589
4590 auto *src_image = Get<IMAGE_STATE>(srcImage);
4591 auto *dst_image = Get<IMAGE_STATE>(dstImage);
4592
4593 for (uint32_t region = 0; region < regionCount; region++) {
4594 const auto &resolve_region = pRegions[region];
4595 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004596 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004597 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004598 }
4599 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004600 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004601 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004602 }
4603 }
4604}
4605
Jeff Leger178b1e52020-10-05 12:22:23 -04004606bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
4607 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
4608 bool skip = false;
4609 const auto *cb_access_context = GetAccessContext(commandBuffer);
4610 assert(cb_access_context);
4611 if (!cb_access_context) return skip;
4612
4613 const auto *context = cb_access_context->GetCurrentAccessContext();
4614 assert(context);
4615 if (!context) return skip;
4616
4617 const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
4618 const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
4619
4620 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
4621 const auto &resolve_region = pResolveImageInfo->pRegions[region];
4622 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004623 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04004624 resolve_region.srcOffset, resolve_region.extent);
4625 if (hazard.hazard) {
4626 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
4627 "vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
4628 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004629 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004630 }
4631 }
4632
4633 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004634 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04004635 resolve_region.dstOffset, resolve_region.extent);
4636 if (hazard.hazard) {
4637 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
4638 "vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
4639 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004640 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004641 }
4642 if (skip) break;
4643 }
4644 }
4645
4646 return skip;
4647}
4648
4649void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
4650 const VkResolveImageInfo2KHR *pResolveImageInfo) {
4651 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
4652 auto *cb_access_context = GetAccessContext(commandBuffer);
4653 assert(cb_access_context);
4654 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
4655 auto *context = cb_access_context->GetCurrentAccessContext();
4656 assert(context);
4657
4658 auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
4659 auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
4660
4661 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
4662 const auto &resolve_region = pResolveImageInfo->pRegions[region];
4663 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004664 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004665 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004666 }
4667 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004668 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004669 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004670 }
4671 }
4672}
4673
locke-lunarge1a67022020-04-29 00:15:36 -06004674bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4675 VkDeviceSize dataSize, const void *pData) const {
4676 bool skip = false;
4677 const auto *cb_access_context = GetAccessContext(commandBuffer);
4678 assert(cb_access_context);
4679 if (!cb_access_context) return skip;
4680
4681 const auto *context = cb_access_context->GetCurrentAccessContext();
4682 assert(context);
4683 if (!context) return skip;
4684
4685 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4686
4687 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004688 // VK_WHOLE_SIZE not allowed
4689 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004690 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004691 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004692 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004693 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004694 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004695 }
4696 }
4697 return skip;
4698}
4699
4700void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4701 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004702 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06004703 auto *cb_access_context = GetAccessContext(commandBuffer);
4704 assert(cb_access_context);
4705 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
4706 auto *context = cb_access_context->GetCurrentAccessContext();
4707 assert(context);
4708
4709 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4710
4711 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004712 // VK_WHOLE_SIZE not allowed
4713 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004714 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004715 }
4716}
locke-lunargff255f92020-05-13 18:53:52 -06004717
4718bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
4719 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
4720 bool skip = false;
4721 const auto *cb_access_context = GetAccessContext(commandBuffer);
4722 assert(cb_access_context);
4723 if (!cb_access_context) return skip;
4724
4725 const auto *context = cb_access_context->GetCurrentAccessContext();
4726 assert(context);
4727 if (!context) return skip;
4728
4729 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4730
4731 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004732 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004733 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunargff255f92020-05-13 18:53:52 -06004734 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004735 skip |=
4736 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4737 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004738 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004739 }
4740 }
4741 return skip;
4742}
4743
4744void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
4745 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004746 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06004747 auto *cb_access_context = GetAccessContext(commandBuffer);
4748 assert(cb_access_context);
4749 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
4750 auto *context = cb_access_context->GetCurrentAccessContext();
4751 assert(context);
4752
4753 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4754
4755 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004756 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004757 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004758 }
4759}
John Zulauf49beb112020-11-04 16:06:31 -07004760
4761bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
4762 bool skip = false;
4763 const auto *cb_context = GetAccessContext(commandBuffer);
4764 assert(cb_context);
4765 if (!cb_context) return skip;
4766
John Zulauf36ef9282021-02-02 11:47:24 -07004767 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07004768 return set_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004769}
4770
4771void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
4772 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
4773 auto *cb_context = GetAccessContext(commandBuffer);
4774 assert(cb_context);
4775 if (!cb_context) return;
John Zulauf36ef9282021-02-02 11:47:24 -07004776 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
4777 set_event_op.Record(cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004778}
4779
John Zulauf4edde622021-02-15 08:54:50 -07004780bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4781 const VkDependencyInfoKHR *pDependencyInfo) const {
4782 bool skip = false;
4783 const auto *cb_context = GetAccessContext(commandBuffer);
4784 assert(cb_context);
4785 if (!cb_context || !pDependencyInfo) return skip;
4786
4787 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
4788 return set_event_op.Validate(*cb_context);
4789}
4790
4791void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4792 const VkDependencyInfoKHR *pDependencyInfo) {
4793 StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
4794 auto *cb_context = GetAccessContext(commandBuffer);
4795 assert(cb_context);
4796 if (!cb_context || !pDependencyInfo) return;
4797
4798 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
4799 set_event_op.Record(cb_context);
4800}
4801
John Zulauf49beb112020-11-04 16:06:31 -07004802bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
4803 VkPipelineStageFlags stageMask) const {
4804 bool skip = false;
4805 const auto *cb_context = GetAccessContext(commandBuffer);
4806 assert(cb_context);
4807 if (!cb_context) return skip;
4808
John Zulauf36ef9282021-02-02 11:47:24 -07004809 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07004810 return reset_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004811}
4812
4813void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
4814 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
4815 auto *cb_context = GetAccessContext(commandBuffer);
4816 assert(cb_context);
4817 if (!cb_context) return;
4818
John Zulauf36ef9282021-02-02 11:47:24 -07004819 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
4820 reset_event_op.Record(cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004821}
4822
John Zulauf4edde622021-02-15 08:54:50 -07004823bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4824 VkPipelineStageFlags2KHR stageMask) const {
4825 bool skip = false;
4826 const auto *cb_context = GetAccessContext(commandBuffer);
4827 assert(cb_context);
4828 if (!cb_context) return skip;
4829
4830 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
4831 return reset_event_op.Validate(*cb_context);
4832}
4833
4834void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4835 VkPipelineStageFlags2KHR stageMask) {
4836 StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
4837 auto *cb_context = GetAccessContext(commandBuffer);
4838 assert(cb_context);
4839 if (!cb_context) return;
4840
4841 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
4842 reset_event_op.Record(cb_context);
4843}
4844
John Zulauf49beb112020-11-04 16:06:31 -07004845bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4846 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
4847 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
4848 uint32_t bufferMemoryBarrierCount,
4849 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4850 uint32_t imageMemoryBarrierCount,
4851 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
4852 bool skip = false;
4853 const auto *cb_context = GetAccessContext(commandBuffer);
4854 assert(cb_context);
4855 if (!cb_context) return skip;
4856
John Zulauf36ef9282021-02-02 11:47:24 -07004857 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
4858 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
4859 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufd5115702021-01-18 12:34:33 -07004860 return wait_events_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004861}
4862
4863void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4864 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
4865 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
4866 uint32_t bufferMemoryBarrierCount,
4867 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4868 uint32_t imageMemoryBarrierCount,
4869 const VkImageMemoryBarrier *pImageMemoryBarriers) {
4870 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
4871 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
4872 imageMemoryBarrierCount, pImageMemoryBarriers);
4873
4874 auto *cb_context = GetAccessContext(commandBuffer);
4875 assert(cb_context);
4876 if (!cb_context) return;
4877
John Zulauf36ef9282021-02-02 11:47:24 -07004878 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
4879 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
4880 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
4881 return wait_events_op.Record(cb_context);
John Zulauf4a6105a2020-11-17 15:11:05 -07004882}
4883
John Zulauf4edde622021-02-15 08:54:50 -07004884bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4885 const VkDependencyInfoKHR *pDependencyInfos) const {
4886 bool skip = false;
4887 const auto *cb_context = GetAccessContext(commandBuffer);
4888 assert(cb_context);
4889 if (!cb_context) return skip;
4890
4891 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
4892 skip |= wait_events_op.Validate(*cb_context);
4893 return skip;
4894}
4895
4896void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4897 const VkDependencyInfoKHR *pDependencyInfos) {
4898 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
4899
4900 auto *cb_context = GetAccessContext(commandBuffer);
4901 assert(cb_context);
4902 if (!cb_context) return;
4903
4904 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
4905 wait_events_op.Record(cb_context);
4906}
4907
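// Clear the recorded "first scope" accesses for every address type and reset the execution scope,
// returning the event to a state where a subsequent set starts tracking from scratch.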
John Zulauf4a6105a2020-11-17 15:11:05 -07004908void SyncEventState::ResetFirstScope() {
4909 for (const auto address_type : kAddressTypes) {
4910 first_scope[static_cast<size_t>(address_type)].clear();
4911 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07004912 scope = SyncExecScope();
John Zulauf4a6105a2020-11-17 15:11:05 -07004913}
4914
 4915// Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use.
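// A wait is ignored when:
//   * the event was set with vkCmdSetEvent but waited on with vkCmdWaitEvents2 (SetVsWait2),
//   * the event was reset without an applicable barrier in between (ResetWaitRace / Reset2WaitRace),
//   * the set itself was unsynchronized (SetRace), or
//   * srcStageMask is missing stages from the scope captured at set time (MissingStageBits).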
John Zulauf4edde622021-02-15 08:54:50 -07004916SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd, VkPipelineStageFlags2KHR srcStageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07004917 IgnoreReason reason = NotIgnored;
4918
John Zulauf4edde622021-02-15 08:54:50 -07004919 if ((CMD_WAITEVENTS2KHR == cmd) && (CMD_SETEVENT == last_command)) {
4920 reason = SetVsWait2;
4921 } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR) && !HasBarrier(0U, 0U)) {
4922 reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
John Zulauf4a6105a2020-11-17 15:11:05 -07004923 } else if (unsynchronized_set) {
4924 reason = SetRace;
4925 } else {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004926 const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07004927 if (missing_bits) reason = MissingStageBits;
4928 }
4929
4930 return reason;
4931}
4932
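// Returns true when nothing has been recorded against the event yet, or when the queried stage mask or
// execution scope overlaps the barriers already applied to the event; VK_PIPELINE_STAGE_ALL_COMMANDS_BIT
// acts as a wildcard on either side.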
Jeremy Gebben40a22942020-12-22 14:22:06 -07004933bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07004934 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
4935 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
4936 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07004937}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07004938
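// Capture a synchronization1-style barrier call as a single barrier set, deriving the set-wide
// src/dst execution scopes from the stage masks and dehandling each barrier for replay.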
John Zulauf36ef9282021-02-02 11:47:24 -07004939SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
4940 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
4941 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07004942 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
4943 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
4944 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf4edde622021-02-15 08:54:50 -07004945 : SyncOpBase(cmd), barriers_(1) {
4946 auto &barrier_set = barriers_[0];
4947 barrier_set.dependency_flags = dependencyFlags;
4948 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
4949 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07004950 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
John Zulauf4edde622021-02-15 08:54:50 -07004951 barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
4952 pMemoryBarriers);
4953 barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
4954 bufferMemoryBarrierCount, pBufferMemoryBarriers);
4955 barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
4956 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07004957}
4958
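// Capture one barrier set per VkDependencyInfoKHR (synchronization2 path); each set takes its
// execution scopes from the global stage masks of its dependency info, while the individual
// barriers carry their own per-barrier stage masks.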
John Zulauf4edde622021-02-15 08:54:50 -07004959SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
4960 const VkDependencyInfoKHR *dep_infos)
4961 : SyncOpBase(cmd), barriers_(event_count) {
4962 for (uint32_t i = 0; i < event_count; i++) {
4963 const auto &dep_info = dep_infos[i];
4964 auto &barrier_set = barriers_[i];
4965 barrier_set.dependency_flags = dep_info.dependencyFlags;
4966 auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
4967 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
4968 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
4969 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
4970 barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
4971 dep_info.pMemoryBarriers);
4972 barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
4973 dep_info.pBufferMemoryBarriers);
4974 barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
4975 dep_info.pImageMemoryBarriers);
4976 }
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07004977}
4978
John Zulauf36ef9282021-02-02 11:47:24 -07004979SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulaufd5115702021-01-18 12:34:33 -07004980 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
4981 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
4982 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
4983 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
4984 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07004985 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
John Zulaufd5115702021-01-18 12:34:33 -07004986 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}
4987
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07004988SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
4989 const VkDependencyInfoKHR &dep_info)
John Zulauf4edde622021-02-15 08:54:50 -07004990 : SyncOpBarriers(cmd, sync_state, queue_flags, 1, &dep_info) {}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07004991
John Zulaufe7f6a5e2021-01-16 14:31:18 -07004992bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
4993 bool skip = false;
4994 const auto *context = cb_context.GetCurrentAccessContext();
4995 assert(context);
4996 if (!context) return skip;
John Zulauf6fdf3d02021-03-05 16:50:47 -07004997 assert(barriers_.size() == 1); // PipelineBarriers only support a single barrier set.
4998
John Zulaufe7f6a5e2021-01-16 14:31:18 -07004999 // Validate Image Layout transitions
John Zulauf6fdf3d02021-03-05 16:50:47 -07005000 const auto &barrier_set = barriers_[0];
5001 for (const auto &image_barrier : barrier_set.image_memory_barriers) {
5002 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
5003 const auto *image_state = image_barrier.image.get();
5004 if (!image_state) continue;
5005 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
5006 if (hazard.hazard) {
5007 // PHASE1 TODO -- add tag information to log msg when useful.
5008 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005009 const auto image_handle = image_state->image();
John Zulauf6fdf3d02021-03-05 16:50:47 -07005010 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
5011 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5012 string_SyncHazard(hazard.hazard), image_barrier.index,
5013 sync_state.report_data->FormatHandle(image_handle).c_str(),
5014 cb_context.FormatUsage(hazard).c_str());
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005015 }
5016 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005017 return skip;
5018}
5019
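// Factory that adapts pipeline barrier application to the generic ApplyBarriers / ApplyGlobalBarriers
// templates below: it supplies the per-range barrier functors and converts buffer/image barriers into
// address ranges (resources without a simple binding yield empty range generators).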
John Zulaufd5115702021-01-18 12:34:33 -07005020struct SyncOpPipelineBarrierFunctorFactory {
5021 using BarrierOpFunctor = PipelineBarrierOp;
5022 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5023 using GlobalBarrierOpFunctor = PipelineBarrierOp;
5024 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5025 using BufferRange = ResourceAccessRange;
5026 using ImageRange = subresource_adapter::ImageRangeGenerator;
5027 using GlobalRange = ResourceAccessRange;
5028
5029 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
5030 return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
5031 }
5032 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
5033 return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
5034 }
5035 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
5036 return GlobalBarrierOpFunctor(barrier, false);
5037 }
5038
5039 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
5040 if (!SimpleBinding(buffer)) return ResourceAccessRange();
5041 const auto base_address = ResourceBaseAddress(buffer);
5042 return (range + base_address);
5043 }
John Zulauf110413c2021-03-20 05:38:38 -06005044 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulauf264cce02021-02-05 14:40:47 -07005045 if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
John Zulaufd5115702021-01-18 12:34:33 -07005046
5047 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06005048 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07005049 return range_gen;
5050 }
5051 GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
5052};
5053
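// Apply each resource (buffer/image) barrier to the access state map for its address type, using the
// factory to build the update functor and the range generator covering the affected memory.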
5054template <typename Barriers, typename FunctorFactory>
5055void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
5056 AccessContext *context) {
5057 for (const auto &barrier : barriers) {
5058 const auto *state = barrier.GetState();
5059 if (state) {
5060 auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
5061 auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
5062 auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
5063 UpdateMemoryAccessState(accesses, update_action, &range_gen);
5064 }
5065 }
5066}
5067
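// Collect all global memory barriers into one batched functor and apply it across the full address
// range of every address type.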
5068template <typename Barriers, typename FunctorFactory>
5069void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
5070 AccessContext *access_context) {
5071 auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
5072 for (const auto &barrier : barriers) {
5073 barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
5074 }
5075 for (const auto address_type : kAddressTypes) {
5076 auto range_gen = factory.MakeGlobalRangeGen(address_type);
5077 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
5078 }
5079}
5080
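// Record path: apply the buffer and image barriers, then the global memory barriers, to the current
// access context, and finally apply the execution scopes to the command buffer's event state via
// ApplyGlobalBarriersToEvents.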
John Zulauf36ef9282021-02-02 11:47:24 -07005081void SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005082 SyncOpPipelineBarrierFunctorFactory factory;
5083 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf36ef9282021-02-02 11:47:24 -07005084 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005085
John Zulauf4edde622021-02-15 08:54:50 -07005086 // Pipeline barriers only have a single barrier set, unlike WaitEvents2
5087 assert(barriers_.size() == 1);
5088 const auto &barrier_set = barriers_[0];
5089 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5090 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5091 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
5092
5093 if (barrier_set.single_exec_scope) {
5094 cb_context->ApplyGlobalBarriersToEvents(barrier_set.src_exec_scope, barrier_set.dst_exec_scope);
5095 } else {
5096 for (const auto &barrier : barrier_set.memory_barriers) {
5097 cb_context->ApplyGlobalBarriersToEvents(barrier.src_exec_scope, barrier.dst_exec_scope);
5098 }
5099 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005100}
5101
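// Convert the VkMemoryBarrier array into SyncBarriers using the set-wide execution scopes; when no
// memory barriers are supplied, a pure execution barrier is synthesized so the scopes still apply.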
John Zulauf4edde622021-02-15 08:54:50 -07005102void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
5103 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
5104 const VkMemoryBarrier *barriers) {
5105 memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005106 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005107 const auto &barrier = barriers[barrier_index];
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005108 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005109 memory_barriers.emplace_back(sync_barrier);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005110 }
5111 if (0 == memory_barrier_count) {
5112 // If there are no global memory barriers, force an exec barrier
John Zulauf4edde622021-02-15 08:54:50 -07005113 memory_barriers.emplace_back(SyncBarrier(src, dst));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005114 }
John Zulauf4edde622021-02-15 08:54:50 -07005115 single_exec_scope = true;
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005116}
5117
John Zulauf4edde622021-02-15 08:54:50 -07005118void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5119 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5120 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
5121 buffer_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005122 for (uint32_t index = 0; index < barrier_count; index++) {
5123 const auto &barrier = barriers[index];
5124 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5125 if (buffer) {
5126 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5127 const auto range = MakeRange(barrier.offset, barrier_size);
5128 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005129 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005130 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005131 buffer_memory_barriers.emplace_back();
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005132 }
5133 }
5134}
5135
John Zulauf4edde622021-02-15 08:54:50 -07005136void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
5137 uint32_t memory_barrier_count, const VkMemoryBarrier2KHR *barriers) {
5138 memory_barriers.reserve(memory_barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005139 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005140 const auto &barrier = barriers[barrier_index];
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005141 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5142 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5143 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005144 memory_barriers.emplace_back(sync_barrier);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005145 }
John Zulauf4edde622021-02-15 08:54:50 -07005146 single_exec_scope = false;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005147}
5148
John Zulauf4edde622021-02-15 08:54:50 -07005149void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5150 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
5151 const VkBufferMemoryBarrier2KHR *barriers) {
5152 buffer_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005153 for (uint32_t index = 0; index < barrier_count; index++) {
5154 const auto &barrier = barriers[index];
5155 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5156 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5157 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5158 if (buffer) {
5159 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5160 const auto range = MakeRange(barrier.offset, barrier_size);
5161 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005162 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005163 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005164 buffer_memory_barriers.emplace_back();
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005165 }
5166 }
5167}
5168
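// Dehandle the VkImageMemoryBarrier array, normalizing each subresource range; barriers whose image
// is unknown still get a placeholder entry that records the original index.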
John Zulauf4edde622021-02-15 08:54:50 -07005169void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5170 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5171 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
5172 image_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005173 for (uint32_t index = 0; index < barrier_count; index++) {
5174 const auto &barrier = barriers[index];
5175 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5176 if (image) {
5177 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5178 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005179 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005180 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005181 image_memory_barriers.emplace_back();
5182 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005183 }
5184 }
5185}
John Zulaufd5115702021-01-18 12:34:33 -07005186
John Zulauf4edde622021-02-15 08:54:50 -07005187void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5188 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
5189 const VkImageMemoryBarrier2KHR *barriers) {
5190 image_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005191 for (uint32_t index = 0; index < barrier_count; index++) {
5192 const auto &barrier = barriers[index];
5193 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5194 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5195 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5196 if (image) {
5197 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5198 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005199 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005200 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005201 image_memory_barriers.emplace_back();
5202 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005203 }
5204 }
5205}
5206
John Zulauf36ef9282021-02-02 11:47:24 -07005207SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
John Zulaufd5115702021-01-18 12:34:33 -07005208 const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5209 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5210 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5211 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005212 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005213 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
5214 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07005215 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07005216}
5217
John Zulauf4edde622021-02-15 08:54:50 -07005218SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
5219 const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
5220 : SyncOpBarriers(cmd, sync_state, queue_flags, eventCount, pDependencyInfo) {
5221 MakeEventsList(sync_state, eventCount, pEvents);
5222 assert(events_.size() == barriers_.size()); // Just so nobody gets clever and decides to cull the event or barrier arrays
5223}
5224
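// Validate the wait against recorded event state: flag HOST-stage usage (unsupported by synchronization validation),
// waits that are ignored relative to the matching set/reset (see IsIgnoredByWait), image-barrier layout transitions
// that hazard against the event's first scope, and srcStageMask bits not covered by any event's stageMask.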
John Zulaufd5115702021-01-18 12:34:33 -07005225bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005226 const char *const ignored = "Wait operation is ignored for this event.";
5227 bool skip = false;
5228 const auto &sync_state = cb_context.GetSyncState();
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005229 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer();
John Zulaufd5115702021-01-18 12:34:33 -07005230
John Zulauf4edde622021-02-15 08:54:50 -07005231 for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
5232 const auto &barrier_set = barriers_[barrier_set_index];
5233 if (barrier_set.single_exec_scope) {
5234 if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5235 const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5236 skip = sync_state.LogInfo(command_buffer_handle, vuid,
5237 "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
5238 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
5239 } else {
5240 const auto &barriers = barrier_set.memory_barriers;
5241 for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
5242 const auto &barrier = barriers[barrier_index];
5243 if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5244 const std::string vuid =
5245 std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
5246 skip =
5247 sync_state.LogInfo(command_buffer_handle, vuid,
5248 "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
5249 CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
5250 "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
5251 }
5252 }
5253 }
5254 }
John Zulaufd5115702021-01-18 12:34:33 -07005255 }
5256
Jeremy Gebben40a22942020-12-22 14:22:06 -07005257 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulauf4edde622021-02-15 08:54:50 -07005258 VkPipelineStageFlags2KHR barrier_mask_params = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07005259 bool events_not_found = false;
John Zulauf669dfd52021-01-27 17:15:28 -07005260 const auto *events_context = cb_context.GetCurrentEventsContext();
5261 assert(events_context);
John Zulauf4edde622021-02-15 08:54:50 -07005262 size_t barrier_set_index = 0;
5263 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
5265 for (const auto &event : events_) {
5266 const auto *sync_event = events_context->Get(event.get());
5267 const auto &barrier_set = barriers_[barrier_set_index];
5268 if (!sync_event) {
5269 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
5270 // or solve this with replay creating the SyncEventState in the queue context... also this will be a
5271 // new validation error... wait without previously submitted set event...
5272 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
5273 barrier_set_index += barrier_set_incr;
5274 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulaufd5115702021-01-18 12:34:33 -07005275 }
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005276 const auto event_handle = sync_event->event->event();
John Zulauf4edde622021-02-15 08:54:50 -07005277 // TODO add "destroyed" checks
5278
5279 barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
5280 const auto &src_exec_scope = barrier_set.src_exec_scope;
5281 event_stage_masks |= sync_event->scope.mask_param;
5282 const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_, src_exec_scope.mask_param);
5283 if (ignore_reason) {
5284 switch (ignore_reason) {
5285 case SyncEventState::ResetWaitRace:
5286 case SyncEventState::Reset2WaitRace: {
5287                    // Four permutations of Reset and Wait calls...
5288 const char *vuid =
5289 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
5290 if (ignore_reason == SyncEventState::Reset2WaitRace) {
5291 vuid =
Jeremy Gebben476f5e22021-03-01 15:27:20 -07005292 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2KHR-event-03831" : "VUID-vkCmdResetEvent2KHR-event-03832";
John Zulauf4edde622021-02-15 08:54:50 -07005293 }
5294 const char *const message =
5295                        "%s: %s %s operation following %s without an intervening execution barrier, may cause a race condition. %s";
5296 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5297 sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
5298 CommandTypeString(sync_event->last_command), ignored);
5299 break;
5300 }
5301 case SyncEventState::SetRace: {
5302                    // Issue error message that the Wait is waiting on a signal subject to a race condition, and is thus
5303                    // ignored for this event
5304 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
5305 const char *const message =
5306                        "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
5307 const char *const reason = "First synchronization scope is undefined.";
5308 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5309 sync_state.report_data->FormatHandle(event_handle).c_str(),
5310 CommandTypeString(sync_event->last_command), reason, ignored);
5311 break;
5312 }
5313 case SyncEventState::MissingStageBits: {
5314 const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
5315 // Issue error message that event waited for is not in wait events scope
5316 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
5317 const char *const message =
5318                        "%s: %s stageMask 0x%" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
5319 ". Bits missing from srcStageMask %s. %s";
5320 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5321 sync_state.report_data->FormatHandle(event_handle).c_str(),
5322 sync_event->scope.mask_param, src_exec_scope.mask_param,
5323 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), ignored);
5324 break;
5325 }
5326 case SyncEventState::SetVsWait2: {
5327 skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2KHR-pEvents-03837",
5328 "%s: Follows set of %s by %s. Disallowed.", CmdName(),
5329 sync_state.report_data->FormatHandle(event_handle).c_str(),
5330 CommandTypeString(sync_event->last_command));
5331 break;
5332 }
5333 default:
5334 assert(ignore_reason == SyncEventState::NotIgnored);
5335 }
5336 } else if (barrier_set.image_memory_barriers.size()) {
5337 const auto &image_memory_barriers = barrier_set.image_memory_barriers;
5338 const auto *context = cb_context.GetCurrentAccessContext();
5339 assert(context);
5340 for (const auto &image_memory_barrier : image_memory_barriers) {
5341 if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
5342 const auto *image_state = image_memory_barrier.image.get();
5343 if (!image_state) continue;
John Zulauf110413c2021-03-20 05:38:38 -06005344 const auto &subresource_range = image_memory_barrier.range;
John Zulauf4edde622021-02-15 08:54:50 -07005345 const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
5346 const auto hazard =
5347 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
5348 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
5349 if (hazard.hazard) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005350 skip |= sync_state.LogError(image_state->image(), string_SyncHazardVUID(hazard.hazard),
John Zulauf4edde622021-02-15 08:54:50 -07005351 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5352 string_SyncHazard(hazard.hazard), image_memory_barrier.index,
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005353 sync_state.report_data->FormatHandle(image_state->image()).c_str(),
John Zulauf4edde622021-02-15 08:54:50 -07005354 cb_context.FormatUsage(hazard).c_str());
5355 break;
5356 }
John Zulaufd5115702021-01-18 12:34:33 -07005357 }
5358 }
John Zulauf4edde622021-02-15 08:54:50 -07005359        // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2 VUID-vkCmdWaitEvents2KHR-pEvents-03839
5361 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07005362 }
John Zulaufd5115702021-01-18 12:34:33 -07005363
5364 // Note that we can't check for HOST in pEvents as we don't track that set event type
John Zulauf4edde622021-02-15 08:54:50 -07005365 const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
John Zulaufd5115702021-01-18 12:34:33 -07005366 if (extra_stage_bits) {
5367 // Issue error message that event waited for is not in wait events scope
John Zulauf4edde622021-02-15 08:54:50 -07005368 // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
5369 const char *const vuid =
5370 (CMD_WAITEVENTS == cmd_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2KHR-pEvents-03838";
John Zulaufd5115702021-01-18 12:34:33 -07005371 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07005372 "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
John Zulaufd5115702021-01-18 12:34:33 -07005373 if (events_not_found) {
John Zulauf4edde622021-02-15 08:54:50 -07005374 skip |= sync_state.LogInfo(command_buffer_handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005375 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
John Zulaufd5115702021-01-18 12:34:33 -07005376 " vkCmdSetEvent may be in previously submitted command buffer.");
5377 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005378 skip |= sync_state.LogError(command_buffer_handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005379 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
John Zulaufd5115702021-01-18 12:34:33 -07005380 }
5381 }
5382 return skip;
5383}
5384
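// Functor factory used when applying a WaitEvents: barriers are restricted to the event's first scope (both the
// execution scope and the valid access scope), and the range generators are filtered against the event's
// first-scope access map, so only accesses actually in scope at set time are affected.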
5385struct SyncOpWaitEventsFunctorFactory {
5386 using BarrierOpFunctor = WaitEventBarrierOp;
5387 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5388 using GlobalBarrierOpFunctor = WaitEventBarrierOp;
5389 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5390 using BufferRange = EventSimpleRangeGenerator;
5391 using ImageRange = EventImageRangeGenerator;
5392 using GlobalRange = EventSimpleRangeGenerator;
5393
5394 // Need to restrict to only valid exec and access scope for this event
5395 // Pass by value is intentional to get a copy we can change without modifying the passed barrier
5396 SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07005397 barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
John Zulaufd5115702021-01-18 12:34:33 -07005398 barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
5399 return barrier;
5400 }
5401 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
5402 auto barrier = RestrictToEvent(barrier_arg);
5403 return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
5404 }
5405 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
5406 return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
5407 }
5408 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
5409 auto barrier = RestrictToEvent(barrier_arg);
5410 return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
5411 }
5412
5413 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
5414 const AccessAddressType address_type = GetAccessAddressType(buffer);
5415 const auto base_address = ResourceBaseAddress(buffer);
5416 ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
5417 EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
5418 return filtered_range_gen;
5419 }
John Zulauf110413c2021-03-20 05:38:38 -06005420 ImageRange MakeRangeGen(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range) const {
John Zulaufd5115702021-01-18 12:34:33 -07005421 if (!SimpleBinding(image)) return ImageRange();
5422 const auto address_type = GetAccessAddressType(image);
5423 const auto base_address = ResourceBaseAddress(image);
John Zulauf110413c2021-03-20 05:38:38 -06005424 subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), subresource_range, base_address);
John Zulaufd5115702021-01-18 12:34:33 -07005425 EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
5426
5427 return filtered_range_gen;
5428 }
5429 GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
5430 return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
5431 }
5432 SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
5433 SyncEventState *sync_event;
5434};
5435
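// Record the wait: import prior accesses, then for each event apply its barrier set (restricted to that event's
// first scope) as pending barriers, update the event's barrier mask, and finally resolve the pending state into the
// access context.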
John Zulauf36ef9282021-02-02 11:47:24 -07005436void SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
5437 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufd5115702021-01-18 12:34:33 -07005438 auto *access_context = cb_context->GetCurrentAccessContext();
5439 assert(access_context);
5440 if (!access_context) return;
John Zulauf669dfd52021-01-27 17:15:28 -07005441 auto *events_context = cb_context->GetCurrentEventsContext();
5442 assert(events_context);
5443 if (!events_context) return;
John Zulaufd5115702021-01-18 12:34:33 -07005444
5445 // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
5446    // all accesses. We could instead import only the first_scopes, or a union of them, if this becomes a performance/memory
5447    // issue, but with no data on the cost of the union, nor on whether it even matters, we take the simplest approach here.
5448 access_context->ResolvePreviousAccesses();
5449
John Zulaufd5115702021-01-18 12:34:33 -07005450    // TODO... this needs to change the SyncEventContext it's using depending on whether this is replay... the recorded
5451 // sync_event will be in the recorded context, but we need to update the sync_events in the current context....
John Zulauf4edde622021-02-15 08:54:50 -07005452 size_t barrier_set_index = 0;
5453 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
5454 assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
John Zulauf669dfd52021-01-27 17:15:28 -07005455 for (auto &event_shared : events_) {
5456 if (!event_shared.get()) continue;
5457 auto *sync_event = events_context->GetFromShared(event_shared);
John Zulaufd5115702021-01-18 12:34:33 -07005458
John Zulauf4edde622021-02-15 08:54:50 -07005459 sync_event->last_command = cmd_;
John Zulaufd5115702021-01-18 12:34:33 -07005460
John Zulauf4edde622021-02-15 08:54:50 -07005461 const auto &barrier_set = barriers_[barrier_set_index];
5462 const auto &dst = barrier_set.dst_exec_scope;
5463 if (!sync_event->IsIgnoredByWait(cmd_, barrier_set.src_exec_scope.mask_param)) {
John Zulaufd5115702021-01-18 12:34:33 -07005464            // These apply barriers one at a time, as they are restricted to the resource ranges specified for each barrier,
5465            // but do not update the dependency chain information (only set the "pending" state), s.t. the order independence
5466            // of the barriers is maintained.
5467 SyncOpWaitEventsFunctorFactory factory(sync_event);
John Zulauf4edde622021-02-15 08:54:50 -07005468 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5469 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5470 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulaufd5115702021-01-18 12:34:33 -07005471
5472 // Apply the global barrier to the event itself (for race condition tracking)
5473            // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS bit, if set, for inter-event calls
5474 sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
5475 sync_event->barriers |= dst.exec_scope;
5476 } else {
5477 // We ignored this wait, so we don't have any effective synchronization barriers for it.
5478 sync_event->barriers = 0U;
5479 }
John Zulauf4edde622021-02-15 08:54:50 -07005480 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07005481 }
5482
5483 // Apply the pending barriers
5484 ResolvePendingBarrierFunctor apply_pending_action(tag);
5485 access_context->ApplyToContext(apply_pending_action);
5486}
5487
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005488bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
5489 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5490 bool skip = false;
5491 const auto *cb_access_context = GetAccessContext(commandBuffer);
5492 assert(cb_access_context);
5493 if (!cb_access_context) return skip;
5494
5495 const auto *context = cb_access_context->GetCurrentAccessContext();
5496 assert(context);
5497 if (!context) return skip;
5498
5499 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5500
5501 if (dst_buffer) {
5502 const ResourceAccessRange range = MakeRange(dstOffset, 4);
5503 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
5504 if (hazard.hazard) {
5505 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5506 "vkCmdWriteBufferMarkerAMD2: Hazard %s for dstBuffer %s. Access info %s.",
5507 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
5508 string_UsageTag(hazard.tag).c_str());
5509 }
5510 }
5511 return skip;
5512}
5513
John Zulauf669dfd52021-01-27 17:15:28 -07005514void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
John Zulaufd5115702021-01-18 12:34:33 -07005515 events_.reserve(event_count);
5516 for (uint32_t event_index = 0; event_index < event_count; event_index++) {
John Zulauf669dfd52021-01-27 17:15:28 -07005517 events_.emplace_back(sync_state.GetShared<EVENT_STATE>(events[event_index]));
John Zulaufd5115702021-01-18 12:34:33 -07005518 }
5519}
John Zulauf6ce24372021-01-30 05:56:25 -07005520
John Zulauf36ef9282021-02-02 11:47:24 -07005521SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07005522 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07005523 : SyncOpBase(cmd),
5524 event_(sync_state.GetShared<EVENT_STATE>(event)),
5525 exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07005526
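// A reset that follows a set or wait on the same event without an intervening execution barrier is reported as a
// potential race; see the last_command switch below.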
5527bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07005528 auto *events_context = cb_context.GetCurrentEventsContext();
5529 assert(events_context);
5530 bool skip = false;
5531 if (!events_context) return skip;
5532
5533 const auto &sync_state = cb_context.GetSyncState();
5534 const auto *sync_event = events_context->Get(event_);
5535 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
5536
5537 const char *const set_wait =
5538        "%s: %s %s operation following %s without an intervening execution barrier is a race condition and may result in data "
5539 "hazards.";
5540 const char *message = set_wait; // Only one message this call.
5541 if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
5542 const char *vuid = nullptr;
5543 switch (sync_event->last_command) {
5544 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005545 case CMD_SETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005546 // Needs a barrier between set and reset
5547 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
5548 break;
John Zulauf4edde622021-02-15 08:54:50 -07005549 case CMD_WAITEVENTS:
5550 case CMD_WAITEVENTS2KHR: {
John Zulauf6ce24372021-01-30 05:56:25 -07005551 // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask
5552 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
5553 break;
5554 }
5555 default:
5556                // The only other valid values of last_command (CMD_NONE, or a prior reset) need no barrier before this reset.
John Zulauf4edde622021-02-15 08:54:50 -07005557 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
5558 (sync_event->last_command == CMD_RESETEVENT2KHR));
John Zulauf6ce24372021-01-30 05:56:25 -07005559 break;
5560 }
5561 if (vuid) {
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005562 skip |= sync_state.LogError(event_->event(), vuid, message, CmdName(),
5563 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07005564 CommandTypeString(sync_event->last_command));
5565 }
5566 }
5567 return skip;
5568}
5569
John Zulauf36ef9282021-02-02 11:47:24 -07005570void SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07005571 auto *events_context = cb_context->GetCurrentEventsContext();
5572 assert(events_context);
5573 if (!events_context) return;
5574
5575 auto *sync_event = events_context->GetFromShared(event_);
5576 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
5577
5578 // Update the event state
John Zulauf36ef9282021-02-02 11:47:24 -07005579 sync_event->last_command = cmd_;
John Zulauf6ce24372021-01-30 05:56:25 -07005580 sync_event->unsynchronized_set = CMD_NONE;
5581 sync_event->ResetFirstScope();
5582 sync_event->barriers = 0U;
5583}
5584
John Zulauf36ef9282021-02-02 11:47:24 -07005585SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07005586 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07005587 : SyncOpBase(cmd),
5588 event_(sync_state.GetShared<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07005589 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
5590 dep_info_() {}
5591
5592SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
5593 const VkDependencyInfoKHR &dep_info)
5594 : SyncOpBase(cmd),
5595 event_(sync_state.GetShared<EVENT_STATE>(event)),
5596 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
5597 dep_info_(new safe_VkDependencyInfoKHR(&dep_info)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07005598
5599bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
5600 // I'll put this here just in case we need to pass this in for future extension support
John Zulauf6ce24372021-01-30 05:56:25 -07005601 bool skip = false;
5602
5603 const auto &sync_state = cb_context.GetSyncState();
5604 auto *events_context = cb_context.GetCurrentEventsContext();
5605 assert(events_context);
5606 if (!events_context) return skip;
5607
5608 const auto *sync_event = events_context->Get(event_);
5609 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
5610
5611 const char *const reset_set =
5612        "%s: %s %s operation following %s without an intervening execution barrier is a race condition and may result in data "
5613 "hazards.";
5614 const char *const wait =
5615        "%s: %s %s operation following %s without an intervening vkCmdResetEvent may result in a data hazard and is ignored.";
5616
5617 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
John Zulauf4edde622021-02-15 08:54:50 -07005618 const char *vuid_stem = nullptr;
John Zulauf6ce24372021-01-30 05:56:25 -07005619 const char *message = nullptr;
5620 switch (sync_event->last_command) {
5621 case CMD_RESETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005622 case CMD_RESETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005623 // Needs a barrier between reset and set
John Zulauf4edde622021-02-15 08:54:50 -07005624 vuid_stem = "-missingbarrier-reset";
John Zulauf6ce24372021-01-30 05:56:25 -07005625 message = reset_set;
5626 break;
5627 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005628 case CMD_SETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005629 // Needs a barrier between set and set
John Zulauf4edde622021-02-15 08:54:50 -07005630 vuid_stem = "-missingbarrier-set";
John Zulauf6ce24372021-01-30 05:56:25 -07005631 message = reset_set;
5632 break;
5633 case CMD_WAITEVENTS:
John Zulauf4edde622021-02-15 08:54:50 -07005634 case CMD_WAITEVENTS2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005635 // Needs a barrier or is in second execution scope
John Zulauf4edde622021-02-15 08:54:50 -07005636 vuid_stem = "-missingbarrier-wait";
John Zulauf6ce24372021-01-30 05:56:25 -07005637 message = wait;
5638 break;
5639 default:
5640                // The only other valid value of last_command is CMD_NONE (no prior set/reset/wait), which needs no barrier.
5641 assert(sync_event->last_command == CMD_NONE);
5642 break;
5643 }
John Zulauf4edde622021-02-15 08:54:50 -07005644 if (vuid_stem) {
John Zulauf6ce24372021-01-30 05:56:25 -07005645 assert(nullptr != message);
John Zulauf4edde622021-02-15 08:54:50 -07005646 std::string vuid("SYNC-");
5647 vuid.append(CmdName()).append(vuid_stem);
Jeremy Gebben14b0d1a2021-05-15 20:15:41 -06005648 skip |= sync_state.LogError(event_->event(), vuid.c_str(), message, CmdName(),
5649 sync_state.report_data->FormatHandle(event_->event()).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07005650 CommandTypeString(sync_event->last_command));
5651 }
5652 }
5653
5654 return skip;
5655}
5656
John Zulauf36ef9282021-02-02 11:47:24 -07005657void SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
5658 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07005659 auto *events_context = cb_context->GetCurrentEventsContext();
5660 auto *access_context = cb_context->GetCurrentAccessContext();
5661 assert(events_context);
5662 if (!events_context) return;
5663
5664 auto *sync_event = events_context->GetFromShared(event_);
5665 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
5666
5667 // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined
5668    // and we're already issuing errors re: missing barriers between event commands; if the user fixes those, it would also
5669    // fix any issues caused by the naive scope setting here.
5670
5671 // What happens with two SetEvent is that one cannot know what group of operations will be waited for.
5672 // Given:
5673 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
5674 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
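    // For example (illustrative only; any commands could stand in for Stuff1/Stuff2):
    //   vkCmdDispatch(cb, ...);                 // Stuff1
    //   vkCmdSetEvent(cb, event, stage_mask);   // first set
    //   vkCmdDispatch(cb, ...);                 // Stuff2
    //   vkCmdSetEvent(cb, event, stage_mask);   // second set, with no barrier since the first
    //   vkCmdWaitEvents(cb, 1, &event, ...);    // cannot tell whether Stuff1, Stuff2, or both are in scope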
5675
5676 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
5677 sync_event->unsynchronized_set = sync_event->last_command;
5678 sync_event->ResetFirstScope();
5679 } else if (sync_event->scope.exec_scope == 0) {
5680 // We only set the scope if there isn't one
5681 sync_event->scope = src_exec_scope_;
5682
5683 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
5684 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
5685 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
5686 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
5687 }
5688 };
5689 access_context->ForAll(set_scope);
5690 sync_event->unsynchronized_set = CMD_NONE;
5691 sync_event->first_scope_tag = tag;
5692 }
John Zulauf4edde622021-02-15 08:54:50 -07005693 // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
5694 sync_event->last_command = cmd_;
John Zulauf6ce24372021-01-30 05:56:25 -07005695 sync_event->barriers = 0U;
5696}
John Zulauf64ffe552021-02-06 10:25:07 -07005697
5698SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
5699 const VkRenderPassBeginInfo *pRenderPassBegin,
5700 const VkSubpassBeginInfo *pSubpassBeginInfo, const char *cmd_name)
5701 : SyncOpBase(cmd, cmd_name) {
5702 if (pRenderPassBegin) {
5703 rp_state_ = sync_state.GetShared<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
5704 renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
5705 const auto *fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
5706 if (fb_state) {
5707 shared_attachments_ = sync_state.GetSharedAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
5708 // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
5709            // Note that this is safe to persist as long as shared_attachments is not cleared
5710 attachments_.reserve(shared_attachments_.size());
sfricke-samsung01c9ae92021-02-09 22:30:52 -08005711 for (const auto &attachment : shared_attachments_) {
John Zulauf64ffe552021-02-06 10:25:07 -07005712 attachments_.emplace_back(attachment.get());
5713 }
5714 }
5715 if (pSubpassBeginInfo) {
5716 subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
5717 }
5718 }
5719}
5720
5721bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
5722    // Check if any of the layout transitions are hazardous... but we don't have the renderpass context to work with yet, so we construct a temporary one below.
5723 bool skip = false;
5724
5725 assert(rp_state_.get());
5726 if (nullptr == rp_state_.get()) return skip;
5727 auto &rp_state = *rp_state_.get();
5728
5729 const uint32_t subpass = 0;
5730
5731 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
5732 // hasn't happened yet)
5733 const std::vector<AccessContext> empty_context_vector;
5734 AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
5735 cb_context.GetCurrentAccessContext());
5736
5737 // Validate attachment operations
5738 if (attachments_.size() == 0) return skip;
5739 const auto &render_area = renderpass_begin_info_.renderArea;
John Zulaufd0ec59f2021-03-13 14:25:08 -07005740
5741 // Since the isn't a valid RenderPassAccessContext until Record, needs to create the view/generator list... we could limit this
5742 // by predicating on whether subpass 0 uses the attachment if it is too expensive to create the full list redundantly here.
5743 // More broadly we could look at thread specific state shared between Validate and Record as is done for other heavyweight
5744 // operations (though it's currently a messy approach)
5745 AttachmentViewGenVector view_gens = RenderPassAccessContext::CreateAttachmentViewGen(render_area, attachments_);
5746 skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07005747
5748 // Validate load operations if there were no layout transition hazards
5749 if (!skip) {
John Zulaufd0ec59f2021-03-13 14:25:08 -07005750 temp_context.RecordLayoutTransitions(rp_state, subpass, view_gens, kCurrentCommandTag);
5751 skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, view_gens, CmdName());
John Zulauf64ffe552021-02-06 10:25:07 -07005752 }
5753
5754 return skip;
5755}
5756
5757void SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
5758 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
5759 assert(rp_state_.get());
5760 if (nullptr == rp_state_.get()) return;
5761 const auto tag = cb_context->NextCommandTag(cmd_);
5762 cb_context->RecordBeginRenderPass(*rp_state_.get(), renderpass_begin_info_.renderArea, attachments_, tag);
5763}
5764
5765SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
5766 const VkSubpassEndInfo *pSubpassEndInfo, const char *name_override)
5767 : SyncOpBase(cmd, name_override) {
5768 if (pSubpassBeginInfo) {
5769 subpass_begin_info_.initialize(pSubpassBeginInfo);
5770 }
5771 if (pSubpassEndInfo) {
5772 subpass_end_info_.initialize(pSubpassEndInfo);
5773 }
5774}
5775
5776bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
5777 bool skip = false;
5778 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
5779 if (!renderpass_context) return skip;
5780
5781 skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
5782 return skip;
5783}
5784
5785void SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
5786 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
5787 cb_context->RecordNextSubpass(cmd_);
5788}
5789
5790SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo,
5791 const char *name_override)
5792 : SyncOpBase(cmd, name_override) {
5793 if (pSubpassEndInfo) {
5794 subpass_end_info_.initialize(pSubpassEndInfo);
5795 }
5796}
5797
5798bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
5799 bool skip = false;
5800 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
5801
5802 if (!renderpass_context) return skip;
5803 skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
5804 return skip;
5805}
5806
5807void SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
5808 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
5809 cb_context->RecordEndRenderPass(cmd_);
5810}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005811
5812void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
5813 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
5814 StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
5815 auto *cb_access_context = GetAccessContext(commandBuffer);
5816 assert(cb_access_context);
5817 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5818 auto *context = cb_access_context->GetCurrentAccessContext();
5819 assert(context);
5820
5821 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5822
5823 if (dst_buffer) {
5824 const ResourceAccessRange range = MakeRange(dstOffset, 4);
5825 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
5826 }
5827}
John Zulaufd05c5842021-03-26 11:32:16 -06005828
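// Precompute the ImageRangeGen variants for an attachment view: the full view subresource, the view restricted to
// the render area, and (for combined depth/stencil views) depth-only and stencil-only render-area ranges.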
John Zulaufd0ec59f2021-03-13 14:25:08 -07005829AttachmentViewGen::AttachmentViewGen(const IMAGE_VIEW_STATE *view, const VkOffset3D &offset, const VkExtent3D &extent)
5830 : view_(view), view_mask_(), gen_store_() {
5831 if (!view_ || !view_->image_state || !SimpleBinding(*view_->image_state)) return;
5832 const IMAGE_STATE &image_state = *view_->image_state.get();
5833 const auto base_address = ResourceBaseAddress(image_state);
5834 const auto *encoder = image_state.fragment_encoder.get();
5835 if (!encoder) return;
5836 const VkOffset3D zero_offset = {0, 0, 0};
5837 const VkExtent3D &image_extent = image_state.createInfo.extent;
5838 // Intentional copy
5839 VkImageSubresourceRange subres_range = view_->normalized_subresource_range;
5840 view_mask_ = subres_range.aspectMask;
5841 gen_store_[Gen::kViewSubresource].emplace(*encoder, subres_range, zero_offset, image_extent, base_address);
5842 gen_store_[Gen::kRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
5843
5844 const auto depth = view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT;
5845 if (depth && (depth != view_mask_)) {
5846 subres_range.aspectMask = depth;
5847 gen_store_[Gen::kDepthOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
5848 }
5849 const auto stencil = view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT;
5850 if (stencil && (stencil != view_mask_)) {
5851 subres_range.aspectMask = stencil;
5852 gen_store_[Gen::kStencilOnlyRenderArea].emplace(*encoder, subres_range, offset, extent, base_address);
5853 }
5854}
5855
5856const ImageRangeGen *AttachmentViewGen::GetRangeGen(AttachmentViewGen::Gen gen_type) const {
5857 const ImageRangeGen *got = nullptr;
5858 switch (gen_type) {
5859 case kViewSubresource:
5860 got = &gen_store_[kViewSubresource];
5861 break;
5862 case kRenderArea:
5863 got = &gen_store_[kRenderArea];
5864 break;
5865 case kDepthOnlyRenderArea:
5866 got =
5867 (view_mask_ == VK_IMAGE_ASPECT_DEPTH_BIT) ? &gen_store_[Gen::kRenderArea] : &gen_store_[Gen::kDepthOnlyRenderArea];
5868 break;
5869 case kStencilOnlyRenderArea:
5870 got = (view_mask_ == VK_IMAGE_ASPECT_STENCIL_BIT) ? &gen_store_[Gen::kRenderArea]
5871 : &gen_store_[Gen::kStencilOnlyRenderArea];
5872 break;
5873 default:
5874 assert(got);
5875 }
5876 return got;
5877}
5878
5879AttachmentViewGen::Gen AttachmentViewGen::GetDepthStencilRenderAreaGenType(bool depth_op, bool stencil_op) const {
5880 assert(IsValid());
5881 assert(view_mask_ & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
5882 if (depth_op) {
5883 assert(view_mask_ & VK_IMAGE_ASPECT_DEPTH_BIT);
5884 if (stencil_op) {
5885 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
5886 return kRenderArea;
5887 }
5888 return kDepthOnlyRenderArea;
5889 }
5890 if (stencil_op) {
5891 assert(view_mask_ & VK_IMAGE_ASPECT_STENCIL_BIT);
5892 return kStencilOnlyRenderArea;
5893 }
5894
5895 assert(depth_op || stencil_op);
5896 return kRenderArea;
5897}
5898
5899AccessAddressType AttachmentViewGen::GetAddressType() const { return AccessContext::ImageAddressType(*view_->image_state); }