/* Copyright (c) 2019-2021 The Khronos Group Inc.
 * Copyright (c) 2019-2021 Valve Corporation
 * Copyright (c) 2019-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"

#ifdef SYNCVAL_DIAGNOSTICS
struct SyncDiagnostics {
    void DebugAction() const {
#if defined(_WIN32)
        __debugbreak();
#endif
    }
    void Detect(const ResourceAccessRange &range) {
        std::lock_guard<std::mutex> lock(diag_mutex);
        if (range.distance() == kConditionValue) {
            ++condition;
            DebugAction();
        }
        detect_histogram[range.distance()] += 1;
    }
    void InstanceDump(VkInstance instance) {
        std::cout << "# instance handle\n" << instance << "\n";
        std::cout << "# condition count\n" << condition << "\n";
        std::cout << "# Detection Size Histogram\n";
        for (const auto &entry : detect_histogram) {
            std::cout << "{ " << entry.first << ", " << entry.second << "}\n";
        }
        std::cout << std::endl;
        detect_histogram.clear();
    }
    std::map<ResourceAccessRange::index_type, size_t> detect_histogram;
    uint64_t condition;
    std::mutex diag_mutex;
    static const ResourceAccessRangeIndex kConditionValue = ~ResourceAccessRangeIndex(0);
};
static SyncDiagnostics sync_diagnostics;
#endif

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.binding.mem_state; }

static bool SimpleBinding(const IMAGE_STATE &image_state) {
    bool simple = SimpleBinding(static_cast<const BINDABLE &>(image_state)) || image_state.is_swapchain_image ||
                  (VK_NULL_HANDLE != image_state.bind_swapchain);

    // If it's not simple we must have an encoder.
    assert(!simple || image_state.fragment_encoder.get());
    return simple;
}

const static std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; };
static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
    return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
}

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::READ_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_READ:
            return true;
            break;
        default:
            assert(0);
    }
    return false;
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}

static std::string string_UsageTag(const ResourceUsageTag &tag) {
    std::stringstream out;

    out << "command: " << CommandTypeString(tag.command);
    out << ", seq_no: " << tag.seq_num;
    if (tag.sub_command != 0) {
        out << ", subcmd: " << tag.sub_command;
    }
    return out.str();
}

std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
    const auto &tag = hazard.tag;
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    std::stringstream out;
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(usage: " << usage_info.name << ", prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags2KHR(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }

    // PHASE2 TODO -- add command buffer and reset from secondary if applicable
    out << ", " << string_UsageTag(tag) << ", reset_no: " << reset_count_ << ")";
    return out.str();
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{VK_PIPELINE_STAGE_2_NONE_KHR, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};

// Sometimes we have an internal access conflict, and we use the kCurrentCommandTag to set and detect in temporary/proxy contexts
static const ResourceUsageTag kCurrentCommandTag(ResourceUsageTag::kMaxIndex, ResourceUsageTag::kMaxCount,
                                                 ResourceUsageTag::kMaxCount, CMD_NONE);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) {
    return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
}
static VkDeviceSize ResourceBaseAddress(const IMAGE_STATE &image_state) {
    VkDeviceSize base_address;
    if (image_state.is_swapchain_image || (VK_NULL_HANDLE != image_state.bind_swapchain)) {
        base_address = image_state.swapchain_fake_address;
    } else {
        base_address = ResourceBaseAddress(static_cast<const BINDABLE &>(image_state));
    }
    return base_address;
}

inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}
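// Worked example (illustrative values only): for a buffer with createInfo.size == 1024,
// MakeRange(buffer, 256, VK_WHOLE_SIZE) clamps the size via GetBufferWholeSize to 1024 - 256 = 768 and
// yields the half-open range [256, 1024), while MakeRange(buffer, 256, 128) yields [256, 384).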

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
//  Constructor() -- initializes the generator to point to the begin of the space declared.
//  * -- the current range of the generator; an empty range signifies end
//  ++ -- advance to the next non-empty range (or end)
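//
// A minimal usage sketch under the conventions above (hypothetical filter contents and range values):
//
//     SyncEventState::ScopeMap filter;  // assumed to be populated with the event's scope elsewhere
//     EventSimpleRangeGenerator filtered_gen(filter, ResourceAccessRange(0, 1024));
//     for (; filtered_gen->non_empty(); ++filtered_gen) {
//         // *filtered_gen is one non-empty intersection of [0, 1024) with an entry of filter
//     }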

// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
  public:
    SingleRangeGenerator(const KeyType &range) : current_(range) {}
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    SingleRangeGenerator &operator++() {
        current_ = KeyType();  // just one real range
        return *this;
    }

    bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }

  private:
    SingleRangeGenerator() = default;
    const KeyType range_;
    KeyType current_;
};

// Generate the ranges that are the intersection of range and the entries in the FilterMap
template <typename FilterMap, typename KeyType = typename FilterMap::key_type>
class FilteredRangeGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    FilteredRangeGenerator() : range_(), filter_(nullptr), filter_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    FilteredRangeGenerator(const FilterMap &filter, const KeyType &range)
        : range_(range), filter_(&filter), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredRangeGenerator(const FilteredRangeGenerator &from) = default;

    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredRangeGenerator &operator++() {
        ++filter_pos_;
        UpdateCurrent();
        return *this;
    }

    bool operator==(const FilteredRangeGenerator &other) const { return current_ == other.current_; }

  private:
    void UpdateCurrent() {
        if (filter_pos_ != filter_->cend()) {
            current_ = range_ & filter_pos_->first;
        } else {
            current_ = KeyType();
        }
    }
    void SeekBegin() {
        filter_pos_ = filter_->lower_bound(range_);
        UpdateCurrent();
    }
    const KeyType range_;
    const FilterMap *filter_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};
using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
using EventSimpleRangeGenerator = FilteredRangeGenerator<SyncEventState::ScopeMap>;

// Templated to allow for different Range generators or map sources...

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
template <typename FilterMap, typename RangeGen, typename KeyType = typename FilterMap::key_type>
class FilteredGeneratorGenerator {
  public:
    // Default constructed is safe to dereference for "empty" test, but for no other operation.
    FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
        // Default construction for KeyType *must* be empty range
        assert(current_.empty());
    }
    FilteredGeneratorGenerator(const FilterMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
        SeekBegin();
    }
    FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++gen_;
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *gen_; }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++gen_;
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    const FilterMap *filter_;
    RangeGen gen_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;
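// Worked example of the intersection walk above (illustrative values only): if the range generator
// produces [0, 8) and then [16, 24), and the filter map contains a single entry covering [4, 20), the
// composite generator yields [4, 8) followed by [16, 20), and then becomes empty.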

static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());

ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}
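// Worked example (illustrative values only): GetBufferRange(offset = 256, buf_whole_size = 4096,
// first_index = 4, count = 8, stride = 16) starts at 256 + 4 * 16 = 320 and spans 8 * 16 = 128 bytes,
// returning [320, 448); with count == UINT32_MAX the size is instead 4096 - 320 = 3776, giving [320, 4096).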

SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
    // Because if a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard is impossible to happen.
    if (descriptor_data.is_writable) {
        return stage_access->second.storage_write;
    }
    // TODO: sampled_read
    return stage_access->second.storage_read;
}

bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}
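// A minimal usage sketch (hypothetical action, for illustration only): accumulate the total number of
// addresses an image subresource range touches.
//
//     VkDeviceSize total = 0;
//     auto accumulate = [&total](const ResourceAccessRange &range) { total += range.distance(); };
//     ApplyOverImageRange(image_state, view.normalized_subresource_range, accumulate);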

// Traverse the attachment resolves for a specific subpass, and apply action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
                      const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass) {
    VkExtent3D extent = CastTo3D(render_area.extent);
    VkOffset3D offset = CastTo3D(render_area.offset);
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an in-use color attachment and a matching in-use resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kColorAttachment, offset, extent, 0);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment, offset, extent, 0);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
        VkImageAspectFlags aspect_mask = 0u;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        if (resolve_depth && resolve_stencil) {
            // Validate all aspects together
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate all stencil only
            aspect_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "stencil";
        }

        if (aspect_mask) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster, offset, extent, aspect_mask);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, offset, extent, aspect_mask);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandExecutionContext &ex_context, const char *func_name)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          ex_context_(ex_context),
          func_name_(func_name),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view, current_usage, ordering_rule, offset, extent, aspect_mask);
        if (hazard.hazard) {
            skip_ |=
                ex_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
                                                    "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
                                                    " to resolve attachment %" PRIu32 ". Access info %s.",
                                                    func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
                                                    attachment_name, src_at, dst_at, ex_context_.FormatUsage(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandExecutionContext &ex_context_;
    const char *func_name_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, const ResourceUsageTag &tag) : context_(context), tag_(tag) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view, current_usage, ordering_rule, offset, extent, aspect_mask, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag &tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag &tag_) {
    access_state = std::unique_ptr<const ResourceAccessState>(new ResourceAccessState(*access_state_));
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (subpass_dep.barrier_from_external.size()) {
        src_external_ = TrackBack(external_context, queue_flags, subpass_dep.barrier_from_external);
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (const auto &access : accesses) {
            action(address_type, access);
        }
    }
}

// A recursive range walker for hazard detection: first for the current context, and then recursively (via
// DetectPreviousHazard) to walk the DAG of the contexts (for example subpasses)
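// For example (illustrative ranges only): if this context has recorded accesses over [0, 4) and [8, 12)
// and a hazard check is requested for [0, 16), the walk below detects against the two recorded entries
// directly and recurses into previous contexts only for the uncovered gaps [4, 8) and [12, 16).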
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    for (auto pos = from; pos != to; ++pos) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
    }

    if (detect_prev) {
        // Detect in the trailing empty as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);

    HazardResult hazard;
    for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
        hazard = detector.DetectAsync(pos, start_tag_);
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct ApplyTrackbackBarriersAction {
    explicit ApplyTrackbackBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        access->ApplyPendingBarriers(kCurrentCommandTag);
    }
    const std::vector<SyncBarrier> &barriers;
};

// Splits a single map entry into pieces matching the entries in [first, last); the total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing into dest; first and last must be iterators pointing into a
// *different* map from dest.
// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
// range [first, last)
template <typename BarrierAction>
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
                              ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
                              BarrierAction &barrier_action) {
    auto at = entry;
    for (auto pos = first; pos != last; ++pos) {
        // Every member of the input iterator range must fit within the remaining portion of entry
        assert(at->first.includes(pos->first));
        assert(at != dest->end());
        // Trim up at to the same size as the entry to resolve
        at = sparse_container::split(at, *dest, pos->first);
        auto access = pos->second;  // intentional copy
        barrier_action(&access);
        at->second.Resolve(access);
        ++at;  // Go to the remaining unused section of entry
    }
}

static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
    SyncBarrier merged = {};
    for (const auto &barrier : barriers) {
        merged.Merge(barrier);
    }
    return merged;
}

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
                                       ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
                                       bool recur_to_infill) const {
    if (!range.non_empty()) return;

    ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
    while (current->range.non_empty() && range.includes(current->range.begin)) {
        const auto current_range = current->range & range;
        if (current->pos_B->valid) {
            const auto &src_pos = current->pos_B->lower_bound;
            auto access = src_pos->second;  // intentional copy
            barrier_action(&access);

            if (current->pos_A->valid) {
                const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
                trimmed->second.Resolve(access);
                current.invalidate_A(trimmed);
            } else {
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the insert segment
            }
        } else {
            // we have to descend to fill this gap
            if (recur_to_infill) {
                if (current->pos_A->valid) {
                    // Dest is valid, so we need to accumulate along the DAG and then resolve... in an N-to-1 resolve operation
                    ResourceAccessRangeMap gap_map;
                    ResolvePreviousAccess(type, current_range, &gap_map, infill_state);
                    ResolveMapToEntry(resolve_map, current->pos_A->lower_bound, gap_map.begin(), gap_map.end(), barrier_action);
                } else {
                    // There isn't anything in dest in current_range, so we can accumulate directly into it.
                    ResolvePreviousAccess(type, current_range, resolve_map, infill_state);
                    // Need to apply the barrier to the accesses we accumulated, noting that we haven't updated current
                    for (auto pos = resolve_map->lower_bound(current_range); pos != current->pos_A->lower_bound; ++pos) {
                        barrier_action(&pos->second);
                    }
                }
                // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
                // iterator of the outer while.

                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
                // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
                // we stepped on the dest map
                const auto seek_to = current_range.end - 1;  // The subtraction is safe as range can't be empty (loop condition)
                current.invalidate_A();                      // Changes current->range
                current.seek(seek_to);
            } else if (!current->pos_A->valid && infill_state) {
                // If we didn't find anything in the current range, and we aren't recurring... we infill if required
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the correct segment after insert
            }
        }
        ++current;
    }

    // Infill if range goes past both the current and resolve map prior contents
    if (recur_to_infill && (current->range.end < range.end)) {
        ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
        ResourceAccessRangeMap gap_map;
        const auto the_end = resolve_map->end();
        ResolvePreviousAccess(type, trailing_fill_range, &gap_map, infill_state);
        for (auto &access : gap_map) {
            barrier_action(&access.second);
            resolve_map->insert(the_end, access);
        }
    }
}

void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
                                          ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            const ApplyTrackbackBarriersAction barrier_action(prev_dep.barriers);
            prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }

        if (src_external_.context) {
            const ApplyTrackbackBarriersAction barrier_action(src_external_.barriers);
            src_external_.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }
    }
}

// Non-lazy import of all accesses, WaitEvents needs this.
void AccessContext::ResolvePreviousAccesses() {
    ResourceAccessState default_state;
    for (const auto address_type : kAddressTypes) {
        ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
    }
}

AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
    return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
}

static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
                                                                      : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE;
    return stage_access;
}
static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
                                                                      : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE;
    return stage_access;
}

// Caller must manage returned pointer
static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
                                                     uint32_t subpass, const VkRect2D &render_area,
                                                     std::vector<const IMAGE_VIEW_STATE *> attachment_views) {
    auto *proxy = new AccessContext(context);
    proxy->UpdateAttachmentResolveAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    proxy->UpdateAttachmentStoreAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    return proxy;
}

template <typename BarrierAction>
class ResolveAccessRangeFunctor {
  public:
    ResolveAccessRangeFunctor(const AccessContext &context, AccessAddressType address_type, ResourceAccessRangeMap *descent_map,
                              const ResourceAccessState *infill_state, BarrierAction &barrier_action)
        : context_(context),
          address_type_(address_type),
          descent_map_(descent_map),
          infill_state_(infill_state),
          barrier_action_(barrier_action) {}
    ResolveAccessRangeFunctor() = delete;
    void operator()(const ResourceAccessRange &range) const {
        context_.ResolveAccessRange(address_type_, range, barrier_action_, descent_map_, infill_state_);
    }

  private:
    const AccessContext &context_;
    const AccessAddressType address_type_;
    ResourceAccessRangeMap *const descent_map_;
    const ResourceAccessState *infill_state_;
    BarrierAction &barrier_action_;
};

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range,
                                       BarrierAction &barrier_action, AccessAddressType address_type,
                                       ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    const ResolveAccessRangeFunctor<BarrierAction> action(*this, address_type, descent_map, infill_state, barrier_action);
    ApplyOverImageRange(image_state, subresource_range, action);
}

// Layout transitions are handled as if they were occurring at the beginning of the next subpass
bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
                                              const VkRect2D &render_area, uint32_t subpass,
                                              const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
                                              const char *func_name) const {
    bool skip = false;
    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
    // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
    // those effects have not been recorded yet.
    //
    // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
    // to apply and only copy then, if this proves a hot spot.
    std::unique_ptr<AccessContext> proxy_for_prev;
    TrackBack proxy_track_back;

    const auto &transitions = rp_state.subpass_transitions[subpass];
    for (const auto &transition : transitions) {
        const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);

        const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
        if (prev_needs_proxy) {
            if (!proxy_for_prev) {
                proxy_for_prev.reset(CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass,
                                                                    render_area, attachment_views));
                proxy_track_back = *track_back;
                proxy_track_back.context = proxy_for_prev.get();
            }
            track_back = &proxy_track_back;
        }
        auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
        if (hazard.hazard) {
            skip |= ex_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                                       "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
                                                       " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
                                                       func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
                                                       string_VkImageLayout(transition.old_layout),
                                                       string_VkImageLayout(transition.new_layout),
                                                       ex_context.FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

John Zulauf64ffe552021-02-06 10:25:07 -07001042bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001043 const VkRect2D &render_area, uint32_t subpass,
1044 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1045 const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001046 bool skip = false;
1047 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1048 VkExtent3D extent = CastTo3D(render_area.extent);
1049 VkOffset3D offset = CastTo3D(render_area.offset);
John Zulaufa0a98292020-09-18 09:30:10 -06001050
John Zulauf1507ee42020-05-18 11:33:09 -06001051 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1052 if (subpass == rp_state.attachment_first_subpass[i]) {
1053 if (attachment_views[i] == nullptr) continue;
1054 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1055 const IMAGE_STATE *image = view.image_state.get();
1056 if (image == nullptr) continue;
1057 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001058
1059            // Need to check in the following way:
1060            // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
1061            // vs. transition
1062 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1063 // for each aspect loaded.
1064
1065 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001066 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001067 const bool is_color = !(has_depth || has_stencil);
1068
1069 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001070 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001071
John Zulaufaff20662020-06-01 14:07:58 -06001072 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001073 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001074
John Zulaufb02c1eb2020-10-06 16:33:36 -06001075 auto hazard_range = view.normalized_subresource_range;
1076 bool checked_stencil = false;
1077 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001078 hazard = DetectHazard(*image, load_index, view.normalized_subresource_range, SyncOrdering::kColorAttachment, offset,
John Zulauf859089b2020-10-29 17:37:03 -06001079 extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001080 aspect = "color";
1081 } else {
1082 if (has_depth) {
1083 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001084 hazard = DetectHazard(*image, load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset, extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001085 aspect = "depth";
1086 }
1087 if (!hazard.hazard && has_stencil) {
1088 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001089 hazard = DetectHazard(*image, stencil_load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset,
1090 extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001091 aspect = "stencil";
1092 checked_stencil = true;
1093 }
1094 }
1095
1096 if (hazard.hazard) {
1097 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulauf64ffe552021-02-06 10:25:07 -07001098 const auto &sync_state = ex_context.GetSyncState();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001099 if (hazard.tag == kCurrentCommandTag) {
1100 // Hazard vs. ILT
1101 skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1102 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1103 " aspect %s during load with loadOp %s.",
1104 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1105 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06001106 skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1107 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001108 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001109 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauf64ffe552021-02-06 10:25:07 -07001110 ex_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001111 }
1112 }
1113 }
1114 }
1115 return skip;
1116}
1117
John Zulaufaff20662020-06-01 14:07:58 -06001118// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1119// because of the ordering guarantees w.r.t. sample access, and because the resolve validation hasn't altered the state, as
1120// store is part of the same Next/End operation.
1121// The latter is handled in layout transition validation directly.
John Zulauf64ffe552021-02-06 10:25:07 -07001122bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001123 const VkRect2D &render_area, uint32_t subpass,
1124 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1125 const char *func_name) const {
1126 bool skip = false;
1127 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1128 VkExtent3D extent = CastTo3D(render_area.extent);
1129 VkOffset3D offset = CastTo3D(render_area.offset);
1130
1131 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1132 if (subpass == rp_state.attachment_last_subpass[i]) {
1133 if (attachment_views[i] == nullptr) continue;
1134 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1135 const IMAGE_STATE *image = view.image_state.get();
1136 if (image == nullptr) continue;
1137 const auto &ci = attachment_ci[i];
1138
1139 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1140            // so we assume that an implementation is *free* to write in that case, meaning that for correctness'
1141            // sake, we treat DONT_CARE as writing.
1142 const bool has_depth = FormatHasDepth(ci.format);
1143 const bool has_stencil = FormatHasStencil(ci.format);
1144 const bool is_color = !(has_depth || has_stencil);
1145 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1146 if (!has_stencil && !store_op_stores) continue;
1147
1148 HazardResult hazard;
1149 const char *aspect = nullptr;
1150 bool checked_stencil = false;
1151 if (is_color) {
1152 hazard = DetectHazard(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001153 view.normalized_subresource_range, SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001154 aspect = "color";
1155 } else {
1156 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1157 auto hazard_range = view.normalized_subresource_range;
1158 if (has_depth && store_op_stores) {
1159 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
1160 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001161 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001162 aspect = "depth";
1163 }
1164 if (!hazard.hazard && has_stencil && stencil_op_stores) {
1165 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
1166 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001167 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001168 aspect = "stencil";
1169 checked_stencil = true;
1170 }
1171 }
1172
1173 if (hazard.hazard) {
1174 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1175 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulauf64ffe552021-02-06 10:25:07 -07001176 skip |= ex_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07001177 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1178 " %s aspect during store with %s %s. Access info %s",
1179 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
John Zulauf64ffe552021-02-06 10:25:07 -07001180 op_type_string, store_op_string, ex_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001181 }
1182 }
1183 }
1184 return skip;
1185}
1186
John Zulauf64ffe552021-02-06 10:25:07 -07001187bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
John Zulaufb027cdb2020-05-21 14:25:22 -06001188 const VkRect2D &render_area,
1189 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, const char *func_name,
1190 uint32_t subpass) const {
John Zulauf64ffe552021-02-06 10:25:07 -07001191 ValidateResolveAction validate_action(rp_state.renderPass, subpass, *this, ex_context, func_name);
John Zulauf7635de32020-05-29 17:14:15 -06001192 ResolveOperation(validate_action, rp_state, render_area, attachment_views, subpass);
1193 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001194}
1195
John Zulauf3d84f1b2020-03-09 13:33:25 -06001196class HazardDetector {
1197 SyncStageAccessIndex usage_index_;
1198
1199 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001200 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001201 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1202 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001203 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001204 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001205};
1206
John Zulauf69133422020-05-20 14:55:53 -06001207class HazardDetectorWithOrdering {
1208 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001209 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001210
1211 public:
1212 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001213 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001214 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001215 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1216 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001217 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001218 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001219};
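// Note (descriptive only): both detector classes above provide the same implicit interface consumed by the templated
// DetectHazard methods of AccessContext: Detect() is called when checking a range against accesses recorded in the
// current or synchronously preceding contexts, and DetectAsync() when checking against asynchronous contexts.
// Observe that HazardDetectorWithOrdering applies its ordering rule only on the synchronous Detect() path.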
1220
John Zulauf16adfc92020-04-08 10:28:33 -06001221HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001222 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001223 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001224 const auto base_address = ResourceBaseAddress(buffer);
1225 HazardDetector detector(usage_index);
1226 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001227}
1228
John Zulauf69133422020-05-20 14:55:53 -06001229template <typename Detector>
1230HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1231 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1232 const VkExtent3D &extent, DetectOptions options) const {
1233 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001234 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001235 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1236 base_address);
1237 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001238 for (; range_gen->non_empty(); ++range_gen) {
John Zulaufd05c5842021-03-26 11:32:16 -06001239#ifdef SYNCVAL_DIAGNOSTICS
1240 sync_diagnostics.Detect(*range_gen);
1241#endif
John Zulauf150e5332020-12-03 08:52:52 -07001242 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001243 if (hazard.hazard) return hazard;
1244 }
1245 return HazardResult();
1246}
1247
John Zulauf540266b2020-04-06 18:54:53 -06001248HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1249 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1250 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001251 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1252 subresource.layerCount};
John Zulauf1507ee42020-05-18 11:33:09 -06001253 return DetectHazard(image, current_usage, subresource_range, offset, extent);
1254}
1255
1256HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1257 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1258 const VkExtent3D &extent) const {
John Zulauf69133422020-05-20 14:55:53 -06001259 HazardDetector detector(current_usage);
1260 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
1261}
1262
1263HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001264 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001265 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001266 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001267 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001268}
1269
John Zulaufb027cdb2020-05-21 14:25:22 -06001270// Some common code for looking at attachments. If there's anything wrong, we return no hazard; core validation
1271// should have reported the issue regarding an invalid attachment entry.
1272HazardResult AccessContext::DetectHazard(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001273 SyncOrdering ordering_rule, const VkOffset3D &offset, const VkExtent3D &extent,
John Zulaufb027cdb2020-05-21 14:25:22 -06001274 VkImageAspectFlags aspect_mask) const {
1275 if (view != nullptr) {
1276 const IMAGE_STATE *image = view->image_state.get();
1277 if (image != nullptr) {
1278 auto *detect_range = &view->normalized_subresource_range;
1279 VkImageSubresourceRange masked_range;
1280 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1281 masked_range = view->normalized_subresource_range;
1282 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1283 detect_range = &masked_range;
1284 }
1285
1286 // NOTE: The range encoding code is not robust to invalid ranges, so we protect it from our change
1287 if (detect_range->aspectMask) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001288 return DetectHazard(*image, current_usage, *detect_range, ordering_rule, offset, extent);
John Zulaufb027cdb2020-05-21 14:25:22 -06001289 }
1290 }
1291 }
1292 return HazardResult();
1293}
John Zulauf43cc7462020-12-03 12:33:12 -07001294
John Zulauf3d84f1b2020-03-09 13:33:25 -06001295class BarrierHazardDetector {
1296 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001297 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001298 SyncStageAccessFlags src_access_scope)
1299 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1300
John Zulauf5f13a792020-03-10 07:31:21 -06001301 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1302 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001303 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001304 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001305        // Async barrier hazard detection can use the same path, as the usage index is not IsRead, but IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001306 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001307 }
1308
1309 private:
1310 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001311 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001312 SyncStageAccessFlags src_access_scope_;
1313};
1314
John Zulauf4a6105a2020-11-17 15:11:05 -07001315class EventBarrierHazardDetector {
1316 public:
Jeremy Gebben40a22942020-12-22 14:22:06 -07001317 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001318 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
1319 const ResourceUsageTag &scope_tag)
1320 : usage_index_(usage_index),
1321 src_exec_scope_(src_exec_scope),
1322 src_access_scope_(src_access_scope),
1323 event_scope_(event_scope),
1324 scope_pos_(event_scope.cbegin()),
1325 scope_end_(event_scope.cend()),
1326 scope_tag_(scope_tag) {}
1327
1328 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1329 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1330        // Need to find a more efficient sync, since we know pos->first is strictly increasing from call to call
1331 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1332 if (scope_pos_ == scope_end_) return HazardResult();
1333 if (!scope_pos_->first.intersects(pos->first)) {
1334 event_scope_.lower_bound(pos->first);
1335 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1336 }
1337
1338 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1339 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1340 }
1341 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1342        // Async barrier hazard detection can use the same path, as the usage index is not IsRead, but IsWrite
1343 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1344 }
1345
1346 private:
1347 SyncStageAccessIndex usage_index_;
Jeremy Gebben40a22942020-12-22 14:22:06 -07001348 VkPipelineStageFlags2KHR src_exec_scope_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001349 SyncStageAccessFlags src_access_scope_;
1350 const SyncEventState::ScopeMap &event_scope_;
1351 SyncEventState::ScopeMap::const_iterator scope_pos_;
1352 SyncEventState::ScopeMap::const_iterator scope_end_;
1353 const ResourceUsageTag &scope_tag_;
1354};
1355
Jeremy Gebben40a22942020-12-22 14:22:06 -07001356HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07001357 const SyncStageAccessFlags &src_access_scope,
1358 const VkImageSubresourceRange &subresource_range,
1359 const SyncEventState &sync_event, DetectOptions options) const {
1360 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1361 // first access scope map to use, and there's no easy way to plumb it in below.
1362 const auto address_type = ImageAddressType(image);
1363 const auto &event_scope = sync_event.FirstScope(address_type);
1364
1365 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1366 event_scope, sync_event.first_scope_tag);
1367 VkOffset3D zero_offset = {0, 0, 0};
1368 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
1369}
1370
Jeremy Gebben40a22942020-12-22 14:22:06 -07001371HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001372 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001373 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001374 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001375 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
1376 VkOffset3D zero_offset = {0, 0, 0};
1377 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001378}
1379
Jeremy Gebben40a22942020-12-22 14:22:06 -07001380HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001381 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001382 const VkImageMemoryBarrier &barrier) const {
1383 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1384 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1385 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1386}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001387HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07001388 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope.exec_scope,
John Zulaufd5115702021-01-18 12:34:33 -07001389 image_barrier.barrier.src_access_scope, image_barrier.range.subresource_range, kDetectAll);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001390}
John Zulauf355e49b2020-04-24 15:11:15 -06001391
John Zulauf9cb530d2019-09-30 14:14:10 -06001392template <typename Flags, typename Map>
1393SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1394 SyncStageAccessFlags scope = 0;
1395 for (const auto &bit_scope : map) {
1396 if (flag_mask < bit_scope.first) break;
1397
1398 if (flag_mask & bit_scope.first) {
1399 scope |= bit_scope.second;
1400 }
1401 }
1402 return scope;
1403}
1404
Jeremy Gebben40a22942020-12-22 14:22:06 -07001405SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags2KHR stages) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001406 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1407}
1408
Jeremy Gebben40a22942020-12-22 14:22:06 -07001409SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags2KHR accesses) {
1410 return AccessScopeImpl(sync_utils::ExpandAccessFlags(accesses), syncStageAccessMaskByAccessBit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001411}
1412
Jeremy Gebben40a22942020-12-22 14:22:06 -07001413// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
1414SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stages, VkAccessFlags2KHR accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001415    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1416    // accesses (after factoring out common terms, the union of the per-stage/per-access intersections is the intersection
1417    // of the union of all stage/access types for all the stages with the same union for the access mask)...
John Zulauf9cb530d2019-09-30 14:14:10 -06001418 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1419}
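// Illustrative example (values chosen only for illustration; not part of the validation logic): for
//
//   VkPipelineStageFlags2KHR stages = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
//   VkAccessFlags2KHR accesses = VK_ACCESS_2_SHADER_READ_BIT_KHR;
//   SyncStageAccessFlags scope = SyncStageAccess::AccessScope(stages, accesses);
//
// AccessScopeByStage(stages) is the union of every stage/access bit reachable from the fragment shader stage, and
// AccessScopeByAccess(accesses) is the union of every stage/access bit reachable from the (expanded) shader read
// accesses; their bitwise AND (the fragment shader read accesses) is the scope used for hazard detection.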
1420
1421template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001422void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001423 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
1424    // that do incremental updates)
John Zulauf4a6105a2020-11-17 15:11:05 -07001425 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001426 auto pos = accesses->lower_bound(range);
1427 if (pos == accesses->end() || !pos->first.intersects(range)) {
1428 // The range is empty, fill it with a default value.
1429 pos = action.Infill(accesses, pos, range);
1430 } else if (range.begin < pos->first.begin) {
1431 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001432 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001433 } else if (pos->first.begin < range.begin) {
1434 // Trim the beginning if needed
1435 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1436 ++pos;
1437 }
1438
1439 const auto the_end = accesses->end();
1440 while ((pos != the_end) && pos->first.intersects(range)) {
1441 if (pos->first.end > range.end) {
1442 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1443 }
1444
1445 pos = action(accesses, pos);
1446 if (pos == the_end) break;
1447
1448 auto next = pos;
1449 ++next;
1450 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1451 // Need to infill if next is disjoint
1452 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001453 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001454 next = action.Infill(accesses, next, new_range);
1455 }
1456 pos = next;
1457 }
1458}
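// Illustrative walk of the function above (the map contents are hypothetical, shown only to explain the splitting and
// infill behavior): if accesses holds entries for [0,16) and [32,48), then
//
//   UpdateMemoryAccessState(&accesses, ResourceAccessRange(8, 40), action);
//
// splits [0,16) at 8 and applies action to [8,16), infills the gap [16,32) via action.Infill() and applies action to
// the new entry, then splits [32,48) at 40 and applies action to [32,40). Every part of the requested range ends up
// covered by exactly one updated (or infilled) entry, while accesses outside the range are left untouched.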
John Zulaufd5115702021-01-18 12:34:33 -07001459
1460// Give a comparable interface for range generators and ranges
1461template <typename Action>
1462inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
1463 assert(range);
1464 UpdateMemoryAccessState(accesses, *range, action);
1465}
1466
John Zulauf4a6105a2020-11-17 15:11:05 -07001467template <typename Action, typename RangeGen>
1468void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1469 assert(range_gen_arg);
John Zulaufd5115702021-01-18 12:34:33 -07001470    RangeGen &range_gen = *range_gen_arg;  // Per style, non-const parameters must be passed as pointers, but deref-ing through a pointer-to-iterator is a pain
John Zulauf4a6105a2020-11-17 15:11:05 -07001471 for (; range_gen->non_empty(); ++range_gen) {
1472 UpdateMemoryAccessState(accesses, *range_gen, action);
1473 }
1474}
John Zulauf9cb530d2019-09-30 14:14:10 -06001475
1476struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001477 using Iterator = ResourceAccessRangeMap::iterator;
1478 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001479 // this is only called on gaps, and never returns a gap.
1480 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001481 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001482 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001483 }
John Zulauf5f13a792020-03-10 07:31:21 -06001484
John Zulauf5c5e88d2019-12-26 11:22:02 -07001485 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001486 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001487 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001488 return pos;
1489 }
1490
John Zulauf43cc7462020-12-03 12:33:12 -07001491 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001492 SyncOrdering ordering_rule_, const ResourceUsageTag &tag_)
1493 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001494 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001495 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001496 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001497 const SyncOrdering ordering_rule;
John Zulauf9cb530d2019-09-30 14:14:10 -06001498 const ResourceUsageTag &tag;
1499};
1500
John Zulauf4a6105a2020-11-17 15:11:05 -07001501// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001502struct PipelineBarrierOp {
1503 SyncBarrier barrier;
1504 bool layout_transition;
1505 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1506 : barrier(barrier_), layout_transition(layout_transition_) {}
1507 PipelineBarrierOp() = default;
John Zulaufd5115702021-01-18 12:34:33 -07001508 PipelineBarrierOp(const PipelineBarrierOp &) = default;
John Zulauf1e331ec2020-12-04 18:29:38 -07001509 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1510};
John Zulauf4a6105a2020-11-17 15:11:05 -07001511// The barrier operation for wait events
1512struct WaitEventBarrierOp {
1513 const ResourceUsageTag *scope_tag;
1514 SyncBarrier barrier;
1515 bool layout_transition;
1516 WaitEventBarrierOp(const ResourceUsageTag &scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1517 : scope_tag(&scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
1518 WaitEventBarrierOp() = default;
1519 void operator()(ResourceAccessState *access_state) const {
1520 assert(scope_tag); // Not valid to have a non-scope op executed, default construct included for std::vector support
1521 access_state->ApplyBarrier(*scope_tag, barrier, layout_transition);
1522 }
1523};
John Zulauf1e331ec2020-12-04 18:29:38 -07001524
John Zulauf4a6105a2020-11-17 15:11:05 -07001525// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1526// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1527// of a collection is known/present.
John Zulauf1e331ec2020-12-04 18:29:38 -07001528template <typename BarrierOp>
John Zulauf89311b42020-09-29 16:28:47 -06001529class ApplyBarrierOpsFunctor {
1530 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001531 using Iterator = ResourceAccessRangeMap::iterator;
1532 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -06001533
John Zulauf5c5e88d2019-12-26 11:22:02 -07001534 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001535 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001536 for (const auto &op : barrier_ops_) {
1537 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001538 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001539
John Zulauf89311b42020-09-29 16:28:47 -06001540 if (resolve_) {
1541 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1542 // another walk
1543 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001544 }
1545 return pos;
1546 }
1547
John Zulauf89311b42020-09-29 16:28:47 -06001548 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulaufd5115702021-01-18 12:34:33 -07001549 ApplyBarrierOpsFunctor(bool resolve, size_t size_hint, const ResourceUsageTag &tag)
1550 : resolve_(resolve), barrier_ops_(), tag_(tag) {
1551 barrier_ops_.reserve(size_hint);
1552 }
1553 void EmplaceBack(const BarrierOp &op) { barrier_ops_.emplace_back(op); }
John Zulauf89311b42020-09-29 16:28:47 -06001554
1555 private:
1556 bool resolve_;
John Zulaufd5115702021-01-18 12:34:33 -07001557 std::vector<BarrierOp> barrier_ops_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001558 const ResourceUsageTag &tag_;
1559};
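// Usage sketch (the variable names here are hypothetical, shown only to illustrate how the pieces compose): a
// pipeline barrier recorded against an access context might be applied as
//
//   ApplyBarrierOpsFunctor<PipelineBarrierOp> barriers_functor(true /* resolve */, barrier_count, tag);
//   barriers_functor.EmplaceBack(PipelineBarrierOp(sync_barrier, false /* no layout transition */));
//   access_context->ApplyToContext(barriers_functor);
//
// Each touched range accumulates the pending effect of every op and, because resolve is true, the pending state is
// folded into the access state during the same walk, so no separate ResolvePendingBarrierFunctor pass is needed.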
1560
John Zulauf4a6105a2020-11-17 15:11:05 -07001561// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1562// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1563template <typename BarrierOp>
1564class ApplyBarrierFunctor {
1565 public:
1566 using Iterator = ResourceAccessRangeMap::iterator;
1567 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1568
1569 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1570 auto &access_state = pos->second;
1571 barrier_op_(&access_state);
1572 return pos;
1573 }
1574
1575 ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
1576
1577 private:
John Zulaufd5115702021-01-18 12:34:33 -07001578 BarrierOp barrier_op_;
John Zulauf4a6105a2020-11-17 15:11:05 -07001579};
1580
John Zulauf1e331ec2020-12-04 18:29:38 -07001581// This functor resolves the pending state.
1582class ResolvePendingBarrierFunctor {
1583 public:
1584 using Iterator = ResourceAccessRangeMap::iterator;
1585 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1586
1587 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1588 auto &access_state = pos->second;
1589 access_state.ApplyPendingBarriers(tag_);
1590 return pos;
1591 }
1592
1593 ResolvePendingBarrierFunctor(const ResourceUsageTag &tag) : tag_(tag) {}
1594
1595 private:
John Zulauf89311b42020-09-29 16:28:47 -06001596 const ResourceUsageTag &tag_;
John Zulauf9cb530d2019-09-30 14:14:10 -06001597};
1598
John Zulauf8e3c3e92021-01-06 11:19:36 -07001599void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1600 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
1601 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001602 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001603}
1604
John Zulauf8e3c3e92021-01-06 11:19:36 -07001605void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001606 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001607 if (!SimpleBinding(buffer)) return;
1608 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001609 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001610}
John Zulauf355e49b2020-04-24 15:11:15 -06001611
John Zulauf8e3c3e92021-01-06 11:19:36 -07001612void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001613 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf540266b2020-04-06 18:54:53 -06001614 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001615 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001616 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001617 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1618 base_address);
1619 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001620 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf5f13a792020-03-10 07:31:21 -06001621 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001622 UpdateMemoryAccessState(&GetAccessStateMap(address_type), *range_gen, action);
John Zulauf5f13a792020-03-10 07:31:21 -06001623 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001624}
John Zulauf8e3c3e92021-01-06 11:19:36 -07001625void AccessContext::UpdateAccessState(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1626 const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask,
1627 const ResourceUsageTag &tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001628 if (view != nullptr) {
1629 const IMAGE_STATE *image = view->image_state.get();
1630 if (image != nullptr) {
1631 auto *update_range = &view->normalized_subresource_range;
1632 VkImageSubresourceRange masked_range;
1633 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1634 masked_range = view->normalized_subresource_range;
1635 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1636 update_range = &masked_range;
1637 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001638 UpdateAccessState(*image, current_usage, ordering_rule, *update_range, offset, extent, tag);
John Zulauf7635de32020-05-29 17:14:15 -06001639 }
1640 }
1641}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001642
John Zulauf8e3c3e92021-01-06 11:19:36 -07001643void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001644 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1645 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001646 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1647 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001648 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001649}
1650
John Zulauf540266b2020-04-06 18:54:53 -06001651template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001652void AccessContext::UpdateResourceAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001653 if (!SimpleBinding(buffer)) return;
1654 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf43cc7462020-12-03 12:33:12 -07001655 UpdateMemoryAccessState(&GetAccessStateMap(AccessAddressType::kLinear), (range + base_address), action);
John Zulauf540266b2020-04-06 18:54:53 -06001656}
1657
1658template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001659void AccessContext::UpdateResourceAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
1660 const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001661 if (!SimpleBinding(image)) return;
1662 const auto address_type = ImageAddressType(image);
1663 auto *accesses = &GetAccessStateMap(address_type);
John Zulauf540266b2020-04-06 18:54:53 -06001664
John Zulauf16adfc92020-04-08 10:28:33 -06001665 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001666 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
1667 image.createInfo.extent, base_address);
1668
John Zulauf540266b2020-04-06 18:54:53 -06001669 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001670 UpdateMemoryAccessState(accesses, *range_gen, action);
John Zulauf540266b2020-04-06 18:54:53 -06001671 }
1672}
1673
John Zulauf7635de32020-05-29 17:14:15 -06001674void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1675 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1676 const ResourceUsageTag &tag) {
1677 UpdateStateResolveAction update(*this, tag);
1678 ResolveOperation(update, rp_state, render_area, attachment_views, subpass);
1679}
1680
John Zulaufaff20662020-06-01 14:07:58 -06001681void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1682 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1683 const ResourceUsageTag &tag) {
1684 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1685 VkExtent3D extent = CastTo3D(render_area.extent);
1686 VkOffset3D offset = CastTo3D(render_area.offset);
1687
1688 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1689 if (rp_state.attachment_last_subpass[i] == subpass) {
1690 if (attachment_views[i] == nullptr) continue; // UNUSED
1691 const auto &view = *attachment_views[i];
1692 const IMAGE_STATE *image = view.image_state.get();
1693 if (image == nullptr) continue;
1694
1695 const auto &ci = attachment_ci[i];
1696 const bool has_depth = FormatHasDepth(ci.format);
1697 const bool has_stencil = FormatHasStencil(ci.format);
1698 const bool is_color = !(has_depth || has_stencil);
1699 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1700
1701 if (is_color && store_op_stores) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001702 UpdateAccessState(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1703 view.normalized_subresource_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001704 } else {
1705 auto update_range = view.normalized_subresource_range;
1706 if (has_depth && store_op_stores) {
1707 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001708 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1709 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001710 }
1711 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1712 if (has_stencil && stencil_op_stores) {
1713 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001714 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1715 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001716 }
1717 }
1718 }
1719 }
1720}
1721
John Zulauf540266b2020-04-06 18:54:53 -06001722template <typename Action>
John Zulaufd5115702021-01-18 12:34:33 -07001723void AccessContext::ApplyToContext(const Action &barrier_action) {
John Zulauf540266b2020-04-06 18:54:53 -06001724 // Note: Barriers do *not* cross context boundaries, applying to accessess within.... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001725 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001726 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001727 }
1728}
1729
1730void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001731 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1732 auto &context = contexts[subpass_index];
John Zulaufb02c1eb2020-10-06 16:33:36 -06001733 ApplyTrackbackBarriersAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001734 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001735 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001736 }
1737 }
1738}
1739
John Zulauf355e49b2020-04-24 15:11:15 -06001740// Suitable only for *subpass* access contexts
John Zulauf7635de32020-05-29 17:14:15 -06001741HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const IMAGE_VIEW_STATE *attach_view) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001742 if (!attach_view) return HazardResult();
1743 const auto image_state = attach_view->image_state.get();
1744 if (!image_state) return HazardResult();
1745
John Zulauf355e49b2020-04-24 15:11:15 -06001746 // We should never ask for a transition from a context we don't have
John Zulauf7635de32020-05-29 17:14:15 -06001747 assert(track_back.context);
John Zulauf355e49b2020-04-24 15:11:15 -06001748
1749 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001750    // Hazard detection for the transition can be done against the merge of the barriers (it only uses src_...)
1751 const auto merged_barrier = MergeBarriers(track_back.barriers);
John Zulaufc523bf62021-02-16 08:20:34 -07001752 HazardResult hazard = track_back.context->DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope.exec_scope,
1753 merged_barrier.src_access_scope,
1754 attach_view->normalized_subresource_range, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001755 if (!hazard.hazard) {
1756 // The Async hazard check is against the current context's async set.
John Zulaufc523bf62021-02-16 08:20:34 -07001757 hazard = DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope.exec_scope, merged_barrier.src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001758 attach_view->normalized_subresource_range, kDetectAsync);
1759 }
John Zulaufa0a98292020-09-18 09:30:10 -06001760
John Zulauf355e49b2020-04-24 15:11:15 -06001761 return hazard;
1762}
1763
John Zulaufb02c1eb2020-10-06 16:33:36 -06001764void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
1765 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1766 const ResourceUsageTag &tag) {
1767 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001768 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001769 for (const auto &transition : transitions) {
1770 const auto prev_pass = transition.prev_pass;
1771 const auto attachment_view = attachment_views[transition.attachment];
1772 if (!attachment_view) continue;
1773 const auto *image = attachment_view->image_state.get();
1774 if (!image) continue;
1775 if (!SimpleBinding(*image)) continue;
1776
1777 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1778 assert(trackback);
1779
1780 // Import the attachments into the current context
1781 const auto *prev_context = trackback->context;
1782 assert(prev_context);
1783 const auto address_type = ImageAddressType(*image);
1784 auto &target_map = GetAccessStateMap(address_type);
1785 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
1786 prev_context->ResolveAccessRange(*image, attachment_view->normalized_subresource_range, barrier_action, address_type,
John Zulauf646cc292020-10-23 09:16:45 -06001787 &target_map, &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001788 }
1789
John Zulauf86356ca2020-10-19 11:46:41 -06001790 // If there were no transitions skip this global map walk
1791 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001792 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulaufd5115702021-01-18 12:34:33 -07001793 ApplyToContext(apply_pending_action);
John Zulauf86356ca2020-10-19 11:46:41 -06001794 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001795}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001796
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001797void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
1798 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
John Zulauf669dfd52021-01-27 17:15:28 -07001799
1800 auto *events_context = GetCurrentEventsContext();
1801 assert(events_context);
1802 for (auto &event_pair : *events_context) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001803 assert(event_pair.second); // Shouldn't be storing empty
1804 auto &sync_event = *event_pair.second;
1805 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001806 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
1807 sync_event.barriers |= dst.exec_scope;
1808 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
John Zulauf4a6105a2020-11-17 15:11:05 -07001809 }
1810 }
1811}
1812
John Zulauf355e49b2020-04-24 15:11:15 -06001813
locke-lunarg61870c22020-06-09 14:51:50 -06001814bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1815 const char *func_name) const {
1816 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001817 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001818 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001819 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
1820 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001821 return skip;
1822 }
1823
1824 using DescriptorClass = cvdescriptorset::DescriptorClass;
1825 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1826 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1827 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1828 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1829
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001830 for (const auto &stage_state : pipe->stage_state) {
1831 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1832 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001833 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001834 }
locke-lunarg61870c22020-06-09 14:51:50 -06001835 for (const auto &set_binding : stage_state.descriptor_uses) {
1836 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1837 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1838 set_binding.first.second);
1839 const auto descriptor_type = binding_it.GetType();
1840 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1841 auto array_idx = 0;
1842
1843 if (binding_it.IsVariableDescriptorCount()) {
1844 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1845 }
1846 SyncStageAccessIndex sync_index =
1847 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1848
1849 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1850 uint32_t index = i - index_range.start;
1851 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1852 switch (descriptor->GetClass()) {
1853 case DescriptorClass::ImageSampler:
1854 case DescriptorClass::Image: {
1855 const IMAGE_VIEW_STATE *img_view_state = nullptr;
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001856 VkImageLayout image_layout;
locke-lunarg61870c22020-06-09 14:51:50 -06001857 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001858 const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
1859 img_view_state = image_sampler_descriptor->GetImageViewState();
1860 image_layout = image_sampler_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001861 } else {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001862 const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1863 img_view_state = image_descriptor->GetImageViewState();
1864 image_layout = image_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001865 }
1866 if (!img_view_state) continue;
1867 const IMAGE_STATE *img_state = img_view_state->image_state.get();
1868 VkExtent3D extent = {};
1869 VkOffset3D offset = {};
1870 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1871 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1872 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
1873 } else {
1874 extent = img_state->createInfo.extent;
1875 }
John Zulauf361fb532020-07-22 10:45:39 -06001876 HazardResult hazard;
1877 const auto &subresource_range = img_view_state->normalized_subresource_range;
1878 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
1879 // Input attachments are subject to raster ordering rules
1880 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001881 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001882 } else {
1883 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range, offset, extent);
1884 }
John Zulauf33fc1d52020-07-17 11:01:10 -06001885 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001886 skip |= sync_state_->LogError(
1887 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001888 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1889 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001890 func_name, string_SyncHazard(hazard.hazard),
1891 sync_state_->report_data->FormatHandle(img_view_state->image_view).c_str(),
1892 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001893 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001894 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1895 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
John Zulauffaea0ee2021-01-14 14:01:32 -07001896 set_binding.first.second, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001897 }
1898 break;
1899 }
1900 case DescriptorClass::TexelBuffer: {
1901 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1902 if (!buf_view_state) continue;
1903 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001904 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001905 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001906 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001907 skip |= sync_state_->LogError(
1908 buf_view_state->buffer_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001909 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1910 func_name, string_SyncHazard(hazard.hazard),
locke-lunarg88dbb542020-06-23 22:05:42 -06001911 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view).c_str(),
1912 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001913 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001914 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1915 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001916 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001917 }
1918 break;
1919 }
1920 case DescriptorClass::GeneralBuffer: {
1921 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1922 auto buf_state = buffer_descriptor->GetBufferState();
1923 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001924 const ResourceAccessRange range =
1925 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001926 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001927 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001928 skip |= sync_state_->LogError(
1929 buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001930 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1931 func_name, string_SyncHazard(hazard.hazard),
1932 sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
locke-lunarg88dbb542020-06-23 22:05:42 -06001933 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001934 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001935 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1936 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001937 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001938 }
1939 break;
1940 }
1941 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1942 default:
1943 break;
1944 }
1945 }
1946 }
1947 }
1948 return skip;
1949}
1950
1951void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1952 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001953 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001954 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001955 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
1956 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001957 return;
1958 }
1959
1960 using DescriptorClass = cvdescriptorset::DescriptorClass;
1961 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1962 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1963 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1964 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1965
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001966 for (const auto &stage_state : pipe->stage_state) {
1967 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1968 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001969 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001970 }
locke-lunarg61870c22020-06-09 14:51:50 -06001971 for (const auto &set_binding : stage_state.descriptor_uses) {
1972 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1973 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1974 set_binding.first.second);
1975 const auto descriptor_type = binding_it.GetType();
1976 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1977 auto array_idx = 0;
1978
1979 if (binding_it.IsVariableDescriptorCount()) {
1980 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1981 }
1982 SyncStageAccessIndex sync_index =
1983 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1984
1985 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1986 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1987 switch (descriptor->GetClass()) {
1988 case DescriptorClass::ImageSampler:
1989 case DescriptorClass::Image: {
1990 const IMAGE_VIEW_STATE *img_view_state = nullptr;
1991 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
1992 img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
1993 } else {
1994 img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
1995 }
1996 if (!img_view_state) continue;
1997 const IMAGE_STATE *img_state = img_view_state->image_state.get();
1998 VkExtent3D extent = {};
1999 VkOffset3D offset = {};
2000 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
2001 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
2002 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
2003 } else {
2004 extent = img_state->createInfo.extent;
2005 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07002006 SyncOrdering ordering_rule = (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
2007 ? SyncOrdering::kRaster
2008 : SyncOrdering::kNonAttachment;
2009 current_context_->UpdateAccessState(*img_state, sync_index, ordering_rule,
2010 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002011 break;
2012 }
2013 case DescriptorClass::TexelBuffer: {
2014 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
2015 if (!buf_view_state) continue;
2016 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002017 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002018 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002019 break;
2020 }
2021 case DescriptorClass::GeneralBuffer: {
2022 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
2023 auto buf_state = buffer_descriptor->GetBufferState();
2024 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06002025 const ResourceAccessRange range =
2026 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07002027 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002028 break;
2029 }
2030 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2031 default:
2032 break;
2033 }
2034 }
2035 }
2036 }
2037}
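// Illustrative sketch (not part of the layer; the wrapper and lookup helper below are assumptions):
// ValidateDispatchDrawDescriptorSet / RecordDispatchDrawDescriptorSet are intended to be used as a
// validate/record pair from the draw and dispatch command hooks, roughly:
//
//   // bool skip = false;
//   // auto *cb_access = GetAccessContext(commandBuffer);  // assumed lookup helper
//   // skip |= cb_access->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
//   // ...and later, in the record phase, with a fresh usage tag:
//   // cb_access->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);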
2038
2039bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
2040 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002041 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2042 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002043 return skip;
2044 }
2045
2046 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2047 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002048 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002049
2050 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002051 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002052 if (binding_description.binding < binding_buffers_size) {
2053 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002054 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002055
locke-lunarg1ae57d62020-11-18 10:49:19 -07002056 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002057 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2058 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002059 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002060 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002061 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002062 buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002063 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002064 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002065 }
2066 }
2067 }
2068 return skip;
2069}
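// Illustrative arithmetic (hedged; the exact clamping behavior of GetBufferRange is assumed): with a
// binding offset of 256, stride 16, firstVertex 4 and vertexCount 8, the attribute read would cover
// bytes [256 + 4 * 16, 256 + 12 * 16) = [320, 448), clamped to the bound buffer size. A vertexCount of
// UINT32_MAX (used by ValidateDrawVertexIndex below) simply clamps to the end of the buffer.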
2070
2071void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002072 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2073 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002074 return;
2075 }
2076 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2077 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002078 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002079
2080 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002081 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002082 if (binding_description.binding < binding_buffers_size) {
2083 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002084 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002085
locke-lunarg1ae57d62020-11-18 10:49:19 -07002086 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002087 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2088 vertexCount, binding_description.stride);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002089 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_ATTRIBUTE_INPUT_VERTEX_ATTRIBUTE_READ,
2090 SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002091 }
2092 }
2093}
2094
2095bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2096 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002097 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002098 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002099 }
locke-lunarg61870c22020-06-09 14:51:50 -06002100
locke-lunarg1ae57d62020-11-18 10:49:19 -07002101 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002102 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002103 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2104 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002105 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, range);
locke-lunarg61870c22020-06-09 14:51:50 -06002106 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002107 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002108 index_buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002109 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002110 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002111 }
2112
2113 // TODO: For now, we validate against the whole vertex buffer, since the index buffer contents can change up until queue submit.
2114 // We will detect a more accurate range in the future.
2115 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2116 return skip;
2117}
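// Illustrative arithmetic (hedged): assuming GetIndexAlignment returns the index size in bytes
// (2 for VK_INDEX_TYPE_UINT16, 4 for VK_INDEX_TYPE_UINT32), a vkCmdDrawIndexed with firstIndex 6 and
// indexCount 3 against a uint16 index buffer bound at offset 0 is checked over bytes [12, 18).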
2118
2119void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag &tag) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002120 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002121
locke-lunarg1ae57d62020-11-18 10:49:19 -07002122 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002123 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002124 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2125 firstIndex, indexCount, index_size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07002126 current_context_->UpdateAccessState(*index_buf_state, SYNC_INDEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002127
2128 // TODO: For now, we record accesses for the whole vertex buffer, since the index buffer contents can change up until queue submit.
2129 // We will detect a more accurate range in the future.
2130 RecordDrawVertex(UINT32_MAX, 0, tag);
2131}
2132
2133bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002134 bool skip = false;
2135 if (!current_renderpass_context_) return skip;
John Zulauf64ffe552021-02-06 10:25:07 -07002136 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
locke-lunarg7077d502020-06-18 21:37:26 -06002137 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002138}
2139
2140void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002141 if (current_renderpass_context_) {
John Zulauf64ffe552021-02-06 10:25:07 -07002142 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002143 }
locke-lunarg61870c22020-06-09 14:51:50 -06002144}
2145
John Zulauf64ffe552021-02-06 10:25:07 -07002146void CommandBufferAccessContext::RecordBeginRenderPass(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2147 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2148 const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002149 // Create an access context for the current renderpass.
John Zulauf64ffe552021-02-06 10:25:07 -07002150 render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
John Zulauf16adfc92020-04-08 10:28:33 -06002151 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf64ffe552021-02-06 10:25:07 -07002152 current_renderpass_context_->RecordBeginRenderPass(tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002153 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf16adfc92020-04-08 10:28:33 -06002154}
2155
John Zulauf64ffe552021-02-06 10:25:07 -07002156void CommandBufferAccessContext::RecordNextSubpass(CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002157 assert(current_renderpass_context_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002158 auto prev_tag = NextCommandTag(command);
2159 auto next_tag = NextSubcommandTag(command);
John Zulauf64ffe552021-02-06 10:25:07 -07002160 current_renderpass_context_->RecordNextSubpass(prev_tag, next_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002161 current_context_ = &current_renderpass_context_->CurrentContext();
2162}
2163
John Zulauf64ffe552021-02-06 10:25:07 -07002164void CommandBufferAccessContext::RecordEndRenderPass(CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002165 assert(current_renderpass_context_);
2166 if (!current_renderpass_context_) return;
2167
John Zulauf64ffe552021-02-06 10:25:07 -07002168 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, NextCommandTag(command));
John Zulauf355e49b2020-04-24 15:11:15 -06002169 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002170 current_renderpass_context_ = nullptr;
2171}
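// Illustrative lifecycle sketch (hedged; call sites and tag plumbing are assumptions): the three
// Record*RenderPass methods above maintain the render_pass_contexts_ stack and are expected to be
// driven by the corresponding command buffer hooks, in this order:
//
//   // cb_access->RecordBeginRenderPass(*rp_state, pRenderPassBegin->renderArea, attachment_views, tag);
//   // cb_access->RecordNextSubpass(CMD_NEXTSUBPASS);      // once per vkCmdNextSubpass
//   // cb_access->RecordEndRenderPass(CMD_ENDRENDERPASS);  // restores cb_access_context_ as current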
2172
John Zulauf4a6105a2020-11-17 15:11:05 -07002173void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
2174 // Erase is okay even if the key isn't present in the events context.
John Zulauf669dfd52021-01-27 17:15:28 -07002175 const auto *event_state = sync_state_->Get<EVENT_STATE>(event);
2176 if (event_state) {
2177 GetCurrentEventsContext()->Destroy(event_state);
John Zulaufd5115702021-01-18 12:34:33 -07002178 }
2179}
2180
John Zulauf64ffe552021-02-06 10:25:07 -07002181bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &ex_context, const CMD_BUFFER_STATE &cmd,
John Zulauffaea0ee2021-01-14 14:01:32 -07002182 const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002183 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002184 const auto &sync_state = ex_context.GetSyncState();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002185 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2186 if (!pipe ||
2187 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002188 return skip;
2189 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002190 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002191 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
John Zulauf64ffe552021-02-06 10:25:07 -07002192 VkExtent3D extent = CastTo3D(render_area_.extent);
2193 VkOffset3D offset = CastTo3D(render_area_.offset);
locke-lunarg37047832020-06-12 13:44:45 -06002194
John Zulauf1a224292020-06-30 14:52:13 -06002195 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002196 // The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002197 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2198 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002199 if (location >= subpass.colorAttachmentCount ||
2200 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002201 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002202 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002203 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf1a224292020-06-30 14:52:13 -06002204 HazardResult hazard = current_context.DetectHazard(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002205 SyncOrdering::kColorAttachment, offset, extent);
locke-lunarg96dc9632020-06-10 17:22:18 -06002206 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002207 skip |= sync_state.LogError(img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002208 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002209 func_name, string_SyncHazard(hazard.hazard),
2210 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2211 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002212 location, ex_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002213 }
2214 }
2215 }
locke-lunarg37047832020-06-12 13:44:45 -06002216
2217 // PHASE1 TODO: Add layout-based read vs. write selection.
2218 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002219 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002220 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002221 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002222 bool depth_write = false, stencil_write = false;
2223
2224 // PHASE1 TODO: These validations should be in core_checks.
2225 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002226 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2227 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002228 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2229 depth_write = true;
2230 }
2231 // PHASE1 TODO: This needs to check whether the stencil aspect is actually writable:
2232 // if failOp, passOp, or depthFailOp is not KEEP and writeMask isn't 0, it's writable.
2233 // If the depth test is disabled, it is treated as passing, so depthFailOp doesn't run.
2234 // PHASE1 TODO: These validations should be in core_checks.
2235 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002236 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002237 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2238 stencil_write = true;
2239 }
2240
2241 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2242 if (depth_write) {
2243 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002244 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002245 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002246 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002247 skip |= sync_state.LogError(
2248 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002249 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002250 func_name, string_SyncHazard(hazard.hazard),
2251 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2252 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002253 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002254 }
2255 }
2256 if (stencil_write) {
2257 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002258 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002259 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002260 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002261 skip |= sync_state.LogError(
2262 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002263 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002264 func_name, string_SyncHazard(hazard.hazard),
2265 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2266 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauf64ffe552021-02-06 10:25:07 -07002267 ex_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002268 }
locke-lunarg61870c22020-06-09 14:51:50 -06002269 }
2270 }
2271 return skip;
2272}
2273
John Zulauf64ffe552021-02-06 10:25:07 -07002274void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002275 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2276 if (!pipe ||
2277 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002278 return;
2279 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002280 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002281 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
John Zulauf64ffe552021-02-06 10:25:07 -07002282 VkExtent3D extent = CastTo3D(render_area_.extent);
2283 VkOffset3D offset = CastTo3D(render_area_.offset);
locke-lunarg61870c22020-06-09 14:51:50 -06002284
John Zulauf1a224292020-06-30 14:52:13 -06002285 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002286 // The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002287 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2288 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002289 if (location >= subpass.colorAttachmentCount ||
2290 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002291 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002292 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002293 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf8e3c3e92021-01-06 11:19:36 -07002294 current_context.UpdateAccessState(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
2295 SyncOrdering::kColorAttachment, offset, extent, 0, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002296 }
2297 }
locke-lunarg37047832020-06-12 13:44:45 -06002298
2299 // PHASE1 TODO: Add layout-based read vs. write selection.
2300 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002301 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002302 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002303 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002304 bool depth_write = false, stencil_write = false;
2305
2306 // PHASE1 TODO: These validations should be in core_checks.
2307 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002308 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2309 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002310 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2311 depth_write = true;
2312 }
2313 // PHASE1 TODO: This needs to check whether the stencil aspect is actually writable:
2314 // if failOp, passOp, or depthFailOp is not KEEP and writeMask isn't 0, it's writable.
2315 // If the depth test is disabled, it is treated as passing, so depthFailOp doesn't run.
2316 // PHASE1 TODO: These validations should be in core_checks.
2317 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002318 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002319 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2320 stencil_write = true;
2321 }
2322
2323 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2324 if (depth_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002325 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2326 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT,
2327 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002328 }
2329 if (stencil_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002330 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2331 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT,
2332 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002333 }
locke-lunarg61870c22020-06-09 14:51:50 -06002334 }
2335}
2336
John Zulauf64ffe552021-02-06 10:25:07 -07002337bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002338 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002339 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002340 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002341 current_subpass_);
John Zulauf64ffe552021-02-06 10:25:07 -07002342 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002343 func_name);
2344
John Zulauf355e49b2020-04-24 15:11:15 -06002345 const auto next_subpass = current_subpass_ + 1;
John Zulauf1507ee42020-05-18 11:33:09 -06002346 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauf64ffe552021-02-06 10:25:07 -07002347 skip |=
2348 next_context.ValidateLayoutTransitions(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002349 if (!skip) {
2350 // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2351 // on a copy of the (empty) next context.
2352 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2353 AccessContext temp_context(next_context);
2354 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
John Zulauf64ffe552021-02-06 10:25:07 -07002355 skip |=
2356 temp_context.ValidateLoadOperation(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002357 }
John Zulauf7635de32020-05-29 17:14:15 -06002358 return skip;
2359}
John Zulauf64ffe552021-02-06 10:25:07 -07002360bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &ex_context, const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002361 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002362 bool skip = false;
John Zulauf64ffe552021-02-06 10:25:07 -07002363 skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002364 current_subpass_);
John Zulauf64ffe552021-02-06 10:25:07 -07002365 skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002366 func_name);
John Zulauf64ffe552021-02-06 10:25:07 -07002367 skip |= ValidateFinalSubpassLayoutTransitions(ex_context, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002368 return skip;
2369}
2370
John Zulauf64ffe552021-02-06 10:25:07 -07002371AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
2372 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, render_area_, attachment_views_);
John Zulauf7635de32020-05-29 17:14:15 -06002373}
2374
John Zulauf64ffe552021-02-06 10:25:07 -07002375bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &ex_context,
2376 const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002377 bool skip = false;
2378
John Zulauf7635de32020-05-29 17:14:15 -06002379 // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2380 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2381 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2382 // to apply and only copy then, if this proves a hot spot.
2383 std::unique_ptr<AccessContext> proxy_for_current;
2384
John Zulauf355e49b2020-04-24 15:11:15 -06002385 // Validate the "finalLayout" transitions to external
2386 // Get them from the extra entry where they're hiding.
2387 const auto &final_transitions = rp_state_->subpass_transitions.back();
2388 for (const auto &transition : final_transitions) {
2389 const auto &attach_view = attachment_views_[transition.attachment];
2390 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
2391 assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
John Zulauf7635de32020-05-29 17:14:15 -06002392 auto *context = trackback.context;
2393
2394 if (transition.prev_pass == current_subpass_) {
2395 if (!proxy_for_current) {
2396 // We haven't recorded resolve operations for the current_subpass, so we need to copy current and update it *as if* we had.
John Zulauf64ffe552021-02-06 10:25:07 -07002397 proxy_for_current.reset(CreateStoreResolveProxy());
John Zulauf7635de32020-05-29 17:14:15 -06002398 }
2399 context = proxy_for_current.get();
2400 }
2401
John Zulaufa0a98292020-09-18 09:30:10 -06002402 // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2403 const auto merged_barrier = MergeBarriers(trackback.barriers);
John Zulaufc523bf62021-02-16 08:20:34 -07002404 auto hazard = context->DetectImageBarrierHazard(*attach_view->image_state, merged_barrier.src_exec_scope.exec_scope,
John Zulaufa0a98292020-09-18 09:30:10 -06002405 merged_barrier.src_access_scope, attach_view->normalized_subresource_range,
2406 AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002407 if (hazard.hazard) {
John Zulauf64ffe552021-02-06 10:25:07 -07002408 skip |= ex_context.GetSyncState().LogError(
John Zulauffaea0ee2021-01-14 14:01:32 -07002409 rp_state_->renderPass, string_SyncHazardVUID(hazard.hazard),
2410 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2411 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2412 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2413 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
John Zulauf64ffe552021-02-06 10:25:07 -07002414 ex_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06002415 }
2416 }
2417 return skip;
2418}
2419
2420void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
2421 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002422 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002423}
2424
John Zulauf64ffe552021-02-06 10:25:07 -07002425void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag &tag) {
John Zulauf1507ee42020-05-18 11:33:09 -06002426 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2427 auto &subpass_context = subpass_contexts_[current_subpass_];
John Zulauf64ffe552021-02-06 10:25:07 -07002428 VkExtent3D extent = CastTo3D(render_area_.extent);
2429 VkOffset3D offset = CastTo3D(render_area_.offset);
John Zulauf1507ee42020-05-18 11:33:09 -06002430
2431 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2432 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
2433 if (attachment_views_[i] == nullptr) continue; // UNUSED
2434 const auto &view = *attachment_views_[i];
2435 const IMAGE_STATE *image = view.image_state.get();
2436 if (image == nullptr) continue;
2437
2438 const auto &ci = attachment_ci[i];
2439 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002440 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002441 const bool is_color = !(has_depth || has_stencil);
2442
2443 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002444 subpass_context.UpdateAccessState(*image, ColorLoadUsage(ci.loadOp), SyncOrdering::kColorAttachment,
2445 view.normalized_subresource_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002446 } else {
2447 auto update_range = view.normalized_subresource_range;
2448 if (has_depth) {
2449 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002450 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.loadOp),
2451 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002452 }
2453 if (has_stencil) {
2454 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002455 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.stencilLoadOp),
2456 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002457 }
2458 }
2459 }
2460 }
2461}
John Zulauf64ffe552021-02-06 10:25:07 -07002462RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
2463 VkQueueFlags queue_flags,
2464 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
2465 const AccessContext *external_context)
2466 : rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_(attachment_views) {
John Zulauf355e49b2020-04-24 15:11:15 -06002467 // Add this for all subpasses here so that they exist during next-subpass validation
John Zulauf64ffe552021-02-06 10:25:07 -07002468 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
John Zulauf355e49b2020-04-24 15:11:15 -06002469 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002470 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulauf355e49b2020-04-24 15:11:15 -06002471 }
John Zulauf64ffe552021-02-06 10:25:07 -07002472}
2473void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
2474 assert(0 == current_subpass_);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002475 subpass_contexts_[current_subpass_].SetStartTag(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002476 RecordLayoutTransitions(tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002477 RecordLoadOperations(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002478}
John Zulauf1507ee42020-05-18 11:33:09 -06002479
John Zulauf64ffe552021-02-06 10:25:07 -07002480void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag &prev_subpass_tag,
John Zulauffaea0ee2021-01-14 14:01:32 -07002481 const ResourceUsageTag &next_subpass_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002482 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauf64ffe552021-02-06 10:25:07 -07002483 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, prev_subpass_tag);
2484 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, prev_subpass_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002485
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002486 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2487 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002488 current_subpass_++;
2489 assert(current_subpass_ < subpass_contexts_.size());
John Zulauffaea0ee2021-01-14 14:01:32 -07002490 subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
2491 RecordLayoutTransitions(next_subpass_tag);
John Zulauf64ffe552021-02-06 10:25:07 -07002492 RecordLoadOperations(next_subpass_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002493}
2494
John Zulauf64ffe552021-02-06 10:25:07 -07002495void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag &tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002496 // Add the resolve and store accesses
John Zulauf64ffe552021-02-06 10:25:07 -07002497 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, tag);
2498 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, tag);
John Zulauf7635de32020-05-29 17:14:15 -06002499
John Zulauf355e49b2020-04-24 15:11:15 -06002500 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002501 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002502
2503 // Add the "finalLayout" transitions to external
2504 // Get them from the extra entry where they're hiding.
John Zulauf89311b42020-09-29 16:28:47 -06002505 // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2506 // TODO: With aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2507 // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002508 const auto &final_transitions = rp_state_->subpass_transitions.back();
2509 for (const auto &transition : final_transitions) {
2510 const auto &attachment = attachment_views_[transition.attachment];
2511 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufaa97d8b2020-07-14 10:58:13 -06002512 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
John Zulaufd5115702021-01-18 12:34:33 -07002513 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), tag);
John Zulauf1e331ec2020-12-04 18:29:38 -07002514 for (const auto &barrier : last_trackback.barriers) {
John Zulaufd5115702021-01-18 12:34:33 -07002515 barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
John Zulauf1e331ec2020-12-04 18:29:38 -07002516 }
John Zulauf1e331ec2020-12-04 18:29:38 -07002517 external_context->UpdateResourceAccess(*attachment->image_state, attachment->normalized_subresource_range, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002518 }
2519}
2520
Jeremy Gebben40a22942020-12-22 14:22:06 -07002521SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002522 SyncExecScope result;
2523 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002524 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2525 result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002526 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2527 return result;
2528}
2529
Jeremy Gebben40a22942020-12-22 14:22:06 -07002530SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags2KHR mask_param) {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002531 SyncExecScope result;
2532 result.mask_param = mask_param;
Jeremy Gebben5f585ae2021-02-02 09:03:06 -07002533 result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
2534 result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002535 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2536 return result;
2537}
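// Illustrative example (hedged; the exact expanded bit sets come from sync_utils' stage ordering
// tables): MakeSrc widens the execution scope toward logically *earlier* stages, while MakeDst widens
// it toward *later* ones, e.g. on a graphics queue:
//
//   // auto src = SyncExecScope::MakeSrc(VK_QUEUE_GRAPHICS_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
//   // auto dst = SyncExecScope::MakeDst(VK_QUEUE_GRAPHICS_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
//   // src.exec_scope should also include vertex shading and earlier stages, while dst.exec_scope
//   // should also include late fragment tests, color attachment output, and later stages.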
2538
2539SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002540 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002541 src_access_scope = 0;
John Zulaufc523bf62021-02-16 08:20:34 -07002542 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002543 dst_access_scope = 0;
2544}
2545
2546template <typename Barrier>
2547SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
John Zulaufc523bf62021-02-16 08:20:34 -07002548 src_exec_scope = src;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002549 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002550 dst_exec_scope = dst;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002551 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2552}
2553
2554SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002555 const auto barrier = lvl_find_in_chain<VkMemoryBarrier2KHR>(subpass.pNext);
2556 if (barrier) {
2557 auto src = SyncExecScope::MakeSrc(queue_flags, barrier->srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002558 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002559 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier->srcAccessMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002560
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002561 auto dst = SyncExecScope::MakeDst(queue_flags, barrier->dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002562 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002563 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier->dstAccessMask);
2564
2565 } else {
2566 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002567 src_exec_scope = src;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002568 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2569
2570 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
John Zulaufc523bf62021-02-16 08:20:34 -07002571 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002572 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
2573 }
2574}
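// Illustrative note (hedged; the struct initialization shown is only a sketch): when a
// VkSubpassDependency2 carries a VkMemoryBarrier2KHR on its pNext chain (VK_KHR_synchronization2),
// the stage/access masks from that structure are used and the legacy mask fields of the dependency
// are ignored, e.g.:
//
//   // VkMemoryBarrier2KHR mb2 = {VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR, nullptr,
//   //                            VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR,
//   //                            VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR,
//   //                            VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR,
//   //                            VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR};
//   // VkSubpassDependency2 dep = ...;   // legacy masks present but superseded
//   // dep.pNext = &mb2;
//   // SyncBarrier barrier(queue_flags, dep);  // scopes derived from mb2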
2575
2576template <typename Barrier>
2577SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {
2578 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
2579 src_exec_scope = src;
2580 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2581
2582 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002583 dst_exec_scope = dst;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07002584 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002585}
2586
John Zulaufb02c1eb2020-10-06 16:33:36 -06002587// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2588void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2589 for (const auto &barrier : barriers) {
2590 ApplyBarrier(barrier, layout_transition);
2591 }
2592}
2593
John Zulauf89311b42020-09-29 16:28:47 -06002594 // ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. Its designed use is for
2595 // inter-subpass barriers for lazy evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2596// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufb02c1eb2020-10-06 16:33:36 -06002597void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag &tag) {
2598 assert(!pending_layout_transition); // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002599 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002600 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002601 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002602 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002603 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002604 ApplyPendingBarriers(tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002605}
John Zulauf9cb530d2019-09-30 14:14:10 -06002606HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2607 HazardResult hazard;
2608 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002609 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002610 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002611 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002612 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002613 }
2614 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002615 // Write operation:
2616 // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads).
2617 // If reads exist -- test only against them, because either:
2618 // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
2619 // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
2620 // the current write happens after the reads, so just test the write against the reads
2621 // Otherwise test against last_write
2622 //
2623 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07002624 if (last_reads.size()) {
2625 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06002626 if (IsReadHazard(usage_stage, read_access)) {
2627 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2628 break;
2629 }
2630 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002631 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06002632 // Write-After-Write check -- if we have a previous write to test against
2633 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002634 }
2635 }
2636 return hazard;
2637}
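// Rough summary of the simple (un-ordered) check above, for reference:
//   //   new read  -> READ_AFTER_WRITE  if the last write has not been synchronized against the reading stage
//   //   new write -> WRITE_AFTER_READ  if some prior read has not been synchronized against the writing stage
//   //                WRITE_AFTER_WRITE if there are no recorded reads and the last write is unsynchronized
// Reads recorded since the last write stand in for that write, which is why the WAW test is skipped
// whenever last_reads is non-empty.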
2638
John Zulauf8e3c3e92021-01-06 11:19:36 -07002639HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering &ordering_rule) const {
2640 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06002641 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
2642 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06002643 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002644 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002645 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
2646 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06002647 if (IsRead(usage_bit)) {
2648 // Exclude RAW if no write, or write not most "most recent" operation w.r.t. usage;
2649 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
2650 if (is_raw_hazard) {
2651 // NOTE: we know last_write is non-zero
2652 // See if the ordering rules save us from the simple RAW check above
2653 // First check to see if the current usage is covered by the ordering rules
2654 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
2655 const bool usage_is_ordered =
2656 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
2657 if (usage_is_ordered) {
2658 // Now see if the most recent write (or a subsequent read) is ordered
2659 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
2660 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06002661 }
2662 }
John Zulauf4285ee92020-09-23 10:20:52 -06002663 if (is_raw_hazard) {
2664 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
2665 }
John Zulauf361fb532020-07-22 10:45:39 -06002666 } else {
2667 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002668 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07002669 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06002670 // Look for any WAR hazards outside the ordered set of stages
Jeremy Gebben40a22942020-12-22 14:22:06 -07002671 VkPipelineStageFlags2KHR ordered_stages = 0;
John Zulauf4285ee92020-09-23 10:20:52 -06002672 if (usage_write_is_ordered) {
2673 // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
2674 ordered_stages = GetOrderedStages(ordering);
2675 }
2676 // If we're tracking any reads that aren't ordered against the current write, we have to check them all.
2677 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002678 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06002679 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
2680 if (IsReadHazard(usage_stage, read_access)) {
2681 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2682 break;
2683 }
John Zulaufd14743a2020-07-03 09:42:39 -06002684 }
2685 }
John Zulauf4285ee92020-09-23 10:20:52 -06002686 } else if (!(last_write_is_ordered && usage_write_is_ordered)) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002687 if (last_write.any() && IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002688 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06002689 }
John Zulauf69133422020-05-20 14:55:53 -06002690 }
2691 }
2692 return hazard;
2693}
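// Illustrative note (a sketch, not an exhaustive statement of the ordering rules): when ordering_rule covers
// attachment accesses (e.g. rasterization-order guarantees for color attachments), a prior attachment write
// followed by another attachment access in the same ordering scopes is not flagged above, because last_write
// falls within ordering.access_scope and the new usage within the ordering exec/access scopes.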
2694
John Zulauf2f952d22020-02-10 11:34:51 -07002695// Asynchronous Hazards occur between subpasses with no connection through the DAG
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002696HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag &start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07002697 HazardResult hazard;
2698 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002699 // Async checks must not look back further than the start of the subpass, as we only want to find hazards between the async
2700 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
2701 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07002702 if (IsRead(usage)) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002703 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06002704 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07002705 }
2706 } else {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002707 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06002708 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07002709 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002710 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07002711 for (const auto &read_access : last_reads) {
2712 if (read_access.tag.index >= start_tag.index) {
2713 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002714 break;
2715 }
2716 }
John Zulauf2f952d22020-02-10 11:34:51 -07002717 }
2718 }
2719 return hazard;
2720}
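// Usage note: DetectAsyncHazard is applied between contexts with no dependency path in the render pass DAG
// (e.g. unordered subpasses). Only accesses tagged at or after start_tag participate, so anything recorded
// before the async context began is left to the ordinary, ordered hazard checks.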
2721
Jeremy Gebben40a22942020-12-22 14:22:06 -07002722HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002723 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07002724 // Only supporting image layout transitions for now
2725 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2726 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06002727 // Only test for WAW if there are no intervening read operations.
2728 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07002729 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06002730 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07002731 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002732 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06002733 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07002734 break;
2735 }
2736 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002737 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2738 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2739 }
2740
2741 return hazard;
2742}
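// Note: the layout transition is modelled as a write occurring "within" the barrier, so the checks above ask
// whether prior reads are execution-chained into src_exec_scope and whether the prior write is covered by
// src_access_scope; accesses outside those scopes are reported as hazards against the transition.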
2743
Jeremy Gebben40a22942020-12-22 14:22:06 -07002744HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags2KHR src_exec_scope,
John Zulauf4a6105a2020-11-17 15:11:05 -07002745 const SyncStageAccessFlags &src_access_scope,
2746 const ResourceUsageTag &event_tag) const {
2747 // Only supporting image layout transitions for now
2748 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
2749 HazardResult hazard;
2750 // Only test for WAW if there are no intervening read operations.
2751 // See DetectHazard(SyncStageAccessIndex) above for more details.
2752
John Zulaufab7756b2020-12-29 16:10:16 -07002753 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002754 // Look at the reads, if any... if reads exist, they are either the reason the access is in the event's
2755 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07002756 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002757 if (read_access.tag.IsBefore(event_tag)) {
2758 // The read is in the event's first synchronization scope, so we use a barrier hazard check
2759 // If the read stage is not in the src sync scope
2760 // *AND* not execution chained with an existing sync barrier (that's the or)
2761 // then the barrier access is unsafe (R/W after R)
2762 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
2763 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2764 break;
2765 }
2766 } else {
2767 // The read is not in the event's first sync scope and so is a hazard vs. the layout transition
2768 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
2769 }
2770 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002771 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002772 // If there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
2773 if (write_tag.IsBefore(event_tag)) {
2774 // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
2775 // So do a normal barrier hazard check
2776 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
2777 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2778 }
2779 } else {
2780 // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06002781 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
2782 }
John Zulaufd14743a2020-07-03 09:42:39 -06002783 }
John Zulauf361fb532020-07-22 10:45:39 -06002784
John Zulauf0cb5be22020-01-23 12:18:22 -07002785 return hazard;
2786}
2787
John Zulauf5f13a792020-03-10 07:31:21 -06002788 // The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
2789 // transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
2790// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
2791void ResourceAccessState::Resolve(const ResourceAccessState &other) {
2792 if (write_tag.IsBefore(other.write_tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002793 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
2794 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06002795 *this = other;
2796 } else if (!other.write_tag.IsBefore(write_tag)) {
2797 // This is the *equals* case for write operations, we merged the write barriers and the read state (but without the
2798 // dependency chaining logic or any stage expansion)
2799 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002800 pending_write_barriers |= other.pending_write_barriers;
2801 pending_layout_transition |= other.pending_layout_transition;
2802 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06002803
John Zulaufd14743a2020-07-03 09:42:39 -06002804 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07002805 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06002806 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07002807 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06002808 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06002809 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06002810 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06002811 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
2812 // but we should wait on profiling data for that.
2813 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06002814 auto &my_read = last_reads[my_read_index];
2815 if (other_read.stage == my_read.stage) {
2816 if (my_read.tag.IsBefore(other_read.tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002817 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06002818 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06002819 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06002820 my_read.pending_dep_chain = other_read.pending_dep_chain;
2821 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
2822 // May require tracking more than one access per stage.
2823 my_read.barriers = other_read.barriers;
Jeremy Gebben40a22942020-12-22 14:22:06 -07002824 if (my_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauf4285ee92020-09-23 10:20:52 -06002825 // Since I'm overwriting the fragment stage read, also update the input attachment info
2826 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06002827 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06002828 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002829 } else if (other_read.tag.IsBefore(my_read.tag)) {
2830 // The read tags match so merge the barriers
2831 my_read.barriers |= other_read.barriers;
2832 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06002833 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002834
John Zulauf5f13a792020-03-10 07:31:21 -06002835 break;
2836 }
2837 }
2838 } else {
2839 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07002840 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06002841 last_read_stages |= other_read.stage;
Jeremy Gebben40a22942020-12-22 14:22:06 -07002842 if (other_read.stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06002843 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06002844 }
John Zulauf5f13a792020-03-10 07:31:21 -06002845 }
2846 }
John Zulauf361fb532020-07-22 10:45:39 -06002847 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06002848 } // the else clause would be that the other write is before this write... in which case we supersede the other state and
2849 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07002850
2851 // Merge first access information by making a copy of this first_accesses_ and reconstructing it with a shuffle
2852 // of the copy and other into this, using the update-first logic.
2853 // NOTE: All sorts of additional cleverness could be put into short circuits. (for example, back is a write and is before the front
2854 // of the other first_accesses... )
2855 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
2856 FirstAccesses firsts(std::move(first_accesses_));
2857 first_accesses_.clear();
2858 first_read_stages_ = 0U;
2859 auto a = firsts.begin();
2860 auto a_end = firsts.end();
2861 for (auto &b : other.first_accesses_) {
2862 // TODO: Determine whether "IsBefore" or "IsGloballyBefore" is needed...
2863 while (a != a_end && a->tag.IsBefore(b.tag)) {
2864 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
2865 ++a;
2866 }
2867 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
2868 }
2869 for (; a != a_end; ++a) {
2870 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
2871 }
2872 }
John Zulauf5f13a792020-03-10 07:31:21 -06002873}
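// Sketch of the merge semantics above: Resolve() keeps the more recent write state outright; for equal write
// tags it ORs the write barriers, takes the newer read per stage both sides saw, and appends read stages only
// the other side saw, so the merged state is a conservative union of both access histories. The first-access
// lists are likewise merged in tag order.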
2874
John Zulauf8e3c3e92021-01-06 11:19:36 -07002875void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06002876 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
2877 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06002878 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06002879 // Multiple outstanding reads may be of interest and do dependency chains independently
2880 // However, for purposes of barrier tracking, only one read per pipeline stage matters
2881 const auto usage_stage = PipelineStageBit(usage_index);
2882 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07002883 for (auto &read_access : last_reads) {
2884 if (read_access.stage == usage_stage) {
2885 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002886 break;
2887 }
2888 }
2889 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07002890 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002891 last_read_stages |= usage_stage;
2892 }
John Zulauf4285ee92020-09-23 10:20:52 -06002893
2894 // Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
Jeremy Gebben40a22942020-12-22 14:22:06 -07002895 if (usage_stage == VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR) {
John Zulauff51fbb62020-10-02 14:43:24 -06002896 // TODO Revisit re: multiple reads for a given stage
2897 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06002898 }
John Zulauf9cb530d2019-09-30 14:14:10 -06002899 } else {
2900 // Assume write
2901 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06002902 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002903 }
John Zulauffaea0ee2021-01-14 14:01:32 -07002904 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06002905}
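// Usage note: Update() is invoked (via the access context's UpdateAccessState) for each range a command
// touches during the record phase, after the corresponding PreCallValidate hook has already run hazard
// detection, so the stored state always reflects the most recent read per stage and the most recent write.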
John Zulauf5f13a792020-03-10 07:31:21 -06002906
John Zulauf89311b42020-09-29 16:28:47 -06002907// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
2908// if the last_reads/last_write were unsafe, we've reported them, in either case the prior access is irrelevant.
2909// We can overwrite them as *this* write is now after them.
2910//
2911// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002912void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag &tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07002913 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06002914 last_read_stages = 0;
2915 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06002916 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06002917
2918 write_barriers = 0;
2919 write_dependency_chain = 0;
2920 write_tag = tag;
2921 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06002922}
2923
John Zulauf89311b42020-09-29 16:28:47 -06002924// Apply the memory barrier without updating the existing barriers. The execution barrier
2925// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
2926// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
2927// replace the current write barriers or add to them, so accumulate to pending as well.
2928void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
2929 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
2930 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06002931 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
2932 // transition, under the theory of "most recent access". If the read/write *isn't* safe
2933 // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
2934 // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulaufc523bf62021-02-16 08:20:34 -07002935 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope.exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06002936 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07002937 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002938 }
John Zulauf89311b42020-09-29 16:28:47 -06002939 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
2940 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06002941
John Zulauf89311b42020-09-29 16:28:47 -06002942 if (!pending_layout_transition) {
2943 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
2944 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07002945 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06002946 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufc523bf62021-02-16 08:20:34 -07002947 if (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers)) {
2948 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06002949 }
2950 }
John Zulaufa0a98292020-09-18 09:30:10 -06002951 }
John Zulaufa0a98292020-09-18 09:30:10 -06002952}
2953
John Zulauf4a6105a2020-11-17 15:11:05 -07002954// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
2955 // changes the "chaining" state, but is deferred to keep barriers independent. See discussion above.
2956void ResourceAccessState::ApplyBarrier(const ResourceUsageTag &scope_tag, const SyncBarrier &barrier, bool layout_transition) {
2957 // The scope logic for events is, if we're here, the resource usage was flagged as "in the first execution scope" at
2958 // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
2959 // in order to know if it's in the execution scope
2960 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
2961 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
2962 // errors w.r.t. "most recent" accesses.
2963 if (layout_transition || ((write_tag.IsBefore(scope_tag)) && (barrier.src_access_scope & last_write).any())) {
2964 pending_write_barriers |= barrier.dst_access_scope;
John Zulaufc523bf62021-02-16 08:20:34 -07002965 pending_write_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002966 }
2967 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
2968 pending_layout_transition |= layout_transition;
2969
2970 if (!pending_layout_transition) {
2971 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
2972 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07002973 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002974 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
2975 // NOTE: That's not really correct... this read stage might *not* have been included in the SetEvent, and the barriers
2976 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
2977 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
2978 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
2979 // capture (the specific write and read stages that *were* in scope at the moment of the SetEvent).
2980 // TODO: eliminate the false positive by including write/read-stages "in scope" information in the SetEvent's first_scope
John Zulaufc523bf62021-02-16 08:20:34 -07002981 if (read_access.tag.IsBefore(scope_tag) &&
2982 (barrier.src_exec_scope.exec_scope & (read_access.stage | read_access.barriers))) {
2983 read_access.pending_dep_chain |= barrier.dst_exec_scope.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002984 }
2985 }
2986 }
2987}
John Zulauf89311b42020-09-29 16:28:47 -06002988void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag &tag) {
2989 if (pending_layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06002990 // SetWrite clobbers the read count, and thus we don't have to clear the read_state out.
2991 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07002992 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf89311b42020-09-29 16:28:47 -06002993 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06002994 }
John Zulauf89311b42020-09-29 16:28:47 -06002995
2996 // Apply the accumulated execution barriers (and thus update chaining information)
2997 // for layout transition, read count is zeroed by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07002998 for (auto &read_access : last_reads) {
2999 read_access.barriers |= read_access.pending_dep_chain;
3000 read_execution_barriers |= read_access.barriers;
3001 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003002 }
3003
3004 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3005 write_dependency_chain |= pending_write_dep_chain;
3006 write_barriers |= pending_write_barriers;
3007 pending_write_dep_chain = 0;
3008 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003009}
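// Typical flow for a batch of barriers: ApplyBarrier() is called once per barrier to accumulate pending
// scopes against the *pre-barrier* state, then ApplyPendingBarriers() commits them all at once, so barriers
// recorded in the same command do not chain off one another's effects.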
3010
John Zulauf59e25072020-07-17 10:55:21 -06003011// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebben40a22942020-12-22 14:22:06 -07003012VkPipelineStageFlags2KHR ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
3013 VkPipelineStageFlags2KHR barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003014
John Zulaufab7756b2020-12-29 16:10:16 -07003015 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003016 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003017 barriers = read_access.barriers;
3018 break;
John Zulauf59e25072020-07-17 10:55:21 -06003019 }
3020 }
John Zulauf4285ee92020-09-23 10:20:52 -06003021
John Zulauf59e25072020-07-17 10:55:21 -06003022 return barriers;
3023}
3024
Jeremy Gebben40a22942020-12-22 14:22:06 -07003025inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlags2KHR usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003026 assert(IsRead(usage));
3027 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3028 // * the previous reads are not hazards, and thus last_write must be visible and available to
3029 // any reads that happen after.
3030 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3031 // the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003032 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003033}
3034
Jeremy Gebben40a22942020-12-22 14:22:06 -07003035VkPipelineStageFlags2KHR ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003036 // Whether the stages are in the ordering scope only matters if the current write is ordered
Jeremy Gebben40a22942020-12-22 14:22:06 -07003037 VkPipelineStageFlags2KHR ordered_stages = last_read_stages & ordering.exec_scope;
John Zulauf4285ee92020-09-23 10:20:52 -06003038 // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003039 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003040 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003041 // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
Jeremy Gebben40a22942020-12-22 14:22:06 -07003042 ordered_stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
John Zulauf4285ee92020-09-23 10:20:52 -06003043 }
3044
3045 return ordered_stages;
3046}
3047
John Zulauffaea0ee2021-01-14 14:01:32 -07003048void ResourceAccessState::UpdateFirst(const ResourceUsageTag &tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
3049 // Only record until we record a write.
3050 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003051 const VkPipelineStageFlags2KHR usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07003052 if (0 == (usage_stage & first_read_stages_)) {
3053 // If this is a read we haven't seen or a write, record.
3054 first_read_stages_ |= usage_stage;
3055 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3056 }
3057 }
3058}
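// Note: the first-access list captures each range's initial reads (at most one per stage) up to and including
// the first write. It is intended for use when a recorded context is later resolved against an enclosing one
// (for example, replaying a secondary command buffer) so those first uses can be validated against prior state.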
3059
John Zulaufd1f85d42020-04-15 12:23:15 -06003060void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003061 auto *access_context = GetAccessContextNoInsert(command_buffer);
3062 if (access_context) {
3063 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003064 }
3065}
3066
John Zulaufd1f85d42020-04-15 12:23:15 -06003067void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3068 auto access_found = cb_access_state.find(command_buffer);
3069 if (access_found != cb_access_state.end()) {
3070 access_found->second->Reset();
3071 cb_access_state.erase(access_found);
3072 }
3073}
3074
John Zulauf9cb530d2019-09-30 14:14:10 -06003075bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3076 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3077 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003078 const auto *cb_context = GetAccessContext(commandBuffer);
3079 assert(cb_context);
3080 if (!cb_context) return skip;
3081 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003082
John Zulauf3d84f1b2020-03-09 13:33:25 -06003083 // If we have no previous accesses, we have no hazards
John Zulauf3d84f1b2020-03-09 13:33:25 -06003084 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003085 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003086
3087 for (uint32_t region = 0; region < regionCount; region++) {
3088 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003089 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003090 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003091 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003092 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003093 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003094 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003095 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003096 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003097 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003098 }
John Zulauf16adfc92020-04-08 10:28:33 -06003099 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003100 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003101 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003102 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003103 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003104 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003105 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003106 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003107 }
3108 }
3109 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003110 }
3111 return skip;
3112}
3113
3114void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3115 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003116 auto *cb_context = GetAccessContext(commandBuffer);
3117 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003118 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003119 auto *context = cb_context->GetCurrentAccessContext();
3120
John Zulauf9cb530d2019-09-30 14:14:10 -06003121 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003122 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003123
3124 for (uint32_t region = 0; region < regionCount; region++) {
3125 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003126 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003127 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003128 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003129 }
John Zulauf16adfc92020-04-08 10:28:33 -06003130 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003131 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003132 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003133 }
3134 }
3135}
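// Note: this Validate/Record pairing is the pattern for the commands that follow: PreCallValidate* detects
// hazards against the current access context without modifying it, and PreCallRecord* stamps the command's
// reads and writes into the context under a fresh tag from NextCommandTag().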
3136
John Zulauf4a6105a2020-11-17 15:11:05 -07003137void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3138 // Clear out events from the command buffer contexts
3139 for (auto &cb_context : cb_access_state) {
3140 cb_context.second->RecordDestroyEvent(event);
3141 }
3142}
3143
Jeff Leger178b1e52020-10-05 12:22:23 -04003144bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3145 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3146 bool skip = false;
3147 const auto *cb_context = GetAccessContext(commandBuffer);
3148 assert(cb_context);
3149 if (!cb_context) return skip;
3150 const auto *context = cb_context->GetCurrentAccessContext();
3151
3152 // If we have no previous accesses, we have no hazards
3153 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3154 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3155
3156 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3157 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3158 if (src_buffer) {
3159 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003160 auto hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003161 if (hazard.hazard) {
3162 // TODO -- add tag information to log msg when useful.
3163 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
3164 "vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
3165 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003166 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003167 }
3168 }
3169 if (dst_buffer && !skip) {
3170 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003171 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
Jeff Leger178b1e52020-10-05 12:22:23 -04003172 if (hazard.hazard) {
3173 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
3174 "vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
3175 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003176 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003177 }
3178 }
3179 if (skip) break;
3180 }
3181 return skip;
3182}
3183
3184void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3185 auto *cb_context = GetAccessContext(commandBuffer);
3186 assert(cb_context);
3187 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
3188 auto *context = cb_context->GetCurrentAccessContext();
3189
3190 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3191 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3192
3193 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3194 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3195 if (src_buffer) {
3196 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003197 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003198 }
3199 if (dst_buffer) {
3200 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003201 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003202 }
3203 }
3204}
3205
John Zulauf5c5e88d2019-12-26 11:22:02 -07003206bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3207 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3208 const VkImageCopy *pRegions) const {
3209 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003210 const auto *cb_access_context = GetAccessContext(commandBuffer);
3211 assert(cb_access_context);
3212 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003213
John Zulauf3d84f1b2020-03-09 13:33:25 -06003214 const auto *context = cb_access_context->GetCurrentAccessContext();
3215 assert(context);
3216 if (!context) return skip;
3217
3218 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3219 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003220 for (uint32_t region = 0; region < regionCount; region++) {
3221 const auto &copy_region = pRegions[region];
3222 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003223 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003224 copy_region.srcOffset, copy_region.extent);
3225 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003226 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003227 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003228 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003229 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003230 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003231 }
3232
3233 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003234 VkExtent3D dst_copy_extent =
3235 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003236 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07003237 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003238 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003239 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003240 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003241 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003242 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003243 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003244 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003245 }
3246 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003247
John Zulauf5c5e88d2019-12-26 11:22:02 -07003248 return skip;
3249}
3250
3251void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3252 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3253 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003254 auto *cb_access_context = GetAccessContext(commandBuffer);
3255 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003256 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003257 auto *context = cb_access_context->GetCurrentAccessContext();
3258 assert(context);
3259
John Zulauf5c5e88d2019-12-26 11:22:02 -07003260 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003261 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003262
3263 for (uint32_t region = 0; region < regionCount; region++) {
3264 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003265 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003266 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003267 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003268 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003269 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003270 VkExtent3D dst_copy_extent =
3271 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003272 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003273 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003274 }
3275 }
3276}
3277
Jeff Leger178b1e52020-10-05 12:22:23 -04003278bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3279 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3280 bool skip = false;
3281 const auto *cb_access_context = GetAccessContext(commandBuffer);
3282 assert(cb_access_context);
3283 if (!cb_access_context) return skip;
3284
3285 const auto *context = cb_access_context->GetCurrentAccessContext();
3286 assert(context);
3287 if (!context) return skip;
3288
3289 const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3290 const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3291 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3292 const auto &copy_region = pCopyImageInfo->pRegions[region];
3293 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003294 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003295 copy_region.srcOffset, copy_region.extent);
3296 if (hazard.hazard) {
3297 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
3298 "vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
3299 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003300 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003301 }
3302 }
3303
3304 if (dst_image) {
3305 VkExtent3D dst_copy_extent =
3306 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003307 auto hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04003308 copy_region.dstOffset, dst_copy_extent);
3309 if (hazard.hazard) {
3310 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
3311 "vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
3312 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003313 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003314 }
3315 if (skip) break;
3316 }
3317 }
3318
3319 return skip;
3320}
3321
3322void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3323 auto *cb_access_context = GetAccessContext(commandBuffer);
3324 assert(cb_access_context);
3325 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
3326 auto *context = cb_access_context->GetCurrentAccessContext();
3327 assert(context);
3328
3329 auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3330 auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3331
3332 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3333 const auto &copy_region = pCopyImageInfo->pRegions[region];
3334 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003335 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003336 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003337 }
3338 if (dst_image) {
3339 VkExtent3D dst_copy_extent =
3340 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
Jeremy Gebben40a22942020-12-22 14:22:06 -07003341 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003342 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003343 }
3344 }
3345}
3346
John Zulauf9cb530d2019-09-30 14:14:10 -06003347bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3348 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3349 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3350 uint32_t bufferMemoryBarrierCount,
3351 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3352 uint32_t imageMemoryBarrierCount,
3353 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3354 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003355 const auto *cb_access_context = GetAccessContext(commandBuffer);
3356 assert(cb_access_context);
3357 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003358
John Zulauf36ef9282021-02-02 11:47:24 -07003359 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3360 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3361 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3362 pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003363 skip = pipeline_barrier.Validate(*cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003364 return skip;
3365}
3366
3367void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3368 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3369 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3370 uint32_t bufferMemoryBarrierCount,
3371 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3372 uint32_t imageMemoryBarrierCount,
3373 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003374 auto *cb_access_context = GetAccessContext(commandBuffer);
3375 assert(cb_access_context);
3376 if (!cb_access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003377
John Zulauf36ef9282021-02-02 11:47:24 -07003378 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
3379 dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
3380 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
3381 pImageMemoryBarriers);
3382 pipeline_barrier.Record(cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003383}
3384
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07003385bool SyncValidator::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer,
3386 const VkDependencyInfoKHR *pDependencyInfo) const {
3387 bool skip = false;
3388 const auto *cb_access_context = GetAccessContext(commandBuffer);
3389 assert(cb_access_context);
3390 if (!cb_access_context) return skip;
3391
3392 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3393 skip = pipeline_barrier.Validate(*cb_access_context);
3394 return skip;
3395}
3396
3397void SyncValidator::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) {
3398 auto *cb_access_context = GetAccessContext(commandBuffer);
3399 assert(cb_access_context);
3400 if (!cb_access_context) return;
3401
3402 SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER2KHR, *this, cb_access_context->GetQueueFlags(), *pDependencyInfo);
3403 pipeline_barrier.Record(cb_access_context);
3404}
3405
John Zulauf9cb530d2019-09-30 14:14:10 -06003406void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3407 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
3408 // The state tracker sets up the device state
3409 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
3410
John Zulauf5f13a792020-03-10 07:31:21 -06003411 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
3412 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06003413 // TODO: Find a good way to do this hooklessly.
3414 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3415 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
3416 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
3417
John Zulaufd1f85d42020-04-15 12:23:15 -06003418 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3419 sync_device_state->ResetCommandBufferCallback(command_buffer);
3420 });
3421 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3422 sync_device_state->FreeCommandBufferCallback(command_buffer);
3423 });
John Zulauf9cb530d2019-09-30 14:14:10 -06003424}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003425
John Zulauf355e49b2020-04-24 15:11:15 -06003426bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf64ffe552021-02-06 10:25:07 -07003427 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003428 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06003429 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003430 if (cb_context) {
3431 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
3432 skip = sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003433 }
John Zulauf355e49b2020-04-24 15:11:15 -06003434 return skip;
3435}
3436
3437bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3438 VkSubpassContents contents) const {
3439 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003440 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003441 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003442 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003443 return skip;
3444}
3445
3446bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003447 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003448 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003449 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003450 return skip;
3451}
3452
John Zulauf64ffe552021-02-06 10:25:07 -07003453static const char *kBeginRenderPass2KhrName = "vkCmdBeginRenderPass2KHR";
John Zulauf355e49b2020-04-24 15:11:15 -06003454bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3455 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003456 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003457 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003458 skip |=
3459 ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003460 return skip;
3461}
3462
John Zulauf3d84f1b2020-03-09 13:33:25 -06003463void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3464 VkResult result) {
3465 // The state tracker sets up the command buffer state
3466 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3467
3468 // Create/initialize the structure that tracks accesses at the command buffer scope.
3469 auto cb_access_context = GetAccessContext(commandBuffer);
3470 assert(cb_access_context);
3471 cb_access_context->Reset();
3472}
3473
3474void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf64ffe552021-02-06 10:25:07 -07003475 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003476 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003477 if (cb_context) {
John Zulauf64ffe552021-02-06 10:25:07 -07003478 SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
3479 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003480 }
3481}
3482
3483void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3484 VkSubpassContents contents) {
3485 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003486 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003487 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003488 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003489}
3490
3491void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3492 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3493 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003494 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003495}
3496
3497void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3498 const VkRenderPassBeginInfo *pRenderPassBegin,
3499 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3500 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003501 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003502}
3503
Mike Schuchardt2df08912020-12-15 16:28:09 -08003504bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf64ffe552021-02-06 10:25:07 -07003505 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003506 bool skip = false;
3507
3508 auto cb_context = GetAccessContext(commandBuffer);
3509 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003510 if (!cb_context) return skip;
3511 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
3512 return sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003513}
3514
3515bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3516 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
John Zulauf64ffe552021-02-06 10:25:07 -07003517 // Convert to a NextSubpass2
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003518 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003519 subpass_begin_info.contents = contents;
John Zulauf64ffe552021-02-06 10:25:07 -07003520 auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
3521 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003522 return skip;
3523}
3524
John Zulauf64ffe552021-02-06 10:25:07 -07003525static const char *kNextSubpass2KhrName = "vkCmdNextSubpass2KHR";
Mike Schuchardt2df08912020-12-15 16:28:09 -08003526bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3527 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003528 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003529 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003530 return skip;
3531}
3532
3533bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3534 const VkSubpassEndInfo *pSubpassEndInfo) const {
3535 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003536 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003537 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003538}
3539
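// Common record path for the vkCmdNextSubpass family.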
3540void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf64ffe552021-02-06 10:25:07 -07003541 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003542 auto cb_context = GetAccessContext(commandBuffer);
3543 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003544 if (!cb_context) return;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003545
John Zulauf64ffe552021-02-06 10:25:07 -07003546 SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
3547 sync_op.Record(cb_context);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003548}
3549
3550void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
3551 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003552 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003553 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003554 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003555}
3556
3557void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3558 const VkSubpassEndInfo *pSubpassEndInfo) {
3559 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003560 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003561}
3562
3563void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3564 const VkSubpassEndInfo *pSubpassEndInfo) {
3565 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003566 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003567}
3568
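// Common validation for the vkCmdEndRenderPass family via SyncOpEndRenderPass.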
John Zulauf64ffe552021-02-06 10:25:07 -07003569bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
3570 const char *cmd_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003571 bool skip = false;
3572
3573 auto cb_context = GetAccessContext(commandBuffer);
3574 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003575 if (!cb_context) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06003576
John Zulauf64ffe552021-02-06 10:25:07 -07003577 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
3578 skip |= sync_op.Validate(*cb_context);
John Zulauf355e49b2020-04-24 15:11:15 -06003579 return skip;
3580}
3581
3582bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
3583 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
John Zulauf64ffe552021-02-06 10:25:07 -07003584 skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf355e49b2020-04-24 15:11:15 -06003585 return skip;
3586}
3587
Mike Schuchardt2df08912020-12-15 16:28:09 -08003588bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003589 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003590 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf355e49b2020-04-24 15:11:15 -06003591 return skip;
3592}
3593
const static char *kEndRenderPass2KhrName = "vkCmdEndRenderPass2KHR";
John Zulauf355e49b2020-04-24 15:11:15 -06003595bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003596 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003597 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf64ffe552021-02-06 10:25:07 -07003598 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
John Zulauf355e49b2020-04-24 15:11:15 -06003599 return skip;
3600}
3601
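// Common record path for the vkCmdEndRenderPass family.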
John Zulauf64ffe552021-02-06 10:25:07 -07003602void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
3603 const char *cmd_name) {
    // Resolve all the subpass contexts into the command buffer's access context
3605 auto cb_context = GetAccessContext(commandBuffer);
3606 assert(cb_context);
John Zulauf64ffe552021-02-06 10:25:07 -07003607 if (!cb_context) return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003608
John Zulauf64ffe552021-02-06 10:25:07 -07003609 SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
3610 sync_op.Record(cb_context);
3611 return;
John Zulaufe5da6e52020-03-18 15:32:18 -06003612}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003613
// Simple heuristic rule to detect WAW operations representing algorithmically safe (e.g. increment-style)
// updates to a resource which do not conflict at the byte level.
// TODO: Revisit this rule to see if it needs to be tighter or looser
// TODO: Add programmatic control over suppression heuristics
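// In practice this suppresses WAW reports where the hazardous write uses the same access bit as the prior write.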
3618bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
3619 return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
3620}
3621
John Zulauf3d84f1b2020-03-09 13:33:25 -06003622void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06003623 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06003624 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003625}
3626
3627void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06003628 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06003629 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003630}
3631
3632void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf64ffe552021-02-06 10:25:07 -07003633 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
John Zulauf5a1a5382020-06-22 17:23:25 -06003634 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003635}
locke-lunarga19c71d2020-03-02 18:17:04 -07003636
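// Common validation for vkCmdCopyBufferToImage and vkCmdCopyBufferToImage2KHR: per region, detect a
// transfer-read hazard on the source buffer range (sized from the destination image format) and a
// transfer-write hazard on the destination image subresource region.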
Jeff Leger178b1e52020-10-05 12:22:23 -04003637template <typename BufferImageCopyRegionType>
3638bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3639 VkImageLayout dstImageLayout, uint32_t regionCount,
3640 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003641 bool skip = false;
3642 const auto *cb_access_context = GetAccessContext(commandBuffer);
3643 assert(cb_access_context);
3644 if (!cb_access_context) return skip;
3645
Jeff Leger178b1e52020-10-05 12:22:23 -04003646 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3647 const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
3648
locke-lunarga19c71d2020-03-02 18:17:04 -07003649 const auto *context = cb_access_context->GetCurrentAccessContext();
3650 assert(context);
3651 if (!context) return skip;
3652
3653 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07003654 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3655
3656 for (uint32_t region = 0; region < regionCount; region++) {
3657 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07003658 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07003659 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003660 if (src_buffer) {
3661 ResourceAccessRange src_range =
3662 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003663 hazard = context->DetectHazard(*src_buffer, SYNC_COPY_TRANSFER_READ, src_range);
John Zulauf477700e2021-01-06 11:41:49 -07003664 if (hazard.hazard) {
3665 // PHASE1 TODO -- add tag information to log msg when useful.
3666 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
3667 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3668 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003669 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003670 }
3671 }
3672
Jeremy Gebben40a22942020-12-22 14:22:06 -07003673 hazard = context->DetectHazard(*dst_image, SYNC_COPY_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf477700e2021-01-06 11:41:49 -07003674 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003675 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003676 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003677 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003678 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003679 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003680 }
3681 if (skip) break;
3682 }
3683 if (skip) break;
3684 }
3685 return skip;
3686}
3687
Jeff Leger178b1e52020-10-05 12:22:23 -04003688bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3689 VkImageLayout dstImageLayout, uint32_t regionCount,
3690 const VkBufferImageCopy *pRegions) const {
3691 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
3692 COPY_COMMAND_VERSION_1);
3693}
3694
3695bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3696 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
3697 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3698 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3699 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3700}
3701
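// Common record path for vkCmdCopyBufferToImage/2KHR: updates the access state with the transfer read on the
// source buffer range and the transfer write on the destination image region, tagged with this copy command.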
3702template <typename BufferImageCopyRegionType>
3703void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3704 VkImageLayout dstImageLayout, uint32_t regionCount,
3705 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003706 auto *cb_access_context = GetAccessContext(commandBuffer);
3707 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003708
3709 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3710 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
3711
3712 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003713 auto *context = cb_access_context->GetCurrentAccessContext();
3714 assert(context);
3715
3716 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06003717 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003718
3719 for (uint32_t region = 0; region < regionCount; region++) {
3720 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07003721 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07003722 if (src_buffer) {
3723 ResourceAccessRange src_range =
3724 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003725 context->UpdateAccessState(*src_buffer, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003726 }
Jeremy Gebben40a22942020-12-22 14:22:06 -07003727 context->UpdateAccessState(*dst_image, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003728 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003729 }
3730 }
3731}
3732
Jeff Leger178b1e52020-10-05 12:22:23 -04003733void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
3734 VkImageLayout dstImageLayout, uint32_t regionCount,
3735 const VkBufferImageCopy *pRegions) {
3736 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
3737 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
3738}
3739
3740void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
3741 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
3742 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
3743 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
3744 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
3745 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
3746}
3747
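// Common validation for vkCmdCopyImageToBuffer/2KHR: the mirror of the buffer-to-image case, with a transfer
// read on the source image region and a transfer write on the destination buffer range.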
3748template <typename BufferImageCopyRegionType>
3749bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3750 VkBuffer dstBuffer, uint32_t regionCount,
3751 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003752 bool skip = false;
3753 const auto *cb_access_context = GetAccessContext(commandBuffer);
3754 assert(cb_access_context);
3755 if (!cb_access_context) return skip;
3756
Jeff Leger178b1e52020-10-05 12:22:23 -04003757 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3758 const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
3759
locke-lunarga19c71d2020-03-02 18:17:04 -07003760 const auto *context = cb_access_context->GetCurrentAccessContext();
3761 assert(context);
3762 if (!context) return skip;
3763
3764 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3765 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
3766 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
3767 for (uint32_t region = 0; region < regionCount; region++) {
3768 const auto &copy_region = pRegions[region];
3769 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003770 auto hazard = context->DetectHazard(*src_image, SYNC_COPY_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07003771 copy_region.imageOffset, copy_region.imageExtent);
3772 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003773 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003774 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06003775 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003776 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003777 }
John Zulauf477700e2021-01-06 11:41:49 -07003778 if (dst_mem) {
3779 ResourceAccessRange dst_range =
3780 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003781 hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, dst_range);
John Zulauf477700e2021-01-06 11:41:49 -07003782 if (hazard.hazard) {
3783 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
3784 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
3785 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003786 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07003787 }
locke-lunarga19c71d2020-03-02 18:17:04 -07003788 }
3789 }
3790 if (skip) break;
3791 }
3792 return skip;
3793}
3794
Jeff Leger178b1e52020-10-05 12:22:23 -04003795bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
3796 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
3797 const VkBufferImageCopy *pRegions) const {
3798 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
3799 COPY_COMMAND_VERSION_1);
3800}
3801
3802bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
3803 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
3804 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
3805 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
3806 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
3807}
3808
3809template <typename BufferImageCopyRegionType>
3810void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3811 VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
3812 CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003813 auto *cb_access_context = GetAccessContext(commandBuffer);
3814 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04003815
3816 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
3817 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
3818
3819 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07003820 auto *context = cb_access_context->GetCurrentAccessContext();
3821 assert(context);
3822
3823 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003824 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
3825 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06003826 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07003827
3828 for (uint32_t region = 0; region < regionCount; region++) {
3829 const auto &copy_region = pRegions[region];
3830 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07003831 context->UpdateAccessState(*src_image, SYNC_COPY_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003832 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003833 if (dst_buffer) {
3834 ResourceAccessRange dst_range =
3835 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
Jeremy Gebben40a22942020-12-22 14:22:06 -07003836 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf477700e2021-01-06 11:41:49 -07003837 }
locke-lunarga19c71d2020-03-02 18:17:04 -07003838 }
3839 }
3840}
3841
Jeff Leger178b1e52020-10-05 12:22:23 -04003842void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3843 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
3844 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
3845 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
3846}
3847
3848void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
3849 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
3850 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
3851 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
3852 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
3853 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
3854}
3855
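// Common validation for vkCmdBlitImage/2KHR. Blit offsets may be reversed, so each region is normalized to a
// min-corner offset plus an absolute extent before hazard detection on the source (read) and destination (write) images.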
3856template <typename RegionType>
3857bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3858 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3859 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07003860 bool skip = false;
3861 const auto *cb_access_context = GetAccessContext(commandBuffer);
3862 assert(cb_access_context);
3863 if (!cb_access_context) return skip;
3864
3865 const auto *context = cb_access_context->GetCurrentAccessContext();
3866 assert(context);
3867 if (!context) return skip;
3868
3869 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3870 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
3871
3872 for (uint32_t region = 0; region < regionCount; region++) {
3873 const auto &blit_region = pRegions[region];
3874 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003875 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
3876 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
3877 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
3878 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
3879 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
3880 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003881 auto hazard = context->DetectHazard(*src_image, SYNC_BLIT_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003882 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003883 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003884 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06003885 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003886 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003887 }
3888 }
3889
3890 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003891 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
3892 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
3893 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
3894 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
3895 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
3896 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003897 auto hazard = context->DetectHazard(*dst_image, SYNC_BLIT_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07003898 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003899 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04003900 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06003901 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003902 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07003903 }
3904 if (skip) break;
3905 }
3906 }
3907
3908 return skip;
3909}
3910
Jeff Leger178b1e52020-10-05 12:22:23 -04003911bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3912 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3913 const VkImageBlit *pRegions, VkFilter filter) const {
3914 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
3915 "vkCmdBlitImage");
3916}
3917
3918bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
3919 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
3920 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
3921 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
3922 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
3923}
3924
3925template <typename RegionType>
3926void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3927 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3928 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07003929 auto *cb_access_context = GetAccessContext(commandBuffer);
3930 assert(cb_access_context);
3931 auto *context = cb_access_context->GetCurrentAccessContext();
3932 assert(context);
3933
3934 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003935 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07003936
3937 for (uint32_t region = 0; region < regionCount; region++) {
3938 const auto &blit_region = pRegions[region];
3939 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003940 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
3941 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
3942 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
3943 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
3944 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
3945 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003946 context->UpdateAccessState(*src_image, SYNC_BLIT_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003947 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003948 }
3949 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06003950 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
3951 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
3952 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
3953 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
3954 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
3955 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
Jeremy Gebben40a22942020-12-22 14:22:06 -07003956 context->UpdateAccessState(*dst_image, SYNC_BLIT_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07003957 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07003958 }
3959 }
3960}
locke-lunarg36ba2592020-04-03 09:42:04 -06003961
Jeff Leger178b1e52020-10-05 12:22:23 -04003962void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3963 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3964 const VkImageBlit *pRegions, VkFilter filter) {
3965 auto *cb_access_context = GetAccessContext(commandBuffer);
3966 assert(cb_access_context);
3967 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
3968 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
3969 pRegions, filter);
3970 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
3971}
3972
3973void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
3974 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
3975 auto *cb_access_context = GetAccessContext(commandBuffer);
3976 assert(cb_access_context);
3977 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
3978 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
3979 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
3980 pBlitImageInfo->filter, tag);
3981}
3982
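// Validates the indirect parameter buffer against prior accesses. When the records are tightly packed
// (stride == struct_size) a single range covering all drawCount records is checked; otherwise each record's
// struct_size byte range at offset + i * stride is checked individually.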
John Zulauffaea0ee2021-01-14 14:01:32 -07003983bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
3984 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
3985 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
3986 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06003987 bool skip = false;
3988 if (drawCount == 0) return skip;
3989
3990 const auto *buf_state = Get<BUFFER_STATE>(buffer);
3991 VkDeviceSize size = struct_size;
3992 if (drawCount == 1 || stride == size) {
3993 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06003994 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06003995 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
3996 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06003997 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003998 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06003999 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004000 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004001 }
4002 } else {
4003 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004004 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06004005 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4006 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004007 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004008 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
4009 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004010 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004011 break;
4012 }
4013 }
4014 }
4015 return skip;
4016}
4017
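// Records the indirect-command-read accesses on the indirect parameter buffer, using the same packed/strided
// range logic as ValidateIndirectBuffer.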
locke-lunarg61870c22020-06-09 14:51:50 -06004018void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag &tag, const VkDeviceSize struct_size,
4019 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
4020 uint32_t stride) {
locke-lunargff255f92020-05-13 18:53:52 -06004021 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4022 VkDeviceSize size = struct_size;
4023 if (drawCount == 1 || stride == size) {
4024 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004025 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004026 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004027 } else {
4028 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004029 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004030 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
4031 tag);
locke-lunargff255f92020-05-13 18:53:52 -06004032 }
4033 }
4034}
4035
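// Validates the 4-byte draw count read from the count buffer against prior accesses.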
John Zulauffaea0ee2021-01-14 14:01:32 -07004036bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4037 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4038 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004039 bool skip = false;
4040
4041 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004042 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06004043 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4044 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004045 skip |= LogError(count_buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004046 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004047 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004048 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004049 }
4050 return skip;
4051}
4052
locke-lunarg61870c22020-06-09 14:51:50 -06004053void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag &tag, VkBuffer buffer, VkDeviceSize offset) {
locke-lunargff255f92020-05-13 18:53:52 -06004054 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004055 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004056 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004057}
4058
locke-lunarg36ba2592020-04-03 09:42:04 -06004059bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004060 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004061 const auto *cb_access_context = GetAccessContext(commandBuffer);
4062 assert(cb_access_context);
4063 if (!cb_access_context) return skip;
4064
locke-lunarg61870c22020-06-09 14:51:50 -06004065 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004066 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004067}
4068
4069void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004070 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004071 auto *cb_access_context = GetAccessContext(commandBuffer);
4072 assert(cb_access_context);
4073 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004074
locke-lunarg61870c22020-06-09 14:51:50 -06004075 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004076}
locke-lunarge1a67022020-04-29 00:15:36 -06004077
4078bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004079 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004080 const auto *cb_access_context = GetAccessContext(commandBuffer);
4081 assert(cb_access_context);
4082 if (!cb_access_context) return skip;
4083
4084 const auto *context = cb_access_context->GetCurrentAccessContext();
4085 assert(context);
4086 if (!context) return skip;
4087
locke-lunarg61870c22020-06-09 14:51:50 -06004088 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004089 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4090 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004091 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004092}
4093
4094void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004095 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004096 auto *cb_access_context = GetAccessContext(commandBuffer);
4097 assert(cb_access_context);
4098 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4099 auto *context = cb_access_context->GetCurrentAccessContext();
4100 assert(context);
4101
locke-lunarg61870c22020-06-09 14:51:50 -06004102 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4103 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004104}
4105
4106bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4107 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004108 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004109 const auto *cb_access_context = GetAccessContext(commandBuffer);
4110 assert(cb_access_context);
4111 if (!cb_access_context) return skip;
4112
locke-lunarg61870c22020-06-09 14:51:50 -06004113 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4114 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4115 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004116 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004117}
4118
4119void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4120 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004121 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004122 auto *cb_access_context = GetAccessContext(commandBuffer);
4123 assert(cb_access_context);
4124 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004125
locke-lunarg61870c22020-06-09 14:51:50 -06004126 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4127 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4128 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004129}
4130
4131bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4132 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004133 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004134 const auto *cb_access_context = GetAccessContext(commandBuffer);
4135 assert(cb_access_context);
4136 if (!cb_access_context) return skip;
4137
locke-lunarg61870c22020-06-09 14:51:50 -06004138 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4139 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4140 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004141 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004142}
4143
4144void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4145 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004146 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004147 auto *cb_access_context = GetAccessContext(commandBuffer);
4148 assert(cb_access_context);
4149 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004150
locke-lunarg61870c22020-06-09 14:51:50 -06004151 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4152 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4153 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004154}
4155
4156bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4157 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004158 bool skip = false;
4159 if (drawCount == 0) return skip;
4160
locke-lunargff255f92020-05-13 18:53:52 -06004161 const auto *cb_access_context = GetAccessContext(commandBuffer);
4162 assert(cb_access_context);
4163 if (!cb_access_context) return skip;
4164
4165 const auto *context = cb_access_context->GetCurrentAccessContext();
4166 assert(context);
4167 if (!context) return skip;
4168
locke-lunarg61870c22020-06-09 14:51:50 -06004169 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4170 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004171 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4172 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004173
    // TODO: For now, we validate the whole vertex buffer. It might cause some false positives.
    // The VkDrawIndirectCommand buffer contents could still be changed any time up until queue submission.
    // We will validate the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004177 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004178 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004179}
4180
4181void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4182 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004183 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004184 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004185 auto *cb_access_context = GetAccessContext(commandBuffer);
4186 assert(cb_access_context);
4187 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4188 auto *context = cb_access_context->GetCurrentAccessContext();
4189 assert(context);
4190
locke-lunarg61870c22020-06-09 14:51:50 -06004191 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4192 cb_access_context->RecordDrawSubpassAttachment(tag);
4193 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004194
    // TODO: For now, we record the whole vertex buffer. It might cause some false positives.
    // The VkDrawIndirectCommand buffer contents could still be changed any time up until queue submission.
    // We will record the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004198 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004199}
4200
4201bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4202 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004203 bool skip = false;
4204 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004205 const auto *cb_access_context = GetAccessContext(commandBuffer);
4206 assert(cb_access_context);
4207 if (!cb_access_context) return skip;
4208
4209 const auto *context = cb_access_context->GetCurrentAccessContext();
4210 assert(context);
4211 if (!context) return skip;
4212
locke-lunarg61870c22020-06-09 14:51:50 -06004213 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4214 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004215 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4216 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004217
    // TODO: For now, we validate the whole index and vertex buffer. It might cause some false positives.
    // The VkDrawIndexedIndirectCommand buffer contents could still be changed any time up until queue submission.
    // We will validate the index and vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004221 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004222 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004223}
4224
4225void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4226 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004227 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004228 auto *cb_access_context = GetAccessContext(commandBuffer);
4229 assert(cb_access_context);
4230 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4231 auto *context = cb_access_context->GetCurrentAccessContext();
4232 assert(context);
4233
locke-lunarg61870c22020-06-09 14:51:50 -06004234 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4235 cb_access_context->RecordDrawSubpassAttachment(tag);
4236 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004237
    // TODO: For now, we record the whole index and vertex buffer. It might cause some false positives.
    // The VkDrawIndexedIndirectCommand buffer contents could still be changed any time up until queue submission.
    // We will record the index and vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004241 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004242}
4243
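// Common validation for the vkCmdDrawIndirectCount family (core, KHR, AMD): checks descriptor and subpass
// attachment usage, the indirect parameter buffer (up to maxDrawCount records), and the count buffer.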
4244bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4245 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4246 uint32_t stride, const char *function) const {
4247 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004248 const auto *cb_access_context = GetAccessContext(commandBuffer);
4249 assert(cb_access_context);
4250 if (!cb_access_context) return skip;
4251
4252 const auto *context = cb_access_context->GetCurrentAccessContext();
4253 assert(context);
4254 if (!context) return skip;
4255
locke-lunarg61870c22020-06-09 14:51:50 -06004256 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4257 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004258 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4259 maxDrawCount, stride, function);
4260 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004261
    // TODO: For now, we validate the whole vertex buffer. It might cause some false positives.
    // The VkDrawIndirectCommand buffer contents could still be changed any time up until queue submission.
    // We will validate the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004265 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004266 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004267}
4268
4269bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4270 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4271 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004272 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4273 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004274}
4275
4276void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4277 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4278 uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004279 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4280 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004281 auto *cb_access_context = GetAccessContext(commandBuffer);
4282 assert(cb_access_context);
4283 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECTCOUNT);
4284 auto *context = cb_access_context->GetCurrentAccessContext();
4285 assert(context);
4286
locke-lunarg61870c22020-06-09 14:51:50 -06004287 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4288 cb_access_context->RecordDrawSubpassAttachment(tag);
4289 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4290 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004291
    // TODO: For now, we record the whole vertex buffer. It might cause some false positives.
    // The VkDrawIndirectCommand buffer contents could still be changed any time up until queue submission.
    // We will record the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004295 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004296}
4297
4298bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4299 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4300 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004301 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4302 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004303}
4304
4305void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4306 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4307 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004308 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4309 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004310 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004311}
4312
4313bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4314 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4315 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004316 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4317 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004318}
4319
4320void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4321 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4322 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004323 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4324 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004325 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4326}
4327
4328bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4329 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4330 uint32_t stride, const char *function) const {
4331 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004332 const auto *cb_access_context = GetAccessContext(commandBuffer);
4333 assert(cb_access_context);
4334 if (!cb_access_context) return skip;
4335
4336 const auto *context = cb_access_context->GetCurrentAccessContext();
4337 assert(context);
4338 if (!context) return skip;
4339
locke-lunarg61870c22020-06-09 14:51:50 -06004340 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4341 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004342 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4343 offset, maxDrawCount, stride, function);
4344 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004345
4346 // TODO: For now, we validate the whole index and vertex buffers. This may cause false positives.
4347 // The VkDrawIndexedIndirectCommand buffer contents can change at any time up to queue submission.
4348 // We will validate the index and vertex buffers at QueueSubmit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004349 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004350 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004351}
4352
4353bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4354 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4355 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004356 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4357 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004358}
4359
4360void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4361 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4362 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004363 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4364 maxDrawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004365 auto *cb_access_context = GetAccessContext(commandBuffer);
4366 assert(cb_access_context);
4367 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECTCOUNT);
4368 auto *context = cb_access_context->GetCurrentAccessContext();
4369 assert(context);
4370
locke-lunarg61870c22020-06-09 14:51:50 -06004371 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4372 cb_access_context->RecordDrawSubpassAttachment(tag);
4373 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4374 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004375
4376 // TODO: For now, we record the whole index and vertex buffers. This may cause false positives.
4377 // The VkDrawIndexedIndirectCommand buffer contents can change at any time up to queue submission.
locke-lunarg61870c22020-06-09 14:51:50 -06004378 // We will record the index and vertex buffers at QueueSubmit time in the future.
4379 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004380}
4381
4382bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4383 VkDeviceSize offset, VkBuffer countBuffer,
4384 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4385 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004386 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4387 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004388}
4389
4390void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4391 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4392 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004393 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4394 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004395 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4396}
4397
4398bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4399 VkDeviceSize offset, VkBuffer countBuffer,
4400 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4401 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004402 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4403 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004404}
4405
4406void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4407 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4408 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004409 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4410 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004411 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4412}
4413
4414bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4415 const VkClearColorValue *pColor, uint32_t rangeCount,
4416 const VkImageSubresourceRange *pRanges) const {
4417 bool skip = false;
4418 const auto *cb_access_context = GetAccessContext(commandBuffer);
4419 assert(cb_access_context);
4420 if (!cb_access_context) return skip;
4421
4422 const auto *context = cb_access_context->GetCurrentAccessContext();
4423 assert(context);
4424 if (!context) return skip;
4425
4426 const auto *image_state = Get<IMAGE_STATE>(image);
4427
4428 for (uint32_t index = 0; index < rangeCount; index++) {
4429 const auto &range = pRanges[index];
4430 if (image_state) {
4431 auto hazard =
Jeremy Gebben40a22942020-12-22 14:22:06 -07004432 context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
locke-lunarge1a67022020-04-29 00:15:36 -06004433 if (hazard.hazard) {
4434 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004435 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004436 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004437 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004438 }
4439 }
4440 }
4441 return skip;
4442}
4443
4444void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4445 const VkClearColorValue *pColor, uint32_t rangeCount,
4446 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004447 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004448 auto *cb_access_context = GetAccessContext(commandBuffer);
4449 assert(cb_access_context);
4450 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4451 auto *context = cb_access_context->GetCurrentAccessContext();
4452 assert(context);
4453
4454 const auto *image_state = Get<IMAGE_STATE>(image);
4455
4456 for (uint32_t index = 0; index < rangeCount; index++) {
4457 const auto &range = pRanges[index];
4458 if (image_state) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004459 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
John Zulauf8e3c3e92021-01-06 11:19:36 -07004460 image_state->createInfo.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004461 }
4462 }
4463}
4464
4465bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4466 VkImageLayout imageLayout,
4467 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4468 const VkImageSubresourceRange *pRanges) const {
4469 bool skip = false;
4470 const auto *cb_access_context = GetAccessContext(commandBuffer);
4471 assert(cb_access_context);
4472 if (!cb_access_context) return skip;
4473
4474 const auto *context = cb_access_context->GetCurrentAccessContext();
4475 assert(context);
4476 if (!context) return skip;
4477
4478 const auto *image_state = Get<IMAGE_STATE>(image);
4479
4480 for (uint32_t index = 0; index < rangeCount; index++) {
4481 const auto &range = pRanges[index];
4482 if (image_state) {
4483 auto hazard =
Jeremy Gebben40a22942020-12-22 14:22:06 -07004484 context->DetectHazard(*image_state, SYNC_CLEAR_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
locke-lunarge1a67022020-04-29 00:15:36 -06004485 if (hazard.hazard) {
4486 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004487 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004488 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004489 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004490 }
4491 }
4492 }
4493 return skip;
4494}
4495
4496void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4497 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4498 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004499 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004500 auto *cb_access_context = GetAccessContext(commandBuffer);
4501 assert(cb_access_context);
4502 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
4503 auto *context = cb_access_context->GetCurrentAccessContext();
4504 assert(context);
4505
4506 const auto *image_state = Get<IMAGE_STATE>(image);
4507
4508 for (uint32_t index = 0; index < rangeCount; index++) {
4509 const auto &range = pRanges[index];
4510 if (image_state) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004511 context->UpdateAccessState(*image_state, SYNC_CLEAR_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
John Zulauf8e3c3e92021-01-06 11:19:36 -07004512 image_state->createInfo.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004513 }
4514 }
4515}
4516
4517bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
4518 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
4519 VkDeviceSize dstOffset, VkDeviceSize stride,
4520 VkQueryResultFlags flags) const {
4521 bool skip = false;
4522 const auto *cb_access_context = GetAccessContext(commandBuffer);
4523 assert(cb_access_context);
4524 if (!cb_access_context) return skip;
4525
4526 const auto *context = cb_access_context->GetCurrentAccessContext();
4527 assert(context);
4528 if (!context) return skip;
4529
4530 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4531
4532 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004533 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004534 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004535 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004536 skip |=
4537 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4538 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004539 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004540 }
4541 }
locke-lunargff255f92020-05-13 18:53:52 -06004542
4543 // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004544 return skip;
4545}
4546
4547void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
4548 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4549 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004550 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
4551 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06004552 auto *cb_access_context = GetAccessContext(commandBuffer);
4553 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06004554 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06004555 auto *context = cb_access_context->GetCurrentAccessContext();
4556 assert(context);
4557
4558 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4559
4560 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004561 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004562 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004563 }
locke-lunargff255f92020-05-13 18:53:52 -06004564
4565 // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004566}
4567
4568bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4569 VkDeviceSize size, uint32_t data) const {
4570 bool skip = false;
4571 const auto *cb_access_context = GetAccessContext(commandBuffer);
4572 assert(cb_access_context);
4573 if (!cb_access_context) return skip;
4574
4575 const auto *context = cb_access_context->GetCurrentAccessContext();
4576 assert(context);
4577 if (!context) return skip;
4578
4579 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4580
4581 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004582 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004583 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004584 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004585 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004586 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004587 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004588 }
4589 }
4590 return skip;
4591}
4592
4593void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4594 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004595 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06004596 auto *cb_access_context = GetAccessContext(commandBuffer);
4597 assert(cb_access_context);
4598 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
4599 auto *context = cb_access_context->GetCurrentAccessContext();
4600 assert(context);
4601
4602 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4603
4604 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004605 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004606 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004607 }
4608}
4609
4610bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4611 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4612 const VkImageResolve *pRegions) const {
4613 bool skip = false;
4614 const auto *cb_access_context = GetAccessContext(commandBuffer);
4615 assert(cb_access_context);
4616 if (!cb_access_context) return skip;
4617
4618 const auto *context = cb_access_context->GetCurrentAccessContext();
4619 assert(context);
4620 if (!context) return skip;
4621
4622 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4623 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4624
4625 for (uint32_t region = 0; region < regionCount; region++) {
4626 const auto &resolve_region = pRegions[region];
4627 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004628 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06004629 resolve_region.srcOffset, resolve_region.extent);
4630 if (hazard.hazard) {
4631 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004632 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004633 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004634 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004635 }
4636 }
4637
4638 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004639 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
locke-lunarge1a67022020-04-29 00:15:36 -06004640 resolve_region.dstOffset, resolve_region.extent);
4641 if (hazard.hazard) {
4642 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004643 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004644 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004645 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004646 }
4647 if (skip) break;
4648 }
4649 }
4650
4651 return skip;
4652}
4653
4654void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4655 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4656 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004657 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4658 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06004659 auto *cb_access_context = GetAccessContext(commandBuffer);
4660 assert(cb_access_context);
4661 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
4662 auto *context = cb_access_context->GetCurrentAccessContext();
4663 assert(context);
4664
4665 auto *src_image = Get<IMAGE_STATE>(srcImage);
4666 auto *dst_image = Get<IMAGE_STATE>(dstImage);
4667
4668 for (uint32_t region = 0; region < regionCount; region++) {
4669 const auto &resolve_region = pRegions[region];
4670 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004671 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004672 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004673 }
4674 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004675 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004676 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004677 }
4678 }
4679}
4680
Jeff Leger178b1e52020-10-05 12:22:23 -04004681bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
4682 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
4683 bool skip = false;
4684 const auto *cb_access_context = GetAccessContext(commandBuffer);
4685 assert(cb_access_context);
4686 if (!cb_access_context) return skip;
4687
4688 const auto *context = cb_access_context->GetCurrentAccessContext();
4689 assert(context);
4690 if (!context) return skip;
4691
4692 const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
4693 const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
4694
4695 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
4696 const auto &resolve_region = pResolveImageInfo->pRegions[region];
4697 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004698 auto hazard = context->DetectHazard(*src_image, SYNC_RESOLVE_TRANSFER_READ, resolve_region.srcSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04004699 resolve_region.srcOffset, resolve_region.extent);
4700 if (hazard.hazard) {
4701 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
4702 "vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
4703 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004704 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004705 }
4706 }
4707
4708 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004709 auto hazard = context->DetectHazard(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, resolve_region.dstSubresource,
Jeff Leger178b1e52020-10-05 12:22:23 -04004710 resolve_region.dstOffset, resolve_region.extent);
4711 if (hazard.hazard) {
4712 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
4713 "vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
4714 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004715 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04004716 }
4717 if (skip) break;
4718 }
4719 }
4720
4721 return skip;
4722}
4723
4724void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
4725 const VkResolveImageInfo2KHR *pResolveImageInfo) {
4726 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
4727 auto *cb_access_context = GetAccessContext(commandBuffer);
4728 assert(cb_access_context);
4729 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
4730 auto *context = cb_access_context->GetCurrentAccessContext();
4731 assert(context);
4732
4733 auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
4734 auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
4735
4736 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
4737 const auto &resolve_region = pResolveImageInfo->pRegions[region];
4738 if (src_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004739 context->UpdateAccessState(*src_image, SYNC_RESOLVE_TRANSFER_READ, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004740 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004741 }
4742 if (dst_image) {
Jeremy Gebben40a22942020-12-22 14:22:06 -07004743 context->UpdateAccessState(*dst_image, SYNC_RESOLVE_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
John Zulauf8e3c3e92021-01-06 11:19:36 -07004744 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04004745 }
4746 }
4747}
4748
locke-lunarge1a67022020-04-29 00:15:36 -06004749bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4750 VkDeviceSize dataSize, const void *pData) const {
4751 bool skip = false;
4752 const auto *cb_access_context = GetAccessContext(commandBuffer);
4753 assert(cb_access_context);
4754 if (!cb_access_context) return skip;
4755
4756 const auto *context = cb_access_context->GetCurrentAccessContext();
4757 assert(context);
4758 if (!context) return skip;
4759
4760 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4761
4762 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004763 // VK_WHOLE_SIZE not allowed
4764 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004765 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunarge1a67022020-04-29 00:15:36 -06004766 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004767 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004768 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004769 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004770 }
4771 }
4772 return skip;
4773}
4774
4775void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4776 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004777 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06004778 auto *cb_access_context = GetAccessContext(commandBuffer);
4779 assert(cb_access_context);
4780 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
4781 auto *context = cb_access_context->GetCurrentAccessContext();
4782 assert(context);
4783
4784 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4785
4786 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004787 // VK_WHOLE_SIZE not allowed
4788 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004789 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004790 }
4791}
locke-lunargff255f92020-05-13 18:53:52 -06004792
4793bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
4794 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
4795 bool skip = false;
4796 const auto *cb_access_context = GetAccessContext(commandBuffer);
4797 assert(cb_access_context);
4798 if (!cb_access_context) return skip;
4799
4800 const auto *context = cb_access_context->GetCurrentAccessContext();
4801 assert(context);
4802 if (!context) return skip;
4803
4804 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4805
4806 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004807 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004808 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
locke-lunargff255f92020-05-13 18:53:52 -06004809 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004810 skip |=
4811 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4812 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004813 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004814 }
4815 }
4816 return skip;
4817}
4818
4819void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
4820 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004821 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06004822 auto *cb_access_context = GetAccessContext(commandBuffer);
4823 assert(cb_access_context);
4824 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
4825 auto *context = cb_access_context->GetCurrentAccessContext();
4826 assert(context);
4827
4828 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4829
4830 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004831 const ResourceAccessRange range = MakeRange(dstOffset, 4);
Jeremy Gebben40a22942020-12-22 14:22:06 -07004832 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004833 }
4834}
John Zulauf49beb112020-11-04 16:06:31 -07004835
4836bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
4837 bool skip = false;
4838 const auto *cb_context = GetAccessContext(commandBuffer);
4839 assert(cb_context);
4840 if (!cb_context) return skip;
4841
John Zulauf36ef9282021-02-02 11:47:24 -07004842 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07004843 return set_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004844}
4845
4846void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
4847 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
4848 auto *cb_context = GetAccessContext(commandBuffer);
4849 assert(cb_context);
4850 if (!cb_context) return;
John Zulauf36ef9282021-02-02 11:47:24 -07004851 SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
4852 set_event_op.Record(cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004853}
4854
John Zulauf4edde622021-02-15 08:54:50 -07004855bool SyncValidator::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4856 const VkDependencyInfoKHR *pDependencyInfo) const {
4857 bool skip = false;
4858 const auto *cb_context = GetAccessContext(commandBuffer);
4859 assert(cb_context);
4860 if (!cb_context || !pDependencyInfo) return skip;
4861
4862 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
4863 return set_event_op.Validate(*cb_context);
4864}
4865
4866void SyncValidator::PostCallRecordCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4867 const VkDependencyInfoKHR *pDependencyInfo) {
4868 StateTracker::PostCallRecordCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
4869 auto *cb_context = GetAccessContext(commandBuffer);
4870 assert(cb_context);
4871 if (!cb_context || !pDependencyInfo) return;
4872
4873 SyncOpSetEvent set_event_op(CMD_SETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, *pDependencyInfo);
4874 set_event_op.Record(cb_context);
4875}
4876
John Zulauf49beb112020-11-04 16:06:31 -07004877bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
4878 VkPipelineStageFlags stageMask) const {
4879 bool skip = false;
4880 const auto *cb_context = GetAccessContext(commandBuffer);
4881 assert(cb_context);
4882 if (!cb_context) return skip;
4883
John Zulauf36ef9282021-02-02 11:47:24 -07004884 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
John Zulauf6ce24372021-01-30 05:56:25 -07004885 return reset_event_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004886}
4887
4888void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
4889 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
4890 auto *cb_context = GetAccessContext(commandBuffer);
4891 assert(cb_context);
4892 if (!cb_context) return;
4893
John Zulauf36ef9282021-02-02 11:47:24 -07004894 SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
4895 reset_event_op.Record(cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004896}
4897
John Zulauf4edde622021-02-15 08:54:50 -07004898bool SyncValidator::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4899 VkPipelineStageFlags2KHR stageMask) const {
4900 bool skip = false;
4901 const auto *cb_context = GetAccessContext(commandBuffer);
4902 assert(cb_context);
4903 if (!cb_context) return skip;
4904
4905 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
4906 return reset_event_op.Validate(*cb_context);
4907}
4908
4909void SyncValidator::PostCallRecordCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event,
4910 VkPipelineStageFlags2KHR stageMask) {
4911 StateTracker::PostCallRecordCmdResetEvent2KHR(commandBuffer, event, stageMask);
4912 auto *cb_context = GetAccessContext(commandBuffer);
4913 assert(cb_context);
4914 if (!cb_context) return;
4915
4916 SyncOpResetEvent reset_event_op(CMD_RESETEVENT2KHR, *this, cb_context->GetQueueFlags(), event, stageMask);
4917 reset_event_op.Record(cb_context);
4918}
4919
John Zulauf49beb112020-11-04 16:06:31 -07004920bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4921 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
4922 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
4923 uint32_t bufferMemoryBarrierCount,
4924 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4925 uint32_t imageMemoryBarrierCount,
4926 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
4927 bool skip = false;
4928 const auto *cb_context = GetAccessContext(commandBuffer);
4929 assert(cb_context);
4930 if (!cb_context) return skip;
4931
John Zulauf36ef9282021-02-02 11:47:24 -07004932 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
4933 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
4934 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufd5115702021-01-18 12:34:33 -07004935 return wait_events_op.Validate(*cb_context);
John Zulauf49beb112020-11-04 16:06:31 -07004936}
4937
4938void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4939 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
4940 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
4941 uint32_t bufferMemoryBarrierCount,
4942 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4943 uint32_t imageMemoryBarrierCount,
4944 const VkImageMemoryBarrier *pImageMemoryBarriers) {
4945 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
4946 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
4947 imageMemoryBarrierCount, pImageMemoryBarriers);
4948
4949 auto *cb_context = GetAccessContext(commandBuffer);
4950 assert(cb_context);
4951 if (!cb_context) return;
4952
John Zulauf36ef9282021-02-02 11:47:24 -07004953 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
4954 dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
4955 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
4956 return wait_events_op.Record(cb_context);
John Zulauf4a6105a2020-11-17 15:11:05 -07004957}
4958
John Zulauf4edde622021-02-15 08:54:50 -07004959bool SyncValidator::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4960 const VkDependencyInfoKHR *pDependencyInfos) const {
4961 bool skip = false;
4962 const auto *cb_context = GetAccessContext(commandBuffer);
4963 assert(cb_context);
4964 if (!cb_context) return skip;
4965
4966 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
4967 skip |= wait_events_op.Validate(*cb_context);
4968 return skip;
4969}
4970
4971void SyncValidator::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
4972 const VkDependencyInfoKHR *pDependencyInfos) {
4973 StateTracker::PostCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos);
4974
4975 auto *cb_context = GetAccessContext(commandBuffer);
4976 assert(cb_context);
4977 if (!cb_context) return;
4978
4979 SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS2KHR, *this, cb_context->GetQueueFlags(), eventCount, pEvents, pDependencyInfos);
4980 wait_events_op.Record(cb_context);
4981}
4982
John Zulauf4a6105a2020-11-17 15:11:05 -07004983void SyncEventState::ResetFirstScope() {
4984 for (const auto address_type : kAddressTypes) {
4985 first_scope[static_cast<size_t>(address_type)].clear();
4986 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07004987 scope = SyncExecScope();
John Zulauf4a6105a2020-11-17 15:11:05 -07004988}
4989
4990 // Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
John Zulauf4edde622021-02-15 08:54:50 -07004991SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(CMD_TYPE cmd, VkPipelineStageFlags2KHR srcStageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07004992 IgnoreReason reason = NotIgnored;
4993
John Zulauf4edde622021-02-15 08:54:50 -07004994 if ((CMD_WAITEVENTS2KHR == cmd) && (CMD_SETEVENT == last_command)) {
4995 reason = SetVsWait2;
4996 } else if ((last_command == CMD_RESETEVENT || last_command == CMD_RESETEVENT2KHR) && !HasBarrier(0U, 0U)) {
4997 reason = (last_command == CMD_RESETEVENT) ? ResetWaitRace : Reset2WaitRace;
John Zulauf4a6105a2020-11-17 15:11:05 -07004998 } else if (unsynchronized_set) {
4999 reason = SetRace;
5000 } else {
Jeremy Gebben40a22942020-12-22 14:22:06 -07005001 const VkPipelineStageFlags2KHR missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07005002 if (missing_bits) reason = MissingStageBits;
5003 }
5004
5005 return reason;
5006}
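// Illustrative sketch (comments only, not compiled): command sequences that can hit the ignore
// reasons above. The command buffer and event handles are assumed valid; the stage masks are
// examples only.
//
//   // SetVsWait2: the event was last set with the original vkCmdSetEvent, but the wait uses the
//   // synchronization2 entry point.
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
//   VkDependencyInfoKHR dep_info = {VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR};
//   vkCmdWaitEvents2KHR(cb, 1, &event, &dep_info);
//
//   // MissingStageBits: the wait's srcStageMask does not cover the stageMask used at set time
//   // (TRANSFER was set, but only COMPUTE is waited on).
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
//   vkCmdWaitEvents(cb, 1, &event, VK_PIPELINE_STAGE_COMPUTE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
//                   0, nullptr, 0, nullptr, 0, nullptr);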
5007
Jeremy Gebben40a22942020-12-22 14:22:06 -07005008bool SyncEventState::HasBarrier(VkPipelineStageFlags2KHR stageMask, VkPipelineStageFlags2KHR exec_scope_arg) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07005009 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
5010 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
5011 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07005012}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005013
John Zulauf36ef9282021-02-02 11:47:24 -07005014SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5015 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5016 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005017 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5018 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5019 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf4edde622021-02-15 08:54:50 -07005020 : SyncOpBase(cmd), barriers_(1) {
5021 auto &barrier_set = barriers_[0];
5022 barrier_set.dependency_flags = dependencyFlags;
5023 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, srcStageMask);
5024 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, dstStageMask);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005025 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
John Zulauf4edde622021-02-15 08:54:50 -07005026 barrier_set.MakeMemoryBarriers(barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags, memoryBarrierCount,
5027 pMemoryBarriers);
5028 barrier_set.MakeBufferMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5029 bufferMemoryBarrierCount, pBufferMemoryBarriers);
5030 barrier_set.MakeImageMemoryBarriers(sync_state, barrier_set.src_exec_scope, barrier_set.dst_exec_scope, dependencyFlags,
5031 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005032}
5033
John Zulauf4edde622021-02-15 08:54:50 -07005034SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t event_count,
5035 const VkDependencyInfoKHR *dep_infos)
5036 : SyncOpBase(cmd), barriers_(event_count) {
5037 for (uint32_t i = 0; i < event_count; i++) {
5038 const auto &dep_info = dep_infos[i];
5039 auto &barrier_set = barriers_[i];
5040 barrier_set.dependency_flags = dep_info.dependencyFlags;
5041 auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info);
5042 barrier_set.src_exec_scope = SyncExecScope::MakeSrc(queue_flags, stage_masks.src);
5043 barrier_set.dst_exec_scope = SyncExecScope::MakeDst(queue_flags, stage_masks.dst);
5044 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
5045 barrier_set.MakeMemoryBarriers(queue_flags, dep_info.dependencyFlags, dep_info.memoryBarrierCount,
5046 dep_info.pMemoryBarriers);
5047 barrier_set.MakeBufferMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.bufferMemoryBarrierCount,
5048 dep_info.pBufferMemoryBarriers);
5049 barrier_set.MakeImageMemoryBarriers(sync_state, queue_flags, dep_info.dependencyFlags, dep_info.imageMemoryBarrierCount,
5050 dep_info.pImageMemoryBarriers);
5051 }
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005052}
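// The two constructors above build barriers_ differently: the legacy vkCmdPipelineBarrier /
// vkCmdWaitEvents path always produces a single barrier set with one src/dst execution scope,
// while the synchronization2 path produces one barrier set per VkDependencyInfoKHR so that
// vkCmdWaitEvents2KHR can pair each event with its own dependency info (see the
// events_.size() == barriers_.size() assert in the SyncOpWaitEvents constructor).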
5053
John Zulauf36ef9282021-02-02 11:47:24 -07005054SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
John Zulaufd5115702021-01-18 12:34:33 -07005055 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5056 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
5057 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5058 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5059 const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005060 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
John Zulaufd5115702021-01-18 12:34:33 -07005061 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}
5062
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005063SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
5064 const VkDependencyInfoKHR &dep_info)
John Zulauf4edde622021-02-15 08:54:50 -07005065 : SyncOpBarriers(cmd, sync_state, queue_flags, 1, &dep_info) {}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005066
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005067bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
5068 bool skip = false;
5069 const auto *context = cb_context.GetCurrentAccessContext();
5070 assert(context);
5071 if (!context) return skip;
John Zulauf6fdf3d02021-03-05 16:50:47 -07005072 assert(barriers_.size() == 1); // PipelineBarriers only support a single barrier set.
5073
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005074 // Validate Image Layout transitions
John Zulauf6fdf3d02021-03-05 16:50:47 -07005075 const auto &barrier_set = barriers_[0];
5076 for (const auto &image_barrier : barrier_set.image_memory_barriers) {
5077 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
5078 const auto *image_state = image_barrier.image.get();
5079 if (!image_state) continue;
5080 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
5081 if (hazard.hazard) {
5082 // PHASE1 TODO -- add tag information to log msg when useful.
5083 const auto &sync_state = cb_context.GetSyncState();
5084 const auto image_handle = image_state->image;
5085 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
5086 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5087 string_SyncHazard(hazard.hazard), image_barrier.index,
5088 sync_state.report_data->FormatHandle(image_handle).c_str(),
5089 cb_context.FormatUsage(hazard).c_str());
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005090 }
5091 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005092 return skip;
5093}
5094
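// The factory below supplies the functor and range-generator types that the ApplyBarriers /
// ApplyGlobalBarriers templates (further down) use to walk buffer, image, and global barriers and
// apply them to the access state maps of the current AccessContext. A resource without a simple
// binding yields an empty range generator, so its barrier is silently skipped.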
John Zulaufd5115702021-01-18 12:34:33 -07005095struct SyncOpPipelineBarrierFunctorFactory {
5096 using BarrierOpFunctor = PipelineBarrierOp;
5097 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5098 using GlobalBarrierOpFunctor = PipelineBarrierOp;
5099 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5100 using BufferRange = ResourceAccessRange;
5101 using ImageRange = subresource_adapter::ImageRangeGenerator;
5102 using GlobalRange = ResourceAccessRange;
5103
5104 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
5105 return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
5106 }
5107 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
5108 return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
5109 }
5110 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
5111 return GlobalBarrierOpFunctor(barrier, false);
5112 }
5113
5114 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
5115 if (!SimpleBinding(buffer)) return ResourceAccessRange();
5116 const auto base_address = ResourceBaseAddress(buffer);
5117 return (range + base_address);
5118 }
5119 ImageRange MakeRangeGen(const IMAGE_STATE &image, const SyncImageMemoryBarrier::SubImageRange &range) const {
John Zulauf264cce02021-02-05 14:40:47 -07005120 if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
John Zulaufd5115702021-01-18 12:34:33 -07005121
5122 const auto base_address = ResourceBaseAddress(image);
5123 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), range.subresource_range, range.offset,
5124 range.extent, base_address);
5125 return range_gen;
5126 }
5127 GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
5128};
5129
5130template <typename Barriers, typename FunctorFactory>
5131void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
5132 AccessContext *context) {
5133 for (const auto &barrier : barriers) {
5134 const auto *state = barrier.GetState();
5135 if (state) {
5136 auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
5137 auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
5138 auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
5139 UpdateMemoryAccessState(accesses, update_action, &range_gen);
5140 }
5141 }
5142}
5143
5144template <typename Barriers, typename FunctorFactory>
5145void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
5146 AccessContext *access_context) {
5147 auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
5148 for (const auto &barrier : barriers) {
5149 barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
5150 }
5151 for (const auto address_type : kAddressTypes) {
5152 auto range_gen = factory.MakeGlobalRangeGen(address_type);
5153 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
5154 }
5155}
5156
John Zulauf36ef9282021-02-02 11:47:24 -07005157void SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005158 SyncOpPipelineBarrierFunctorFactory factory;
5159 auto *access_context = cb_context->GetCurrentAccessContext();
John Zulauf36ef9282021-02-02 11:47:24 -07005160 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005161
John Zulauf4edde622021-02-15 08:54:50 -07005162 // Pipeline barriers only have a single barrier set, unlike WaitEvents2
5163 assert(barriers_.size() == 1);
5164 const auto &barrier_set = barriers_[0];
5165 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5166 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5167 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
5168
5169 if (barrier_set.single_exec_scope) {
5170 cb_context->ApplyGlobalBarriersToEvents(barrier_set.src_exec_scope, barrier_set.dst_exec_scope);
5171 } else {
5172 for (const auto &barrier : barrier_set.memory_barriers) {
5173 cb_context->ApplyGlobalBarriersToEvents(barrier.src_exec_scope, barrier.dst_exec_scope);
5174 }
5175 }
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005176}
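// Note: single_exec_scope is true only for barrier sets built from the original (non-sync2) entry
// points, where one srcStageMask/dstStageMask pair covers every memory barrier; the
// synchronization2 path carries per-barrier scopes, so Record() above applies each barrier's own
// src/dst execution scope to the pending events instead.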
5177
John Zulauf4edde622021-02-15 08:54:50 -07005178void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
5179 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
5180 const VkMemoryBarrier *barriers) {
5181 memory_barriers.reserve(std::max<uint32_t>(1, memory_barrier_count));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005182 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005183 const auto &barrier = barriers[barrier_index];
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005184 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005185 memory_barriers.emplace_back(sync_barrier);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005186 }
5187 if (0 == memory_barrier_count) {
5188 // If there are no global memory barriers, force an exec barrier
John Zulauf4edde622021-02-15 08:54:50 -07005189 memory_barriers.emplace_back(SyncBarrier(src, dst));
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005190 }
John Zulauf4edde622021-02-15 08:54:50 -07005191 single_exec_scope = true;
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005192}
5193
John Zulauf4edde622021-02-15 08:54:50 -07005194void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5195 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5196 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
5197 buffer_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005198 for (uint32_t index = 0; index < barrier_count; index++) {
5199 const auto &barrier = barriers[index];
5200 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5201 if (buffer) {
5202 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5203 const auto range = MakeRange(barrier.offset, barrier_size);
5204 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005205 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005206 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005207 buffer_memory_barriers.emplace_back();
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005208 }
5209 }
5210}
5211
John Zulauf4edde622021-02-15 08:54:50 -07005212void SyncOpBarriers::BarrierSet::MakeMemoryBarriers(VkQueueFlags queue_flags, VkDependencyFlags dependency_flags,
5213 uint32_t memory_barrier_count, const VkMemoryBarrier2KHR *barriers) {
5214 memory_barriers.reserve(memory_barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005215 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
John Zulauf4edde622021-02-15 08:54:50 -07005216 const auto &barrier = barriers[barrier_index];
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005217 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5218 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5219 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005220 memory_barriers.emplace_back(sync_barrier);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005221 }
John Zulauf4edde622021-02-15 08:54:50 -07005222 single_exec_scope = false;
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005223}
5224
John Zulauf4edde622021-02-15 08:54:50 -07005225void SyncOpBarriers::BarrierSet::MakeBufferMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5226 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
5227 const VkBufferMemoryBarrier2KHR *barriers) {
5228 buffer_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005229 for (uint32_t index = 0; index < barrier_count; index++) {
5230 const auto &barrier = barriers[index];
5231 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5232 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5233 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5234 if (buffer) {
5235 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5236 const auto range = MakeRange(barrier.offset, barrier_size);
5237 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005238 buffer_memory_barriers.emplace_back(buffer, sync_barrier, range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005239 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005240 buffer_memory_barriers.emplace_back();
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005241 }
5242 }
5243}
5244
John Zulauf4edde622021-02-15 08:54:50 -07005245void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5246 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5247 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
5248 image_memory_barriers.reserve(barrier_count);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005249 for (uint32_t index = 0; index < barrier_count; index++) {
5250 const auto &barrier = barriers[index];
5251 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5252 if (image) {
5253 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5254 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005255 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005256 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005257 image_memory_barriers.emplace_back();
5258 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005259 }
5260 }
5261}
John Zulaufd5115702021-01-18 12:34:33 -07005262
John Zulauf4edde622021-02-15 08:54:50 -07005263void SyncOpBarriers::BarrierSet::MakeImageMemoryBarriers(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5264 VkDependencyFlags dependencyFlags, uint32_t barrier_count,
5265 const VkImageMemoryBarrier2KHR *barriers) {
5266 image_memory_barriers.reserve(barrier_count);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005267 for (uint32_t index = 0; index < barrier_count; index++) {
5268 const auto &barrier = barriers[index];
5269 auto src = SyncExecScope::MakeSrc(queue_flags, barrier.srcStageMask);
5270 auto dst = SyncExecScope::MakeDst(queue_flags, barrier.dstStageMask);
5271 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5272 if (image) {
5273 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5274 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4edde622021-02-15 08:54:50 -07005275 image_memory_barriers.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout, subresource_range);
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005276 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005277 image_memory_barriers.emplace_back();
5278 image_memory_barriers.back().index = index; // Just in case we're interested in the ones we skipped.
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005279 }
5280 }
5281}
5282
John Zulauf36ef9282021-02-02 11:47:24 -07005283SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
John Zulaufd5115702021-01-18 12:34:33 -07005284 const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5285 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5286 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5287 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
John Zulauf36ef9282021-02-02 11:47:24 -07005288 : SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
John Zulaufd5115702021-01-18 12:34:33 -07005289 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
5290 pImageMemoryBarriers) {
John Zulauf669dfd52021-01-27 17:15:28 -07005291 MakeEventsList(sync_state, eventCount, pEvents);
John Zulaufd5115702021-01-18 12:34:33 -07005292}
5293
John Zulauf4edde622021-02-15 08:54:50 -07005294SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
5295 const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfo)
5296 : SyncOpBarriers(cmd, sync_state, queue_flags, eventCount, pDependencyInfo) {
5297 MakeEventsList(sync_state, eventCount, pEvents);
5298 assert(events_.size() == barriers_.size()); // Just so nobody gets clever and decides to cull the event or barrier arrays
5299}
5300
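// Validate a recorded vkCmdWaitEvents/vkCmdWaitEvents2KHR: flag HOST stage usage (unsupported by synchronization
// validation), report set/reset/wait ordering races per event, check image barrier layout transitions against each
// event's first scope, and verify that srcStageMask covers the stage masks the events were signalled with.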
John Zulaufd5115702021-01-18 12:34:33 -07005301bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulaufd5115702021-01-18 12:34:33 -07005302 const char *const ignored = "Wait operation is ignored for this event.";
5303 bool skip = false;
5304 const auto &sync_state = cb_context.GetSyncState();
5305 const auto command_buffer_handle = cb_context.GetCBState().commandBuffer;
5306
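    // Synchronization validation does not track host-side event signals, so a srcStageMask containing HOST cannot be
    // validated here; report it as informational rather than checking the individual barriers in that scope.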
John Zulauf4edde622021-02-15 08:54:50 -07005307 for (size_t barrier_set_index = 0; barrier_set_index < barriers_.size(); barrier_set_index++) {
5308 const auto &barrier_set = barriers_[barrier_set_index];
5309 if (barrier_set.single_exec_scope) {
5310 if (barrier_set.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5311 const std::string vuid = std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
 5312 skip |= sync_state.LogInfo(command_buffer_handle, vuid,
5313 "%s, srcStageMask includes %s, unsupported by synchronization validation.", CmdName(),
5314 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT));
5315 } else {
5316 const auto &barriers = barrier_set.memory_barriers;
5317 for (size_t barrier_index = 0; barrier_index < barriers.size(); barrier_index++) {
5318 const auto &barrier = barriers[barrier_index];
5319 if (barrier.src_exec_scope.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
5320 const std::string vuid =
5321 std::string("SYNC-") + std::string(CmdName()) + std::string("-hostevent-unsupported");
 5322 skip |=
5323 sync_state.LogInfo(command_buffer_handle, vuid,
5324 "%s, srcStageMask %s of %s %zu, %s %zu, unsupported by synchronization validation.",
5325 CmdName(), string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT),
5326 "pDependencyInfo", barrier_set_index, "pMemoryBarriers", barrier_index);
5327 }
5328 }
5329 }
5330 }
John Zulaufd5115702021-01-18 12:34:33 -07005331 }
5332
Jeremy Gebben40a22942020-12-22 14:22:06 -07005333 VkPipelineStageFlags2KHR event_stage_masks = 0U;
John Zulauf4edde622021-02-15 08:54:50 -07005334 VkPipelineStageFlags2KHR barrier_mask_params = 0U;
John Zulaufd5115702021-01-18 12:34:33 -07005335 bool events_not_found = false;
John Zulauf669dfd52021-01-27 17:15:28 -07005336 const auto *events_context = cb_context.GetCurrentEventsContext();
5337 assert(events_context);
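    // vkCmdWaitEvents supplies a single barrier set shared by every event, while vkCmdWaitEvents2KHR supplies one
    // VkDependencyInfo per event; only advance through the barrier sets in the latter case.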
John Zulauf4edde622021-02-15 08:54:50 -07005338 size_t barrier_set_index = 0;
5339 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
 5341 for (const auto &event : events_) {
5342 const auto *sync_event = events_context->Get(event.get());
5343 const auto &barrier_set = barriers_[barrier_set_index];
5344 if (!sync_event) {
5345 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
5346 // or solve this with replay creating the SyncEventState in the queue context... also this will be a
5347 // new validation error... wait without previously submitted set event...
5348 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
5349 barrier_set_index += barrier_set_incr;
5350 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
John Zulaufd5115702021-01-18 12:34:33 -07005351 }
John Zulauf4edde622021-02-15 08:54:50 -07005352 const auto event_handle = sync_event->event->event;
5353 // TODO add "destroyed" checks
5354
5355 barrier_mask_params |= barrier_set.src_exec_scope.mask_param;
5356 const auto &src_exec_scope = barrier_set.src_exec_scope;
5357 event_stage_masks |= sync_event->scope.mask_param;
5358 const auto ignore_reason = sync_event->IsIgnoredByWait(cmd_, src_exec_scope.mask_param);
5359 if (ignore_reason) {
5360 switch (ignore_reason) {
5361 case SyncEventState::ResetWaitRace:
5362 case SyncEventState::Reset2WaitRace: {
 5363 // Four permutations of Reset and Wait calls...
5364 const char *vuid =
5365 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent-event-03834" : "VUID-vkCmdResetEvent-event-03835";
5366 if (ignore_reason == SyncEventState::Reset2WaitRace) {
5367 vuid =
Jeremy Gebben476f5e22021-03-01 15:27:20 -07005368 (cmd_ == CMD_WAITEVENTS) ? "VUID-vkCmdResetEvent2KHR-event-03831" : "VUID-vkCmdResetEvent2KHR-event-03832";
John Zulauf4edde622021-02-15 08:54:50 -07005369 }
5370 const char *const message =
5371 "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
5372 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5373 sync_state.report_data->FormatHandle(event_handle).c_str(), CmdName(),
5374 CommandTypeString(sync_event->last_command), ignored);
5375 break;
5376 }
5377 case SyncEventState::SetRace: {
 5378 // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus ignored for
5379 // this event
5380 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
5381 const char *const message =
 5382 "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
5383 const char *const reason = "First synchronization scope is undefined.";
5384 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5385 sync_state.report_data->FormatHandle(event_handle).c_str(),
5386 CommandTypeString(sync_event->last_command), reason, ignored);
5387 break;
5388 }
5389 case SyncEventState::MissingStageBits: {
5390 const auto missing_bits = sync_event->scope.mask_param & ~src_exec_scope.mask_param;
5391 // Issue error message that event waited for is not in wait events scope
5392 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
5393 const char *const message =
 5394 "%s: %s stageMask 0x%" PRIx64 " includes bits not present in srcStageMask 0x%" PRIx64
5395 ". Bits missing from srcStageMask %s. %s";
5396 skip |= sync_state.LogError(event_handle, vuid, message, CmdName(),
5397 sync_state.report_data->FormatHandle(event_handle).c_str(),
5398 sync_event->scope.mask_param, src_exec_scope.mask_param,
5399 sync_utils::StringPipelineStageFlags(missing_bits).c_str(), ignored);
5400 break;
5401 }
5402 case SyncEventState::SetVsWait2: {
5403 skip |= sync_state.LogError(event_handle, "VUID-vkCmdWaitEvents2KHR-pEvents-03837",
5404 "%s: Follows set of %s by %s. Disallowed.", CmdName(),
5405 sync_state.report_data->FormatHandle(event_handle).c_str(),
5406 CommandTypeString(sync_event->last_command));
5407 break;
5408 }
5409 default:
5410 assert(ignore_reason == SyncEventState::NotIgnored);
5411 }
5412 } else if (barrier_set.image_memory_barriers.size()) {
5413 const auto &image_memory_barriers = barrier_set.image_memory_barriers;
5414 const auto *context = cb_context.GetCurrentAccessContext();
5415 assert(context);
5416 for (const auto &image_memory_barrier : image_memory_barriers) {
5417 if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
5418 const auto *image_state = image_memory_barrier.image.get();
5419 if (!image_state) continue;
5420 const auto &subresource_range = image_memory_barrier.range.subresource_range;
5421 const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
5422 const auto hazard =
5423 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
5424 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
5425 if (hazard.hazard) {
5426 skip |= sync_state.LogError(image_state->image, string_SyncHazardVUID(hazard.hazard),
5427 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
5428 string_SyncHazard(hazard.hazard), image_memory_barrier.index,
5429 sync_state.report_data->FormatHandle(image_state->image).c_str(),
5430 cb_context.FormatUsage(hazard).c_str());
5431 break;
5432 }
John Zulaufd5115702021-01-18 12:34:33 -07005433 }
5434 }
John Zulauf4edde622021-02-15 08:54:50 -07005435 // TODO: Add infrastructure for checking pDependencyInfo's vs. CmdSetEvent2
 5436 // VUID-vkCmdWaitEvents2KHR-pEvents-03839
5437 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07005438 }
John Zulaufd5115702021-01-18 12:34:33 -07005439
5440 // Note that we can't check for HOST in pEvents as we don't track that set event type
John Zulauf4edde622021-02-15 08:54:50 -07005441 const auto extra_stage_bits = (barrier_mask_params & ~VK_PIPELINE_STAGE_2_HOST_BIT_KHR) & ~event_stage_masks;
John Zulaufd5115702021-01-18 12:34:33 -07005442 if (extra_stage_bits) {
5443 // Issue error message that event waited for is not in wait events scope
John Zulauf4edde622021-02-15 08:54:50 -07005444 // NOTE: This isn't exactly the right VUID for WaitEvents2, but it's as close as we currently have support for
5445 const char *const vuid =
5446 (CMD_WAITEVENTS == cmd_) ? "VUID-vkCmdWaitEvents-srcStageMask-01158" : "VUID-vkCmdWaitEvents2KHR-pEvents-03838";
John Zulaufd5115702021-01-18 12:34:33 -07005447 const char *const message =
Jeremy Gebben40a22942020-12-22 14:22:06 -07005448 "%s: srcStageMask 0x%" PRIx64 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
John Zulaufd5115702021-01-18 12:34:33 -07005449 if (events_not_found) {
John Zulauf4edde622021-02-15 08:54:50 -07005450 skip |= sync_state.LogInfo(command_buffer_handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005451 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(),
John Zulaufd5115702021-01-18 12:34:33 -07005452 " vkCmdSetEvent may be in previously submitted command buffer.");
5453 } else {
John Zulauf4edde622021-02-15 08:54:50 -07005454 skip |= sync_state.LogError(command_buffer_handle, vuid, message, CmdName(), barrier_mask_params,
Jeremy Gebben40a22942020-12-22 14:22:06 -07005455 sync_utils::StringPipelineStageFlags(extra_stage_bits).c_str(), "");
John Zulaufd5115702021-01-18 12:34:33 -07005456 }
5457 }
5458 return skip;
5459}
5460
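// Functor factory used when applying a WaitEvents barrier set: every generated range is filtered against the event's
// first scope, and every barrier is restricted to the execution/access scope recorded at the matching SetEvent.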
5461struct SyncOpWaitEventsFunctorFactory {
5462 using BarrierOpFunctor = WaitEventBarrierOp;
5463 using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
5464 using GlobalBarrierOpFunctor = WaitEventBarrierOp;
5465 using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
5466 using BufferRange = EventSimpleRangeGenerator;
5467 using ImageRange = EventImageRangeGenerator;
5468 using GlobalRange = EventSimpleRangeGenerator;
5469
5470 // Need to restrict to only valid exec and access scope for this event
5471 // Pass by value is intentional to get a copy we can change without modifying the passed barrier
5472 SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
John Zulaufc523bf62021-02-16 08:20:34 -07005473 barrier.src_exec_scope.exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope.exec_scope;
John Zulaufd5115702021-01-18 12:34:33 -07005474 barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
5475 return barrier;
5476 }
5477 ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
5478 auto barrier = RestrictToEvent(barrier_arg);
5479 return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
5480 }
5481 GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
5482 return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
5483 }
5484 GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
5485 auto barrier = RestrictToEvent(barrier_arg);
5486 return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
5487 }
5488
5489 BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
5490 const AccessAddressType address_type = GetAccessAddressType(buffer);
5491 const auto base_address = ResourceBaseAddress(buffer);
5492 ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
5493 EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
5494 return filtered_range_gen;
5495 }
5496 ImageRange MakeRangeGen(const IMAGE_STATE &image, const SyncImageMemoryBarrier::SubImageRange &range) const {
5497 if (!SimpleBinding(image)) return ImageRange();
5498 const auto address_type = GetAccessAddressType(image);
5499 const auto base_address = ResourceBaseAddress(image);
5500 subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), range.subresource_range,
5501 range.offset, range.extent, base_address);
5502 EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
5503
5504 return filtered_range_gen;
5505 }
5506 GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
5507 return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
5508 }
5509 SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
5510 SyncEventState *sync_event;
5511};
5512
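// Record the wait: import prior accesses, apply each event's (possibly per-event) barrier set restricted to that
// event's first scope, then resolve the accumulated pending barriers into the access context.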
John Zulauf36ef9282021-02-02 11:47:24 -07005513void SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
5514 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulaufd5115702021-01-18 12:34:33 -07005515 auto *access_context = cb_context->GetCurrentAccessContext();
5516 assert(access_context);
5517 if (!access_context) return;
John Zulauf669dfd52021-01-27 17:15:28 -07005518 auto *events_context = cb_context->GetCurrentEventsContext();
5519 assert(events_context);
5520 if (!events_context) return;
John Zulaufd5115702021-01-18 12:34:33 -07005521
 5522 // Unlike PipelineBarrier, WaitEvents is *not* limited to accesses within the current subpass (if any) and thus needs to
 5523 // import all accesses. If this becomes a performance/memory issue, we could instead import only the first_scopes (or a
 5524 // union of them), but the cost of building and using that union is unknown, so take the simplest approach here.
5525 access_context->ResolvePreviousAccesses();
5526
John Zulaufd5115702021-01-18 12:34:33 -07005527 // TODO... this needs to change the SyncEventContext it's using depending on whether this is replay... the recorded
5528 // sync_event will be in the recorded context, but we need to update the sync_events in the current context....
John Zulauf4edde622021-02-15 08:54:50 -07005529 size_t barrier_set_index = 0;
5530 size_t barrier_set_incr = (barriers_.size() == 1) ? 0 : 1;
5531 assert(barriers_.size() == 1 || (barriers_.size() == events_.size()));
John Zulauf669dfd52021-01-27 17:15:28 -07005532 for (auto &event_shared : events_) {
5533 if (!event_shared.get()) continue;
5534 auto *sync_event = events_context->GetFromShared(event_shared);
John Zulaufd5115702021-01-18 12:34:33 -07005535
John Zulauf4edde622021-02-15 08:54:50 -07005536 sync_event->last_command = cmd_;
John Zulaufd5115702021-01-18 12:34:33 -07005537
John Zulauf4edde622021-02-15 08:54:50 -07005538 const auto &barrier_set = barriers_[barrier_set_index];
5539 const auto &dst = barrier_set.dst_exec_scope;
5540 if (!sync_event->IsIgnoredByWait(cmd_, barrier_set.src_exec_scope.mask_param)) {
John Zulaufd5115702021-01-18 12:34:33 -07005541 // These apply barriers one at a time, as they are restricted to the resource ranges specified by each barrier,
 5542 // but do not update the dependency chain information (only set the "pending" state), s.t. the order independence
5543 // of the barriers is maintained.
5544 SyncOpWaitEventsFunctorFactory factory(sync_event);
John Zulauf4edde622021-02-15 08:54:50 -07005545 ApplyBarriers(barrier_set.buffer_memory_barriers, factory, tag, access_context);
5546 ApplyBarriers(barrier_set.image_memory_barriers, factory, tag, access_context);
5547 ApplyGlobalBarriers(barrier_set.memory_barriers, factory, tag, access_context);
John Zulaufd5115702021-01-18 12:34:33 -07005548
5549 // Apply the global barrier to the event itself (for race condition tracking)
5550 // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
5551 sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
5552 sync_event->barriers |= dst.exec_scope;
5553 } else {
5554 // We ignored this wait, so we don't have any effective synchronization barriers for it.
5555 sync_event->barriers = 0U;
5556 }
John Zulauf4edde622021-02-15 08:54:50 -07005557 barrier_set_index += barrier_set_incr;
John Zulaufd5115702021-01-18 12:34:33 -07005558 }
5559
5560 // Apply the pending barriers
5561 ResolvePendingBarrierFunctor apply_pending_action(tag);
5562 access_context->ApplyToContext(apply_pending_action);
5563}
5564
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005565bool SyncValidator::PreCallValidateCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
5566 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5567 bool skip = false;
5568 const auto *cb_access_context = GetAccessContext(commandBuffer);
5569 assert(cb_access_context);
5570 if (!cb_access_context) return skip;
5571
5572 const auto *context = cb_access_context->GetCurrentAccessContext();
5573 assert(context);
5574 if (!context) return skip;
5575
5576 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5577
5578 if (dst_buffer) {
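        // The marker write is a single uint32_t, so the access to check is exactly 4 bytes at dstOffset.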
5579 const ResourceAccessRange range = MakeRange(dstOffset, 4);
5580 auto hazard = context->DetectHazard(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, range);
5581 if (hazard.hazard) {
5582 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
 5583 "vkCmdWriteBufferMarker2AMD: Hazard %s for dstBuffer %s. Access info %s.",
5584 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(),
5585 string_UsageTag(hazard.tag).c_str());
5586 }
5587 }
5588 return skip;
5589}
5590
John Zulauf669dfd52021-01-27 17:15:28 -07005591void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
John Zulaufd5115702021-01-18 12:34:33 -07005592 events_.reserve(event_count);
5593 for (uint32_t event_index = 0; event_index < event_count; event_index++) {
John Zulauf669dfd52021-01-27 17:15:28 -07005594 events_.emplace_back(sync_state.GetShared<EVENT_STATE>(events[event_index]));
John Zulaufd5115702021-01-18 12:34:33 -07005595 }
5596}
John Zulauf6ce24372021-01-30 05:56:25 -07005597
John Zulauf36ef9282021-02-02 11:47:24 -07005598SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07005599 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07005600 : SyncOpBase(cmd),
5601 event_(sync_state.GetShared<EVENT_STATE>(event)),
5602 exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07005603
5604bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07005605 auto *events_context = cb_context.GetCurrentEventsContext();
5606 assert(events_context);
5607 bool skip = false;
5608 if (!events_context) return skip;
5609
5610 const auto &sync_state = cb_context.GetSyncState();
5611 const auto *sync_event = events_context->Get(event_);
5612 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
5613
5614 const char *const set_wait =
5615 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
5616 "hazards.";
5617 const char *message = set_wait; // Only one message this call.
5618 if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
5619 const char *vuid = nullptr;
5620 switch (sync_event->last_command) {
5621 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005622 case CMD_SETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005623 // Needs a barrier between set and reset
5624 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
5625 break;
John Zulauf4edde622021-02-15 08:54:50 -07005626 case CMD_WAITEVENTS:
5627 case CMD_WAITEVENTS2KHR: {
John Zulauf6ce24372021-01-30 05:56:25 -07005628 // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask
5629 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
5630 break;
5631 }
5632 default:
 5633 // The only other valid cases: no last command at all, or a previous reset.
John Zulauf4edde622021-02-15 08:54:50 -07005634 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT) ||
5635 (sync_event->last_command == CMD_RESETEVENT2KHR));
John Zulauf6ce24372021-01-30 05:56:25 -07005636 break;
5637 }
5638 if (vuid) {
John Zulauf36ef9282021-02-02 11:47:24 -07005639 skip |= sync_state.LogError(event_->event, vuid, message, CmdName(),
5640 sync_state.report_data->FormatHandle(event_->event).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07005641 CommandTypeString(sync_event->last_command));
5642 }
5643 }
5644 return skip;
5645}
5646
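// Record the reset: note it as the event's last command and clear the recorded first scope and accumulated barriers.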
John Zulauf36ef9282021-02-02 11:47:24 -07005647void SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
John Zulauf6ce24372021-01-30 05:56:25 -07005648 auto *events_context = cb_context->GetCurrentEventsContext();
5649 assert(events_context);
5650 if (!events_context) return;
5651
5652 auto *sync_event = events_context->GetFromShared(event_);
5653 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
5654
5655 // Update the event state
John Zulauf36ef9282021-02-02 11:47:24 -07005656 sync_event->last_command = cmd_;
John Zulauf6ce24372021-01-30 05:56:25 -07005657 sync_event->unsynchronized_set = CMD_NONE;
5658 sync_event->ResetFirstScope();
5659 sync_event->barriers = 0U;
5660}
5661
John Zulauf36ef9282021-02-02 11:47:24 -07005662SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
John Zulauf4edde622021-02-15 08:54:50 -07005663 VkPipelineStageFlags2KHR stageMask)
John Zulauf36ef9282021-02-02 11:47:24 -07005664 : SyncOpBase(cmd),
5665 event_(sync_state.GetShared<EVENT_STATE>(event)),
John Zulauf4edde622021-02-15 08:54:50 -07005666 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)),
5667 dep_info_() {}
5668
5669SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
5670 const VkDependencyInfoKHR &dep_info)
5671 : SyncOpBase(cmd),
5672 event_(sync_state.GetShared<EVENT_STATE>(event)),
5673 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, sync_utils::GetGlobalStageMasks(dep_info).src)),
5674 dep_info_(new safe_VkDependencyInfoKHR(&dep_info)) {}
John Zulauf6ce24372021-01-30 05:56:25 -07005675
5676bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
5677 // I'll put this here just in case we need to pass this in for future extension support
John Zulauf6ce24372021-01-30 05:56:25 -07005678 bool skip = false;
5679
5680 const auto &sync_state = cb_context.GetSyncState();
5681 auto *events_context = cb_context.GetCurrentEventsContext();
5682 assert(events_context);
5683 if (!events_context) return skip;
5684
5685 const auto *sync_event = events_context->Get(event_);
5686 if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
5687
5688 const char *const reset_set =
5689 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
5690 "hazards.";
5691 const char *const wait =
5692 "%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";
5693
5694 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
John Zulauf4edde622021-02-15 08:54:50 -07005695 const char *vuid_stem = nullptr;
John Zulauf6ce24372021-01-30 05:56:25 -07005696 const char *message = nullptr;
5697 switch (sync_event->last_command) {
5698 case CMD_RESETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005699 case CMD_RESETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005700 // Needs a barrier between reset and set
John Zulauf4edde622021-02-15 08:54:50 -07005701 vuid_stem = "-missingbarrier-reset";
John Zulauf6ce24372021-01-30 05:56:25 -07005702 message = reset_set;
5703 break;
5704 case CMD_SETEVENT:
John Zulauf4edde622021-02-15 08:54:50 -07005705 case CMD_SETEVENT2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005706 // Needs a barrier between set and set
John Zulauf4edde622021-02-15 08:54:50 -07005707 vuid_stem = "-missingbarrier-set";
John Zulauf6ce24372021-01-30 05:56:25 -07005708 message = reset_set;
5709 break;
5710 case CMD_WAITEVENTS:
John Zulauf4edde622021-02-15 08:54:50 -07005711 case CMD_WAITEVENTS2KHR:
John Zulauf6ce24372021-01-30 05:56:25 -07005712 // Needs a barrier or is in second execution scope
John Zulauf4edde622021-02-15 08:54:50 -07005713 vuid_stem = "-missingbarrier-wait";
John Zulauf6ce24372021-01-30 05:56:25 -07005714 message = wait;
5715 break;
5716 default:
 5717 // The only other valid case is no last command at all.
5718 assert(sync_event->last_command == CMD_NONE);
5719 break;
5720 }
John Zulauf4edde622021-02-15 08:54:50 -07005721 if (vuid_stem) {
John Zulauf6ce24372021-01-30 05:56:25 -07005722 assert(nullptr != message);
John Zulauf4edde622021-02-15 08:54:50 -07005723 std::string vuid("SYNC-");
5724 vuid.append(CmdName()).append(vuid_stem);
5725 skip |= sync_state.LogError(event_->event, vuid.c_str(), message, CmdName(),
John Zulauf36ef9282021-02-02 11:47:24 -07005726 sync_state.report_data->FormatHandle(event_->event).c_str(), CmdName(),
John Zulauf6ce24372021-01-30 05:56:25 -07005727 CommandTypeString(sync_event->last_command));
5728 }
5729 }
5730
5731 return skip;
5732}
5733
John Zulauf36ef9282021-02-02 11:47:24 -07005734void SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
5735 const auto tag = cb_context->NextCommandTag(cmd_);
John Zulauf6ce24372021-01-30 05:56:25 -07005736 auto *events_context = cb_context->GetCurrentEventsContext();
5737 auto *access_context = cb_context->GetCurrentAccessContext();
5738 assert(events_context);
5739 if (!events_context) return;
5740
5741 auto *sync_event = events_context->GetFromShared(event_);
5742 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
5743
 5744 // NOTE: We simply record the sync scope here, as anything else would be implementation defined/undefined.
 5745 // We already issue errors for missing barriers between event commands; once the user fixes those, any issues
 5746 // caused by this naive scope setting are resolved as well.
5747
5748 // What happens with two SetEvent is that one cannot know what group of operations will be waited for.
5749 // Given:
5750 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
5751 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
5752
5753 if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
5754 sync_event->unsynchronized_set = sync_event->last_command;
5755 sync_event->ResetFirstScope();
5756 } else if (sync_event->scope.exec_scope == 0) {
5757 // We only set the scope if there isn't one
5758 sync_event->scope = src_exec_scope_;
5759
5760 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
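        // Snapshot every access currently in (or chained into) the source scope into the event's first_scope map, so
        // a later WaitEvents can restrict barrier application to exactly these ranges.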
5761 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
5762 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
5763 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
5764 }
5765 };
5766 access_context->ForAll(set_scope);
5767 sync_event->unsynchronized_set = CMD_NONE;
5768 sync_event->first_scope_tag = tag;
5769 }
John Zulauf4edde622021-02-15 08:54:50 -07005770 // TODO: Store dep_info_ shared ptr in sync_state for WaitEvents2 validation
5771 sync_event->last_command = cmd_;
John Zulauf6ce24372021-01-30 05:56:25 -07005772 sync_event->barriers = 0U;
5773}
John Zulauf64ffe552021-02-06 10:25:07 -07005774
5775SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
5776 const VkRenderPassBeginInfo *pRenderPassBegin,
5777 const VkSubpassBeginInfo *pSubpassBeginInfo, const char *cmd_name)
5778 : SyncOpBase(cmd, cmd_name) {
5779 if (pRenderPassBegin) {
5780 rp_state_ = sync_state.GetShared<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
5781 renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
5782 const auto *fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
5783 if (fb_state) {
5784 shared_attachments_ = sync_state.GetSharedAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
5785 // TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
 5786 // Note that this is safe to persist as long as shared_attachments_ is not cleared
5787 attachments_.reserve(shared_attachments_.size());
sfricke-samsung01c9ae92021-02-09 22:30:52 -08005788 for (const auto &attachment : shared_attachments_) {
John Zulauf64ffe552021-02-06 10:25:07 -07005789 attachments_.emplace_back(attachment.get());
5790 }
5791 }
5792 if (pSubpassBeginInfo) {
5793 subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
5794 }
5795 }
5796}
5797
5798bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
 5799 // Check if any of the layout transitions are hazardous... we don't have the renderpass context to work with yet, so build a temporary one below.
5800 bool skip = false;
5801
5802 assert(rp_state_.get());
5803 if (nullptr == rp_state_.get()) return skip;
5804 auto &rp_state = *rp_state_.get();
5805
5806 const uint32_t subpass = 0;
5807
5808 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
5809 // hasn't happened yet)
5810 const std::vector<AccessContext> empty_context_vector;
5811 AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
5812 cb_context.GetCurrentAccessContext());
5813
5814 // Validate attachment operations
5815 if (attachments_.size() == 0) return skip;
5816 const auto &render_area = renderpass_begin_info_.renderArea;
5817 skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, attachments_, CmdName());
5818
5819 // Validate load operations if there were no layout transition hazards
5820 if (!skip) {
5821 temp_context.RecordLayoutTransitions(rp_state, subpass, attachments_, kCurrentCommandTag);
5822 skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, attachments_, CmdName());
5823 }
5824
5825 return skip;
5826}
5827
5828void SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
5829 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
5830 assert(rp_state_.get());
5831 if (nullptr == rp_state_.get()) return;
5832 const auto tag = cb_context->NextCommandTag(cmd_);
5833 cb_context->RecordBeginRenderPass(*rp_state_.get(), renderpass_begin_info_.renderArea, attachments_, tag);
5834}
5835
5836SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
5837 const VkSubpassEndInfo *pSubpassEndInfo, const char *name_override)
5838 : SyncOpBase(cmd, name_override) {
5839 if (pSubpassBeginInfo) {
5840 subpass_begin_info_.initialize(pSubpassBeginInfo);
5841 }
5842 if (pSubpassEndInfo) {
5843 subpass_end_info_.initialize(pSubpassEndInfo);
5844 }
5845}
5846
5847bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
5848 bool skip = false;
5849 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
5850 if (!renderpass_context) return skip;
5851
5852 skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
5853 return skip;
5854}
5855
5856void SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
5857 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
5858 cb_context->RecordNextSubpass(cmd_);
5859}
5860
5861SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo,
5862 const char *name_override)
5863 : SyncOpBase(cmd, name_override) {
5864 if (pSubpassEndInfo) {
5865 subpass_end_info_.initialize(pSubpassEndInfo);
5866 }
5867}
5868
5869bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
5870 bool skip = false;
5871 const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
5872
5873 if (!renderpass_context) return skip;
5874 skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
5875 return skip;
5876}
5877
5878void SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
5879 // TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
5880 cb_context->RecordEndRenderPass(cmd_);
5881}
Jeremy Gebbendf3fcc32021-02-15 08:53:17 -07005882
5883void SyncValidator::PreCallRecordCmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage,
5884 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
5885 StateTracker::PreCallRecordCmdWriteBufferMarker2AMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
5886 auto *cb_access_context = GetAccessContext(commandBuffer);
5887 assert(cb_access_context);
5888 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5889 auto *context = cb_access_context->GetCurrentAccessContext();
5890 assert(context);
5891
5892 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5893
5894 if (dst_buffer) {
5895 const ResourceAccessRange range = MakeRange(dstOffset, 4);
5896 context->UpdateAccessState(*dst_buffer, SYNC_COPY_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
5897 }
5898}
John Zulaufd05c5842021-03-26 11:32:16 -06005899
5900#ifdef SYNCVAL_DIAGNOSTICS
5901bool SyncValidator::PreCallValidateDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) const {
5902 sync_diagnostics.InstanceDump(instance);
5903 ImageRangeGen::diag_.Report();
5904 return StateTracker::PreCallValidateDestroyInstance(instance, pAllocator);
5905}
5906#endif