/* Copyright (c) 2019-2021 The Khronos Group Inc.
 * Copyright (c) 2019-2021 Valve Corporation
 * Copyright (c) 2019-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"

const static std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::READ_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_READ:
            return true;
            break;
        default:
            assert(0);
    }
    return false;
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}

static std::string string_UsageTag(const ResourceUsageTag &tag) {
    std::stringstream out;

    out << "command: " << CommandTypeString(tag.command);
    out << ", seq_no: " << tag.seq_num;
    if (tag.sub_command != 0) {
        out << ", subcmd: " << tag.sub_command;
    }
    return out.str();
}

std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
    const auto &tag = hazard.tag;
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    std::stringstream out;
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(usage: " << usage_info.name << ", prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }

    // PHASE2 TODO -- add command buffer and reset from secondary if applicable
    out << ", " << string_UsageTag(tag) << ", reset_no: " << reset_count_;
    return out.str();
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags kColorAttachmentExecScope = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{0U, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};
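
// A sketch of how the rules above appear to be selected (assuming ResourceAccessState::OrderingBarriers is indexed by
// the SyncOrdering values used elsewhere in this file): SyncOrdering::kColorAttachment would select
// {kColorAttachmentExecScope, kColorAttachmentAccessScope}, SyncOrdering::kDepthStencilAttachment the depth/stencil
// pair, and SyncOrdering::kRaster the union of both; the first entry carries no ordering guarantee.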

// Sometimes we have an internal access conflict, and we use the kCurrentCommandTag to set and detect in temporary/proxy contexts
static const ResourceUsageTag kCurrentCommandTag(ResourceUsageTag::kMaxIndex, ResourceUsageTag::kMaxCount,
                                                 ResourceUsageTag::kMaxCount, CMD_NONE);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) {
    return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
}

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.binding.mem_state; }

inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

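// Illustrative sketch: GetRealWholeSize(256, VK_WHOLE_SIZE, 1024) yields 768 (the remainder of the resource), while an
// explicit size is passed through unchanged, e.g. GetRealWholeSize(256, 128, 1024) yields 128.
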
static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility.  These are each written as "one time through" generators.
//
// Usage:
//  Constructor() -- initializes the generator to point to the begin of the space declared.
//  *  -- the current range of the generator; an empty range signifies end.
//  ++ -- advance to the next non-empty range (or end)

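// Illustrative sketch (scope_map and range are hypothetical stand-ins for a SyncEventState::ScopeMap and a
// ResourceAccessRange already in hand):
//     EventSimpleRangeGenerator filtered(scope_map, range);
//     for (; filtered->non_empty(); ++filtered) {
//         // *filtered is the intersection of range with one entry of scope_map
//     }
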
// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
  public:
    SingleRangeGenerator(const KeyType &range) : current_(range) {}
    KeyType &operator*() const { return *current_; }
    KeyType *operator->() const { return &*current_; }
    SingleRangeGenerator &operator++() {
        current_ = KeyType();  // just one real range
        return *this;
    }

    bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }

  private:
    SingleRangeGenerator() = default;
    const KeyType range_;
    KeyType current_;
};

// Generate the ranges that are the intersection of range and the entries in the FilterMap
template <typename FilterMap, typename KeyType = typename FilterMap::key_type>
class FilteredRangeGenerator {
  public:
    FilteredRangeGenerator(const FilterMap &filter, const KeyType &range)
        : range_(range), filter_(&filter), filter_pos_(), current_() {
        SeekBegin();
    }
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredRangeGenerator &operator++() {
        ++filter_pos_;
        UpdateCurrent();
        return *this;
    }

    bool operator==(const FilteredRangeGenerator &other) const { return current_ == other.current_; }

  private:
    FilteredRangeGenerator() = default;
    void UpdateCurrent() {
        if (filter_pos_ != filter_->cend()) {
            current_ = range_ & filter_pos_->first;
        } else {
            current_ = KeyType();
        }
    }
    void SeekBegin() {
        filter_pos_ = filter_->lower_bound(range_);
        UpdateCurrent();
    }
    const KeyType range_;
    const FilterMap *filter_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};
using EventSimpleRangeGenerator = FilteredRangeGenerator<SyncEventState::ScopeMap>;

// Templated to allow for different Range generators or map sources...

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
template <typename FilterMap, typename RangeGen, typename KeyType = typename FilterMap::key_type>
class FilteredGeneratorGenerator {
  public:
    FilteredGeneratorGenerator(const FilterMap &filter, RangeGen &gen) : filter_(&filter), gen_(&gen), filter_pos_(), current_() {
        SeekBegin();
    }
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++(*gen_);
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *(*gen_); }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++(*gen_);
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    FilteredGeneratorGenerator() = default;
    const FilterMap *filter_;
    RangeGen *const gen_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;

// Expand the pipeline stage without regard to whether they are valid w.r.t. queue or extension
VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
    VkPipelineStageFlags expanded = stage_mask;
    if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
            if (all_commands.first & queue_flags) {
                expanded |= all_commands.second;
            }
        }
    }
    if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
    }
    return expanded;
}

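// Illustrative sketch (queue_flags stands in for the flags of a graphics-capable queue):
//     VkPipelineStageFlags expanded = ExpandPipelineStages(queue_flags, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
// expanded then holds the individual graphics stage bits registered in syncAllCommandStagesByQueueFlags (minus
// VK_PIPELINE_STAGE_HOST_BIT), with the ALL_GRAPHICS meta-stage itself removed.
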
VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
                                           const std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
    VkPipelineStageFlags unscanned = stage_mask;
    VkPipelineStageFlags related = 0;
    for (const auto &entry : map) {
        const auto &stage = entry.first;
        if (stage & unscanned) {
            related = related | entry.second;
            unscanned = unscanned & ~stage;
            if (!unscanned) break;
        }
    }
    return related;
}

VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
}

VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
}

static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());

ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}

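// Illustrative sketch: for an indexed draw with firstIndex = 3 and indexCount = 6 using 16-bit indices bound at
// offset 0, the index buffer access is GetBufferRange(0, index_buffer_size, 3, 6, 2), i.e. the byte range [6, 18)
// (index_buffer_size is a stand-in for the bound buffer's size).
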
SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
    // Because if a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard is impossible.
    if (descriptor_data.is_writable) {
        return stage_access->second.shader_write;
    }
    return stage_access->second.shader_read;
}

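// Illustrative sketch of the mapping above: a VK_DESCRIPTOR_TYPE_STORAGE_BUFFER visible to the fragment stage maps to
// that stage's shader_read index when descriptor_data.is_writable is false, and to shader_write when it is true, since
// a write access already subsumes the read for hazard detection purposes.
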
bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}

// Traverse the attachment resolves for a specific subpass, and do action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
                      const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass) {
    VkExtent3D extent = CastTo3D(render_area.extent);
    VkOffset3D offset = CastTo3D(render_area.offset);
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an inuse color attachment and a matching inuse resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kColorAttachment, offset, extent, 0);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment, offset, extent, 0);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
        VkImageAspectFlags aspect_mask = 0u;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        if (resolve_depth && resolve_stencil) {
            // Validate all aspects together
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate all stencil only
            aspect_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "stencil";
        }

        if (aspect_mask) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster, offset, extent, aspect_mask);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, offset, extent, aspect_mask);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandBufferAccessContext &cb_context, const char *func_name)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          cb_context_(cb_context),
          func_name_(func_name),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view, current_usage, ordering_rule, offset, extent, aspect_mask);
        if (hazard.hazard) {
            skip_ |=
                cb_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
                                                    "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
                                                    " to resolve attachment %" PRIu32 ". Access info %s.",
                                                    func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
                                                    attachment_name, src_at, dst_at, cb_context_.FormatUsage(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandBufferAccessContext &cb_context_;
    const char *func_name_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, const ResourceUsageTag &tag) : context_(context), tag_(tag) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view, current_usage, ordering_rule, offset, extent, aspect_mask, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag &tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag &tag_) {
    access_state = std::unique_ptr<const ResourceAccessState>(new ResourceAccessState(*access_state_));
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (subpass_dep.barrier_from_external.size()) {
        src_external_ = TrackBack(external_context, queue_flags, subpass_dep.barrier_from_external);
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (const auto &access : accesses) {
            action(address_type, access);
        }
    }
}

// A recursive range walker for hazard detection, first for the current context and then (DetectHazardRecur) to walk
// the DAG of the contexts (for example subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    for (auto pos = from; pos != to; ++pos) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
    }

    if (detect_prev) {
        // Detect in the trailing empty as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);

    HazardResult hazard;
    for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
        hazard = detector.DetectAsync(pos, start_tag_);
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct ApplyTrackbackBarriersAction {
    explicit ApplyTrackbackBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        access->ApplyPendingBarriers(kCurrentCommandTag);
    }
    const std::vector<SyncBarrier> &barriers;
};

// Splits a single map entry into pieces matching the entries in [first, last). The total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing to dest; first and last must be iterators pointing to a
// *different* map from dest.
// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
// range [first, last)
template <typename BarrierAction>
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
                              ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
                              BarrierAction &barrier_action) {
    auto at = entry;
    for (auto pos = first; pos != last; ++pos) {
        // Every member of the input iterator range must fit within the remaining portion of entry
        assert(at->first.includes(pos->first));
        assert(at != dest->end());
        // Trim up at to the same size as the entry to resolve
        at = sparse_container::split(at, *dest, pos->first);
        auto access = pos->second;  // intentional copy
        barrier_action(&access);
        at->second.Resolve(access);
        ++at;  // Go to the remaining unused section of entry
    }
}

static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
    SyncBarrier merged = {};
    for (const auto &barrier : barriers) {
        merged.Merge(barrier);
    }
    return merged;
}

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
                                       ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
                                       bool recur_to_infill) const {
    if (!range.non_empty()) return;

    ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
    while (current->range.non_empty() && range.includes(current->range.begin)) {
        const auto current_range = current->range & range;
        if (current->pos_B->valid) {
            const auto &src_pos = current->pos_B->lower_bound;
            auto access = src_pos->second;  // intentional copy
            barrier_action(&access);

            if (current->pos_A->valid) {
                const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
                trimmed->second.Resolve(access);
                current.invalidate_A(trimmed);
            } else {
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the insert segment
            }
        } else {
            // we have to descend to fill this gap
            if (recur_to_infill) {
                if (current->pos_A->valid) {
                    // Dest is valid, so we need to accumulate along the DAG and then resolve... in an N-to-1 resolve operation
                    ResourceAccessRangeMap gap_map;
                    ResolvePreviousAccess(type, current_range, &gap_map, infill_state);
                    ResolveMapToEntry(resolve_map, current->pos_A->lower_bound, gap_map.begin(), gap_map.end(), barrier_action);
                } else {
                    // There isn't anything in dest in current_range, so we can accumulate directly into it.
                    ResolvePreviousAccess(type, current_range, resolve_map, infill_state);
                    // Need to apply the barrier to the accesses we accumulated, noting that we haven't updated current
                    for (auto pos = resolve_map->lower_bound(current_range); pos != current->pos_A->lower_bound; ++pos) {
                        barrier_action(&pos->second);
                    }
                }
                // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
                // iterator of the outer while.

                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
                // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
                // we stepped on the dest map
                const auto seek_to = current_range.end - 1;  // The subtraction is safe as range can't be empty (loop condition)
                current.invalidate_A();                      // Changes current->range
                current.seek(seek_to);
            } else if (!current->pos_A->valid && infill_state) {
                // If we didn't find anything in the current range, and we aren't recurring... we infill if required
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the correct segment after insert
            }
        }
        ++current;
    }

    // Infill if range goes past both the current and resolve map prior contents
    if (recur_to_infill && (current->range.end < range.end)) {
        ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
        ResourceAccessRangeMap gap_map;
        const auto the_end = resolve_map->end();
        ResolvePreviousAccess(type, trailing_fill_range, &gap_map, infill_state);
        for (auto &access : gap_map) {
            barrier_action(&access.second);
            resolve_map->insert(the_end, access);
        }
    }
}

void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
                                          ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            const ApplyTrackbackBarriersAction barrier_action(prev_dep.barriers);
            prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }

        if (src_external_.context) {
            const ApplyTrackbackBarriersAction barrier_action(src_external_.barriers);
            src_external_.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }
    }
}

// Non-lazy import of all accesses, WaitEvents needs this.
void AccessContext::ResolvePreviousAccesses() {
    ResourceAccessState default_state;
    for (const auto address_type : kAddressTypes) {
        ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
    }
}

AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
    return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
}

static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
                                                                      : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE;
    return stage_access;
}
static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
                                                                      : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE;
    return stage_access;
}

// Caller must manage returned pointer
static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
                                                     uint32_t subpass, const VkRect2D &render_area,
                                                     std::vector<const IMAGE_VIEW_STATE *> attachment_views) {
    auto *proxy = new AccessContext(context);
    proxy->UpdateAttachmentResolveAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    proxy->UpdateAttachmentStoreAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    return proxy;
}

template <typename BarrierAction>
class ResolveAccessRangeFunctor {
  public:
    ResolveAccessRangeFunctor(const AccessContext &context, AccessAddressType address_type, ResourceAccessRangeMap *descent_map,
                              const ResourceAccessState *infill_state, BarrierAction &barrier_action)
        : context_(context),
          address_type_(address_type),
          descent_map_(descent_map),
          infill_state_(infill_state),
          barrier_action_(barrier_action) {}
    ResolveAccessRangeFunctor() = delete;
    void operator()(const ResourceAccessRange &range) const {
        context_.ResolveAccessRange(address_type_, range, barrier_action_, descent_map_, infill_state_);
    }

  private:
    const AccessContext &context_;
    const AccessAddressType address_type_;
    ResourceAccessRangeMap *const descent_map_;
    const ResourceAccessState *infill_state_;
    BarrierAction &barrier_action_;
};

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range,
                                       BarrierAction &barrier_action, AccessAddressType address_type,
                                       ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    const ResolveAccessRangeFunctor<BarrierAction> action(*this, address_type, descent_map, infill_state, barrier_action);
    ApplyOverImageRange(image_state, subresource_range, action);
}

// Layout transitions are handled as if they were occurring at the beginning of the next subpass
bool AccessContext::ValidateLayoutTransitions(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
                                              const VkRect2D &render_area, uint32_t subpass,
                                              const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
                                              const char *func_name) const {
    bool skip = false;
    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
    // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
    // those effects have not been recorded yet.
    //
    // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
    // to apply and only copy then, if this proves a hot spot.
    std::unique_ptr<AccessContext> proxy_for_prev;
    TrackBack proxy_track_back;

    const auto &transitions = rp_state.subpass_transitions[subpass];
    for (const auto &transition : transitions) {
        const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);

        const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
        if (prev_needs_proxy) {
            if (!proxy_for_prev) {
                proxy_for_prev.reset(CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass,
                                                                    render_area, attachment_views));
                proxy_track_back = *track_back;
                proxy_track_back.context = proxy_for_prev.get();
            }
            track_back = &proxy_track_back;
        }
        auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
        if (hazard.hazard) {
            skip |= cb_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                                       "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
                                                       " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
                                                       func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
                                                       string_VkImageLayout(transition.old_layout),
                                                       string_VkImageLayout(transition.new_layout),
                                                       cb_context.FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

bool AccessContext::ValidateLoadOperation(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
                                          const VkRect2D &render_area, uint32_t subpass,
                                          const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
                                          const char *func_name) const {
    bool skip = false;
    const auto *attachment_ci = rp_state.createInfo.pAttachments;
    VkExtent3D extent = CastTo3D(render_area.extent);
    VkOffset3D offset = CastTo3D(render_area.offset);

    for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
        if (subpass == rp_state.attachment_first_subpass[i]) {
            if (attachment_views[i] == nullptr) continue;
            const IMAGE_VIEW_STATE &view = *attachment_views[i];
            const IMAGE_STATE *image = view.image_state.get();
            if (image == nullptr) continue;
            const auto &ci = attachment_ci[i];

            // Need check in the following way
            // 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report
            //    hazard vs. transition
            // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
            //    for each aspect loaded.

            const bool has_depth = FormatHasDepth(ci.format);
            const bool has_stencil = FormatHasStencil(ci.format);
            const bool is_color = !(has_depth || has_stencil);

            const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
            const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;

            HazardResult hazard;
            const char *aspect = nullptr;

            auto hazard_range = view.normalized_subresource_range;
            bool checked_stencil = false;
            if (is_color) {
                hazard = DetectHazard(*image, load_index, view.normalized_subresource_range, SyncOrdering::kColorAttachment,
                                      offset, extent);
                aspect = "color";
            } else {
                if (has_depth) {
                    hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
                    hazard = DetectHazard(*image, load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset, extent);
                    aspect = "depth";
                }
                if (!hazard.hazard && has_stencil) {
                    hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
                    hazard = DetectHazard(*image, stencil_load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset,
                                          extent);
                    aspect = "stencil";
                    checked_stencil = true;
                }
            }

            if (hazard.hazard) {
                auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
                const auto &sync_state = cb_context.GetSyncState();
                if (hazard.tag == kCurrentCommandTag) {
                    // Hazard vs. ILT
                    skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                                "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
                                                " aspect %s during load with loadOp %s.",
                                                func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
                } else {
                    skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                                "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
                                                " aspect %s during load with loadOp %s. Access info %s.",
                                                func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
                                                cb_context.FormatUsage(hazard).c_str());
                }
            }
        }
    }
    return skip;
}

John Zulaufaff20662020-06-01 14:07:58 -06001089// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
1090// because of the ordering guarantees w.r.t. sample access and that the resolve validation hasn't altered the state, because
1091// store is part of the same Next/End operation.
1092// The latter is handled in layout transition validation directly
John Zulauffaea0ee2021-01-14 14:01:32 -07001093bool AccessContext::ValidateStoreOperation(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001094 const VkRect2D &render_area, uint32_t subpass,
1095 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1096 const char *func_name) const {
1097 bool skip = false;
1098 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1099 VkExtent3D extent = CastTo3D(render_area.extent);
1100 VkOffset3D offset = CastTo3D(render_area.offset);
1101
1102 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1103 if (subpass == rp_state.attachment_last_subpass[i]) {
1104 if (attachment_views[i] == nullptr) continue;
1105 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1106 const IMAGE_STATE *image = view.image_state.get();
1107 if (image == nullptr) continue;
1108 const auto &ci = attachment_ci[i];
1109
1110 // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1111 // so we assume that an implementation is *free* to write in that case, meaning that for correctness'
1112 // sake, we treat DONT_CARE as writing.
1113 const bool has_depth = FormatHasDepth(ci.format);
1114 const bool has_stencil = FormatHasStencil(ci.format);
1115 const bool is_color = !(has_depth || has_stencil);
1116 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1117 if (!has_stencil && !store_op_stores) continue;
1118
1119 HazardResult hazard;
1120 const char *aspect = nullptr;
1121 bool checked_stencil = false;
1122 if (is_color) {
1123 hazard = DetectHazard(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001124 view.normalized_subresource_range, SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001125 aspect = "color";
1126 } else {
1127 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1128 auto hazard_range = view.normalized_subresource_range;
1129 if (has_depth && store_op_stores) {
1130 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
1131 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001132 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001133 aspect = "depth";
1134 }
1135 if (!hazard.hazard && has_stencil && stencil_op_stores) {
1136 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
1137 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001138 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001139 aspect = "stencil";
1140 checked_stencil = true;
1141 }
1142 }
1143
1144 if (hazard.hazard) {
1145 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1146 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulauffaea0ee2021-01-14 14:01:32 -07001147 skip |= cb_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1148 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1149 " %s aspect during store with %s %s. Access info %s",
1150 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
1151 op_type_string, store_op_string, cb_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001152 }
1153 }
1154 }
1155 return skip;
1156}
1157
John Zulauffaea0ee2021-01-14 14:01:32 -07001158bool AccessContext::ValidateResolveOperations(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
John Zulaufb027cdb2020-05-21 14:25:22 -06001159 const VkRect2D &render_area,
1160 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, const char *func_name,
1161 uint32_t subpass) const {
John Zulauffaea0ee2021-01-14 14:01:32 -07001162 ValidateResolveAction validate_action(rp_state.renderPass, subpass, *this, cb_context, func_name);
John Zulauf7635de32020-05-29 17:14:15 -06001163 ResolveOperation(validate_action, rp_state, render_area, attachment_views, subpass);
1164 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001165}
1166
John Zulauf3d84f1b2020-03-09 13:33:25 -06001167class HazardDetector {
1168 SyncStageAccessIndex usage_index_;
1169
1170 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001171 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001172 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1173 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001174 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001175 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001176};
1177
John Zulauf69133422020-05-20 14:55:53 -06001178class HazardDetectorWithOrdering {
1179 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001180 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001181
1182 public:
1183 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001184 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001185 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001186 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1187 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001188 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001189 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001190};
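// Illustrative sketch only (hypothetical caller, not part of the validation path): detectors such as the two classes
// above plug into the templated AccessContext::DetectHazard overload below, which walks each address range covered by
// a resource and invokes Detect()/DetectAsync() per range. Assuming an in-scope AccessContext named "context", an
// IMAGE_VIEW_STATE named "view", and its IMAGE_STATE "image", usage might look like:
//
//     HazardDetectorWithOrdering detector(SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
//                                         SyncOrdering::kColorAttachment);
//     HazardResult hazard = context.DetectHazard(detector, image, view.normalized_subresource_range,
//                                                offset, extent, DetectOptions::kDetectAll);
//     if (hazard.hazard) { /* report with string_SyncHazard()/string_SyncHazardVUID() */ }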
1191
John Zulauf16adfc92020-04-08 10:28:33 -06001192HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001193 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001194 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001195 const auto base_address = ResourceBaseAddress(buffer);
1196 HazardDetector detector(usage_index);
1197 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001198}
1199
John Zulauf69133422020-05-20 14:55:53 -06001200template <typename Detector>
1201HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1202 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1203 const VkExtent3D &extent, DetectOptions options) const {
1204 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001205 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001206 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1207 base_address);
1208 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001209 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001210 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001211 if (hazard.hazard) return hazard;
1212 }
1213 return HazardResult();
1214}
1215
John Zulauf540266b2020-04-06 18:54:53 -06001216HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1217 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1218 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001219 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1220 subresource.layerCount};
John Zulauf1507ee42020-05-18 11:33:09 -06001221 return DetectHazard(image, current_usage, subresource_range, offset, extent);
1222}
1223
1224HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1225 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1226 const VkExtent3D &extent) const {
John Zulauf69133422020-05-20 14:55:53 -06001227 HazardDetector detector(current_usage);
1228 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
1229}
1230
1231HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001232 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001233 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001234 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001235 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001236}
1237
John Zulaufb027cdb2020-05-21 14:25:22 -06001238// Some common code for looking at attachments; if there's anything wrong, we return no hazard, as core validation
1239// should have reported the issue regarding an invalid attachment entry
1240HazardResult AccessContext::DetectHazard(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001241 SyncOrdering ordering_rule, const VkOffset3D &offset, const VkExtent3D &extent,
John Zulaufb027cdb2020-05-21 14:25:22 -06001242 VkImageAspectFlags aspect_mask) const {
1243 if (view != nullptr) {
1244 const IMAGE_STATE *image = view->image_state.get();
1245 if (image != nullptr) {
1246 auto *detect_range = &view->normalized_subresource_range;
1247 VkImageSubresourceRange masked_range;
1248 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1249 masked_range = view->normalized_subresource_range;
1250 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1251 detect_range = &masked_range;
1252 }
1253
1254 // NOTE: The range encoding code is not robust to invalid ranges, so we protect it from our change
1255 if (detect_range->aspectMask) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001256 return DetectHazard(*image, current_usage, *detect_range, ordering_rule, offset, extent);
John Zulaufb027cdb2020-05-21 14:25:22 -06001257 }
1258 }
1259 }
1260 return HazardResult();
1261}
John Zulauf43cc7462020-12-03 12:33:12 -07001262
John Zulauf3d84f1b2020-03-09 13:33:25 -06001263class BarrierHazardDetector {
1264 public:
1265 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
1266 SyncStageAccessFlags src_access_scope)
1267 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1268
John Zulauf5f13a792020-03-10 07:31:21 -06001269 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1270 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001271 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001272 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001273 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001274 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001275 }
1276
1277 private:
1278 SyncStageAccessIndex usage_index_;
1279 VkPipelineStageFlags src_exec_scope_;
1280 SyncStageAccessFlags src_access_scope_;
1281};
1282
John Zulauf4a6105a2020-11-17 15:11:05 -07001283class EventBarrierHazardDetector {
1284 public:
1285 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
1286 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
1287 const ResourceUsageTag &scope_tag)
1288 : usage_index_(usage_index),
1289 src_exec_scope_(src_exec_scope),
1290 src_access_scope_(src_access_scope),
1291 event_scope_(event_scope),
1292 scope_pos_(event_scope.cbegin()),
1293 scope_end_(event_scope.cend()),
1294 scope_tag_(scope_tag) {}
1295
1296 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1297 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1298 // Need to find a more efficient sync, since we know pos->first is strictly increasing call to call
1299 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1300 if (scope_pos_ == scope_end_) return HazardResult();
1301 if (!scope_pos_->first.intersects(pos->first)) {
1302 event_scope_.lower_bound(pos->first);
1303 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1304 }
1305
1306 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1307 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1308 }
1309 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1310 // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
1311 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1312 }
1313
1314 private:
1315 SyncStageAccessIndex usage_index_;
1316 VkPipelineStageFlags src_exec_scope_;
1317 SyncStageAccessFlags src_access_scope_;
1318 const SyncEventState::ScopeMap &event_scope_;
1319 SyncEventState::ScopeMap::const_iterator scope_pos_;
1320 SyncEventState::ScopeMap::const_iterator scope_end_;
1321 const ResourceUsageTag &scope_tag_;
1322};
1323
1324HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
1325 const SyncStageAccessFlags &src_access_scope,
1326 const VkImageSubresourceRange &subresource_range,
1327 const SyncEventState &sync_event, DetectOptions options) const {
1328 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1329 // first access scope map to use, and there's no easy way to plumb it in below.
1330 const auto address_type = ImageAddressType(image);
1331 const auto &event_scope = sync_event.FirstScope(address_type);
1332
1333 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1334 event_scope, sync_event.first_scope_tag);
1335 VkOffset3D zero_offset = {0, 0, 0};
1336 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
1337}
1338
John Zulauf16adfc92020-04-08 10:28:33 -06001339HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001340 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001341 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001342 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001343 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
1344 VkOffset3D zero_offset = {0, 0, 0};
1345 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001346}
1347
John Zulauf355e49b2020-04-24 15:11:15 -06001348HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001349 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001350 const VkImageMemoryBarrier &barrier) const {
1351 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1352 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1353 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1354}
1355
John Zulauf9cb530d2019-09-30 14:14:10 -06001356template <typename Flags, typename Map>
1357SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1358 SyncStageAccessFlags scope = 0;
1359 for (const auto &bit_scope : map) {
1360 if (flag_mask < bit_scope.first) break;
1361
1362 if (flag_mask & bit_scope.first) {
1363 scope |= bit_scope.second;
1364 }
1365 }
1366 return scope;
1367}
1368
1369SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
1370 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1371}
1372
1373SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
1374 return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
1375}
1376
1377// Getting from stage mask and access mask to stage/acess masks is something we need to be good at...
1378SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001379 // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1380 // accesses (after factoring out common terms, the union of the per-stage/per-access intersections is the intersection
1381 // of the union of all stage/access types for all the stages with the same union for the access mask)...
John Zulauf9cb530d2019-09-30 14:14:10 -06001382 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1383}
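// Worked example (values chosen for illustration only): for stages == VK_PIPELINE_STAGE_TRANSFER_BIT and
// accesses == VK_ACCESS_TRANSFER_WRITE_BIT, AccessScopeByStage(stages) contains every stage/access bit the transfer
// stage can perform, AccessScopeByAccess(accesses) contains every stage/access bit a transfer write can come from,
// and their intersection is just the transfer-stage transfer-write usage, the only combination both masks allow:
//
//     const SyncStageAccessFlags scope = SyncStageAccess::AccessScope(VK_PIPELINE_STAGE_TRANSFER_BIT,
//                                                                     VK_ACCESS_TRANSFER_WRITE_BIT);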
1384
1385template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001386void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001387 // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
1388 // that do incremental updates
John Zulauf4a6105a2020-11-17 15:11:05 -07001389 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001390 auto pos = accesses->lower_bound(range);
1391 if (pos == accesses->end() || !pos->first.intersects(range)) {
1392 // The range is empty, fill it with a default value.
1393 pos = action.Infill(accesses, pos, range);
1394 } else if (range.begin < pos->first.begin) {
1395 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001396 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001397 } else if (pos->first.begin < range.begin) {
1398 // Trim the beginning if needed
1399 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1400 ++pos;
1401 }
1402
1403 const auto the_end = accesses->end();
1404 while ((pos != the_end) && pos->first.intersects(range)) {
1405 if (pos->first.end > range.end) {
1406 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1407 }
1408
1409 pos = action(accesses, pos);
1410 if (pos == the_end) break;
1411
1412 auto next = pos;
1413 ++next;
1414 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1415 // Need to infill if next is disjoint
1416 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001417 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001418 next = action.Infill(accesses, next, new_range);
1419 }
1420 pos = next;
1421 }
1422}
John Zulauf4a6105a2020-11-17 15:11:05 -07001423template <typename Action, typename RangeGen>
1424void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1425 assert(range_gen_arg);
1426 auto &range_gen = *range_gen_arg; // Style requires non-const reference parameters to be pointers, but deref-ing the pointer for each iterator op is a pain
1427 for (; range_gen->non_empty(); ++range_gen) {
1428 UpdateMemoryAccessState(accesses, *range_gen, action);
1429 }
1430}
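// A minimal sketch of the "Action" concept the walks above rely on (hypothetical, for illustration only): an action
// supplies Infill() to populate gaps in the access map and operator() to transform each overlapped entry. For
// example, an action that leaves the map untouched would look like:
//
//     struct NoOpAction {
//         using Iterator = ResourceAccessRangeMap::iterator;
//         Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
//         Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const { return pos; }
//     };
//
// UpdateMemoryAccessStateFunctor (below) and the barrier functors are the real implementations of this concept.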
John Zulauf9cb530d2019-09-30 14:14:10 -06001431
1432struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001433 using Iterator = ResourceAccessRangeMap::iterator;
1434 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001435 // this is only called on gaps, and never returns a gap.
1436 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001437 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001438 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001439 }
John Zulauf5f13a792020-03-10 07:31:21 -06001440
John Zulauf5c5e88d2019-12-26 11:22:02 -07001441 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001442 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001443 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001444 return pos;
1445 }
1446
John Zulauf43cc7462020-12-03 12:33:12 -07001447 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001448 SyncOrdering ordering_rule_, const ResourceUsageTag &tag_)
1449 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001450 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001451 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001452 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001453 const SyncOrdering ordering_rule;
John Zulauf9cb530d2019-09-30 14:14:10 -06001454 const ResourceUsageTag &tag;
1455};
1456
John Zulauf4a6105a2020-11-17 15:11:05 -07001457// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001458struct PipelineBarrierOp {
1459 SyncBarrier barrier;
1460 bool layout_transition;
1461 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1462 : barrier(barrier_), layout_transition(layout_transition_) {}
1463 PipelineBarrierOp() = default;
1464 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1465};
John Zulauf4a6105a2020-11-17 15:11:05 -07001466// The barrier operation for wait events
1467struct WaitEventBarrierOp {
1468 const ResourceUsageTag *scope_tag;
1469 SyncBarrier barrier;
1470 bool layout_transition;
1471 WaitEventBarrierOp(const ResourceUsageTag &scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1472 : scope_tag(&scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
1473 WaitEventBarrierOp() = default;
1474 void operator()(ResourceAccessState *access_state) const {
1475 assert(scope_tag); // Not valid to have a non-scope op executed, default construct included for std::vector support
1476 access_state->ApplyBarrier(*scope_tag, barrier, layout_transition);
1477 }
1478};
John Zulauf1e331ec2020-12-04 18:29:38 -07001479
John Zulauf4a6105a2020-11-17 15:11:05 -07001480// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1481// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1482// of a collection is known/present.
John Zulauf1e331ec2020-12-04 18:29:38 -07001483template <typename BarrierOp>
John Zulauf89311b42020-09-29 16:28:47 -06001484class ApplyBarrierOpsFunctor {
1485 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001486 using Iterator = ResourceAccessRangeMap::iterator;
1487 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -06001488
John Zulauf5c5e88d2019-12-26 11:22:02 -07001489 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001490 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001491 for (const auto &op : barrier_ops_) {
1492 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001493 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001494
John Zulauf89311b42020-09-29 16:28:47 -06001495 if (resolve_) {
1496 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1497 // another walk
1498 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001499 }
1500 return pos;
1501 }
1502
John Zulauf89311b42020-09-29 16:28:47 -06001503 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf1e331ec2020-12-04 18:29:38 -07001504 ApplyBarrierOpsFunctor(bool resolve, const std::vector<BarrierOp> &barrier_ops, const ResourceUsageTag &tag)
1505 : resolve_(resolve), barrier_ops_(barrier_ops), tag_(tag) {}
John Zulauf89311b42020-09-29 16:28:47 -06001506
1507 private:
1508 bool resolve_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001509 const std::vector<BarrierOp> &barrier_ops_;
1510 const ResourceUsageTag &tag_;
1511};
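// Illustrative sketch only (hypothetical names "sync_barrier", "tag", and "access_state_map"; not part of the
// validation path): a barrier op composes with the functor above and UpdateMemoryAccessState to apply a pipeline
// barrier across an entire access map in a single walk:
//
//     std::vector<PipelineBarrierOp> ops;
//     ops.emplace_back(sync_barrier, false /* no layout transition */);
//     ApplyBarrierOpsFunctor<PipelineBarrierOp> functor(true /* resolve pending state */, ops, tag);
//     UpdateMemoryAccessState(&access_state_map, kFullRange, functor);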
1512
John Zulauf4a6105a2020-11-17 15:11:05 -07001513// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1514// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
1515template <typename BarrierOp>
1516class ApplyBarrierFunctor {
1517 public:
1518 using Iterator = ResourceAccessRangeMap::iterator;
1519 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1520
1521 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1522 auto &access_state = pos->second;
1523 barrier_op_(&access_state);
1524 return pos;
1525 }
1526
1527 ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
1528
1529 private:
1530 const BarrierOp barrier_op_;
1531};
1532
John Zulauf1e331ec2020-12-04 18:29:38 -07001533// This functor resolves the pending state.
1534class ResolvePendingBarrierFunctor {
1535 public:
1536 using Iterator = ResourceAccessRangeMap::iterator;
1537 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1538
1539 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1540 auto &access_state = pos->second;
1541 access_state.ApplyPendingBarriers(tag_);
1542 return pos;
1543 }
1544
1545 ResolvePendingBarrierFunctor(const ResourceUsageTag &tag) : tag_(tag) {}
1546
1547 private:
John Zulauf89311b42020-09-29 16:28:47 -06001548 const ResourceUsageTag &tag_;
John Zulauf9cb530d2019-09-30 14:14:10 -06001549};
1550
John Zulauf8e3c3e92021-01-06 11:19:36 -07001551void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1552 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
1553 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001554 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001555}
1556
John Zulauf8e3c3e92021-01-06 11:19:36 -07001557void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001558 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001559 if (!SimpleBinding(buffer)) return;
1560 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001561 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001562}
John Zulauf355e49b2020-04-24 15:11:15 -06001563
John Zulauf8e3c3e92021-01-06 11:19:36 -07001564void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001565 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf540266b2020-04-06 18:54:53 -06001566 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001567 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001568 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001569 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1570 base_address);
1571 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001572 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf5f13a792020-03-10 07:31:21 -06001573 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001574 UpdateMemoryAccessState(&GetAccessStateMap(address_type), *range_gen, action);
John Zulauf5f13a792020-03-10 07:31:21 -06001575 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001576}
John Zulauf8e3c3e92021-01-06 11:19:36 -07001577void AccessContext::UpdateAccessState(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1578 const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask,
1579 const ResourceUsageTag &tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001580 if (view != nullptr) {
1581 const IMAGE_STATE *image = view->image_state.get();
1582 if (image != nullptr) {
1583 auto *update_range = &view->normalized_subresource_range;
1584 VkImageSubresourceRange masked_range;
1585 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1586 masked_range = view->normalized_subresource_range;
1587 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1588 update_range = &masked_range;
1589 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001590 UpdateAccessState(*image, current_usage, ordering_rule, *update_range, offset, extent, tag);
John Zulauf7635de32020-05-29 17:14:15 -06001591 }
1592 }
1593}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001594
John Zulauf8e3c3e92021-01-06 11:19:36 -07001595void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001596 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1597 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001598 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1599 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001600 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001601}
1602
John Zulauf540266b2020-04-06 18:54:53 -06001603template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001604void AccessContext::UpdateResourceAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001605 if (!SimpleBinding(buffer)) return;
1606 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf43cc7462020-12-03 12:33:12 -07001607 UpdateMemoryAccessState(&GetAccessStateMap(AccessAddressType::kLinear), (range + base_address), action);
John Zulauf540266b2020-04-06 18:54:53 -06001608}
1609
1610template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001611void AccessContext::UpdateResourceAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
1612 const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001613 if (!SimpleBinding(image)) return;
1614 const auto address_type = ImageAddressType(image);
1615 auto *accesses = &GetAccessStateMap(address_type);
John Zulauf540266b2020-04-06 18:54:53 -06001616
John Zulauf16adfc92020-04-08 10:28:33 -06001617 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001618 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
1619 image.createInfo.extent, base_address);
1620
John Zulauf540266b2020-04-06 18:54:53 -06001621 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001622 UpdateMemoryAccessState(accesses, *range_gen, action);
John Zulauf540266b2020-04-06 18:54:53 -06001623 }
1624}
1625
John Zulauf7635de32020-05-29 17:14:15 -06001626void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1627 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1628 const ResourceUsageTag &tag) {
1629 UpdateStateResolveAction update(*this, tag);
1630 ResolveOperation(update, rp_state, render_area, attachment_views, subpass);
1631}
1632
John Zulaufaff20662020-06-01 14:07:58 -06001633void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1634 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1635 const ResourceUsageTag &tag) {
1636 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1637 VkExtent3D extent = CastTo3D(render_area.extent);
1638 VkOffset3D offset = CastTo3D(render_area.offset);
1639
1640 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1641 if (rp_state.attachment_last_subpass[i] == subpass) {
1642 if (attachment_views[i] == nullptr) continue; // UNUSED
1643 const auto &view = *attachment_views[i];
1644 const IMAGE_STATE *image = view.image_state.get();
1645 if (image == nullptr) continue;
1646
1647 const auto &ci = attachment_ci[i];
1648 const bool has_depth = FormatHasDepth(ci.format);
1649 const bool has_stencil = FormatHasStencil(ci.format);
1650 const bool is_color = !(has_depth || has_stencil);
1651 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1652
1653 if (is_color && store_op_stores) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001654 UpdateAccessState(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1655 view.normalized_subresource_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001656 } else {
1657 auto update_range = view.normalized_subresource_range;
1658 if (has_depth && store_op_stores) {
1659 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001660 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1661 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001662 }
1663 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1664 if (has_stencil && stencil_op_stores) {
1665 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001666 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1667 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001668 }
1669 }
1670 }
1671 }
1672}
1673
John Zulauf540266b2020-04-06 18:54:53 -06001674template <typename Action>
1675void AccessContext::ApplyGlobalBarriers(const Action &barrier_action) {
1676 // Note: Barriers do *not* cross context boundaries, applying to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001677 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001678 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001679 }
1680}
1681
1682void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001683 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1684 auto &context = contexts[subpass_index];
John Zulaufb02c1eb2020-10-06 16:33:36 -06001685 ApplyTrackbackBarriersAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001686 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001687 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001688 }
1689 }
1690}
1691
John Zulauf355e49b2020-04-24 15:11:15 -06001692// Suitable only for *subpass* access contexts
John Zulauf7635de32020-05-29 17:14:15 -06001693HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const IMAGE_VIEW_STATE *attach_view) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001694 if (!attach_view) return HazardResult();
1695 const auto image_state = attach_view->image_state.get();
1696 if (!image_state) return HazardResult();
1697
John Zulauf355e49b2020-04-24 15:11:15 -06001698 // We should never ask for a transition from a context we don't have
John Zulauf7635de32020-05-29 17:14:15 -06001699 assert(track_back.context);
John Zulauf355e49b2020-04-24 15:11:15 -06001700
1701 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001702 // Hazard detection for the transition can be done against the merge of the barriers (it only uses src_...)
1703 const auto merged_barrier = MergeBarriers(track_back.barriers);
1704 HazardResult hazard =
1705 track_back.context->DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope, merged_barrier.src_access_scope,
1706 attach_view->normalized_subresource_range, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001707 if (!hazard.hazard) {
1708 // The Async hazard check is against the current context's async set.
John Zulaufa0a98292020-09-18 09:30:10 -06001709 hazard = DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope, merged_barrier.src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001710 attach_view->normalized_subresource_range, kDetectAsync);
1711 }
John Zulaufa0a98292020-09-18 09:30:10 -06001712
John Zulauf355e49b2020-04-24 15:11:15 -06001713 return hazard;
1714}
1715
John Zulaufb02c1eb2020-10-06 16:33:36 -06001716void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
1717 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1718 const ResourceUsageTag &tag) {
1719 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001720 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001721 for (const auto &transition : transitions) {
1722 const auto prev_pass = transition.prev_pass;
1723 const auto attachment_view = attachment_views[transition.attachment];
1724 if (!attachment_view) continue;
1725 const auto *image = attachment_view->image_state.get();
1726 if (!image) continue;
1727 if (!SimpleBinding(*image)) continue;
1728
1729 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1730 assert(trackback);
1731
1732 // Import the attachments into the current context
1733 const auto *prev_context = trackback->context;
1734 assert(prev_context);
1735 const auto address_type = ImageAddressType(*image);
1736 auto &target_map = GetAccessStateMap(address_type);
1737 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
1738 prev_context->ResolveAccessRange(*image, attachment_view->normalized_subresource_range, barrier_action, address_type,
John Zulauf646cc292020-10-23 09:16:45 -06001739 &target_map, &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001740 }
1741
John Zulauf86356ca2020-10-19 11:46:41 -06001742 // If there were no transitions skip this global map walk
1743 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001744 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulauf86356ca2020-10-19 11:46:41 -06001745 ApplyGlobalBarriers(apply_pending_action);
1746 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001747}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001748
1749void CommandBufferAccessContext::ApplyBufferBarriers(const SyncEventState &sync_event, const SyncExecScope &dst,
1750 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001751 const auto &scope_tag = sync_event.first_scope_tag;
1752 auto *access_context = GetCurrentAccessContext();
1753 const auto address_type = AccessAddressType::kLinear;
1754 for (uint32_t index = 0; index < barrier_count; index++) {
1755 auto barrier = barriers[index]; // barrier is a copy
1756 const auto *buffer = sync_state_->Get<BUFFER_STATE>(barrier.buffer);
1757 if (!buffer) continue;
1758 const auto base_address = ResourceBaseAddress(*buffer);
1759 barrier.size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
1760 const ResourceAccessRange range = MakeRange(barrier) + base_address;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001761 const SyncBarrier sync_barrier(barrier, sync_event.scope, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001762 const ApplyBarrierFunctor<WaitEventBarrierOp> barrier_action({scope_tag, sync_barrier, false /* layout_transition */});
1763 EventSimpleRangeGenerator filtered_range_gen(sync_event.FirstScope(address_type), range);
1764 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barrier_action, &filtered_range_gen);
1765 }
1766}
1767
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001768void CommandBufferAccessContext::ApplyGlobalBarriers(SyncEventState &sync_event, const SyncExecScope &dst,
1769 uint32_t memory_barrier_count, const VkMemoryBarrier *pMemoryBarriers,
1770 const ResourceUsageTag &tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001771 std::vector<WaitEventBarrierOp> barrier_ops;
1772 barrier_ops.reserve(std::max<uint32_t>(memory_barrier_count, 1)); // reserve space for the forced exec barrier when the count is zero
1773 const auto &scope_tag = sync_event.first_scope_tag;
1774 auto *access_context = GetCurrentAccessContext();
1775 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
1776 const auto &barrier = pMemoryBarriers[barrier_index];
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001777 SyncBarrier sync_barrier(barrier, sync_event.scope, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001778 barrier_ops.emplace_back(scope_tag, sync_barrier, false);
1779 }
1780 if (0 == memory_barrier_count) {
1781 // If there are no global memory barriers, force an exec barrier
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001782 barrier_ops.emplace_back(scope_tag, SyncBarrier(sync_event.scope, dst), false);
John Zulauf4a6105a2020-11-17 15:11:05 -07001783 }
1784 ApplyBarrierOpsFunctor<WaitEventBarrierOp> barriers_functor(false /* don't resolve */, barrier_ops, tag);
1785 for (const auto address_type : kAddressTypes) {
1786 EventSimpleRangeGenerator filtered_range_gen(sync_event.FirstScope(address_type), kFullRange);
1787 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &filtered_range_gen);
1788 }
1789
1790 // Apply the global barrier to the event itself (for race condition tracking)
1791 // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS if set for inter-event-calls
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001792 sync_event.barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1793 sync_event.barriers |= dst.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07001794}
1795
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001796void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
1797 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
John Zulauf4a6105a2020-11-17 15:11:05 -07001798 for (auto &event_pair : event_state_) {
1799 assert(event_pair.second); // Shouldn't be storing empty
1800 auto &sync_event = *event_pair.second;
1801 // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event-calls
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001802 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
1803 sync_event.barriers |= dst.exec_scope;
1804 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
John Zulauf4a6105a2020-11-17 15:11:05 -07001805 }
1806 }
1807}
1808
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001809void CommandBufferAccessContext::ApplyImageBarriers(const SyncEventState &sync_event, const SyncExecScope &dst,
1810 uint32_t barrier_count, const VkImageMemoryBarrier *barriers,
1811 const ResourceUsageTag &tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001812 const auto &scope_tag = sync_event.first_scope_tag;
1813 auto *access_context = GetCurrentAccessContext();
1814 for (uint32_t index = 0; index < barrier_count; index++) {
1815 const auto &barrier = barriers[index];
1816 const auto *image = sync_state_->Get<IMAGE_STATE>(barrier.image);
1817 if (!image) continue;
1818 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
1819 bool layout_transition = barrier.oldLayout != barrier.newLayout;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001820 const SyncBarrier sync_barrier(barrier, sync_event.scope, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001821 const ApplyBarrierFunctor<WaitEventBarrierOp> barrier_action({scope_tag, sync_barrier, layout_transition});
1822 const auto base_address = ResourceBaseAddress(*image);
1823 subresource_adapter::ImageRangeGenerator range_gen(*image->fragment_encoder.get(), subresource_range, {0, 0, 0},
1824 image->createInfo.extent, base_address);
1825 const auto address_type = AccessContext::ImageAddressType(*image);
1826 EventImageRangeGenerator filtered_range_gen(sync_event.FirstScope(address_type), range_gen);
1827 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barrier_action, &filtered_range_gen);
1828 }
1829}
John Zulaufb02c1eb2020-10-06 16:33:36 -06001830
John Zulauf355e49b2020-04-24 15:11:15 -06001831// Class CommandBufferAccessContext: Keep track of resource access state information for a specific command buffer
1832bool CommandBufferAccessContext::ValidateBeginRenderPass(const RENDER_PASS_STATE &rp_state,
1833
1834 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08001835 const VkSubpassBeginInfo *pSubpassBeginInfo, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001836 // Check if any of the layout transitions are hazardous... but we don't have the renderpass context to work with, so we build a temporary context below to validate against
1837 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06001838
John Zulauf86356ca2020-10-19 11:46:41 -06001839 assert(pRenderPassBegin);
1840 if (nullptr == pRenderPassBegin) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06001841
John Zulauf86356ca2020-10-19 11:46:41 -06001842 const uint32_t subpass = 0;
John Zulauf355e49b2020-04-24 15:11:15 -06001843
John Zulauf86356ca2020-10-19 11:46:41 -06001844 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
1845 // hasn't happened yet)
1846 const std::vector<AccessContext> empty_context_vector;
1847 AccessContext temp_context(subpass, queue_flags_, rp_state.subpass_dependencies, empty_context_vector,
1848 const_cast<AccessContext *>(&cb_access_context_));
John Zulauf355e49b2020-04-24 15:11:15 -06001849
John Zulauf86356ca2020-10-19 11:46:41 -06001850 // Create a view list
1851 const auto fb_state = sync_state_->Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
1852 assert(fb_state);
1853 if (nullptr == fb_state) return skip;
1854 // NOTE: Must not use COMMAND_BUFFER_STATE variant of this as RecordCmdBeginRenderPass hasn't run and thus
1855 // the activeRenderPass.* fields haven't been set.
1856 const auto views = sync_state_->GetAttachmentViews(*pRenderPassBegin, *fb_state);
1857
1858 // Validate transitions
John Zulauffaea0ee2021-01-14 14:01:32 -07001859 skip |= temp_context.ValidateLayoutTransitions(*this, rp_state, pRenderPassBegin->renderArea, subpass, views, func_name);
John Zulauf86356ca2020-10-19 11:46:41 -06001860
1861 // Validate load operations if there were no layout transition hazards
1862 if (!skip) {
1863 temp_context.RecordLayoutTransitions(rp_state, subpass, views, kCurrentCommandTag);
John Zulauffaea0ee2021-01-14 14:01:32 -07001864 skip |= temp_context.ValidateLoadOperation(*this, rp_state, pRenderPassBegin->renderArea, subpass, views, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06001865 }
John Zulauf86356ca2020-10-19 11:46:41 -06001866
John Zulauf355e49b2020-04-24 15:11:15 -06001867 return skip;
1868}
1869
locke-lunarg61870c22020-06-09 14:51:50 -06001870bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1871 const char *func_name) const {
1872 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001873 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001874 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001875 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
1876 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001877 return skip;
1878 }
1879
1880 using DescriptorClass = cvdescriptorset::DescriptorClass;
1881 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1882 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1883 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1884 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1885
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001886 for (const auto &stage_state : pipe->stage_state) {
1887 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1888 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001889 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001890 }
locke-lunarg61870c22020-06-09 14:51:50 -06001891 for (const auto &set_binding : stage_state.descriptor_uses) {
1892 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1893 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1894 set_binding.first.second);
1895 const auto descriptor_type = binding_it.GetType();
1896 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1897 auto array_idx = 0;
1898
1899 if (binding_it.IsVariableDescriptorCount()) {
1900 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1901 }
1902 SyncStageAccessIndex sync_index =
1903 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1904
1905 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1906 uint32_t index = i - index_range.start;
1907 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1908 switch (descriptor->GetClass()) {
1909 case DescriptorClass::ImageSampler:
1910 case DescriptorClass::Image: {
1911 const IMAGE_VIEW_STATE *img_view_state = nullptr;
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001912 VkImageLayout image_layout;
locke-lunarg61870c22020-06-09 14:51:50 -06001913 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001914 const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
1915 img_view_state = image_sampler_descriptor->GetImageViewState();
1916 image_layout = image_sampler_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001917 } else {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001918 const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1919 img_view_state = image_descriptor->GetImageViewState();
1920 image_layout = image_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001921 }
1922 if (!img_view_state) continue;
1923 const IMAGE_STATE *img_state = img_view_state->image_state.get();
1924 VkExtent3D extent = {};
1925 VkOffset3D offset = {};
1926 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1927 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1928 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
1929 } else {
1930 extent = img_state->createInfo.extent;
1931 }
John Zulauf361fb532020-07-22 10:45:39 -06001932 HazardResult hazard;
1933 const auto &subresource_range = img_view_state->normalized_subresource_range;
1934 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
1935 // Input attachments are subject to raster ordering rules
1936 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001937 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001938 } else {
1939 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range, offset, extent);
1940 }
John Zulauf33fc1d52020-07-17 11:01:10 -06001941 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001942 skip |= sync_state_->LogError(
1943 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001944 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1945 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001946 func_name, string_SyncHazard(hazard.hazard),
1947 sync_state_->report_data->FormatHandle(img_view_state->image_view).c_str(),
1948 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001949 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001950 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1951 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
John Zulauffaea0ee2021-01-14 14:01:32 -07001952 set_binding.first.second, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001953 }
1954 break;
1955 }
1956 case DescriptorClass::TexelBuffer: {
1957 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1958 if (!buf_view_state) continue;
1959 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001960 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001961 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001962 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001963 skip |= sync_state_->LogError(
1964 buf_view_state->buffer_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001965 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1966 func_name, string_SyncHazard(hazard.hazard),
locke-lunarg88dbb542020-06-23 22:05:42 -06001967 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view).c_str(),
1968 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001969 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001970 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1971 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001972 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001973 }
1974 break;
1975 }
1976 case DescriptorClass::GeneralBuffer: {
1977 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1978 auto buf_state = buffer_descriptor->GetBufferState();
1979 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001980 const ResourceAccessRange range =
1981 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001982 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001983 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001984 skip |= sync_state_->LogError(
1985 buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001986 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1987 func_name, string_SyncHazard(hazard.hazard),
1988 sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
locke-lunarg88dbb542020-06-23 22:05:42 -06001989 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001990 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001991 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1992 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001993 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001994 }
1995 break;
1996 }
1997 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
1998 default:
1999 break;
2000 }
2001 }
2002 }
2003 }
2004 return skip;
2005}
2006
2007void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
2008 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002009 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06002010 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002011 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
2012 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06002013 return;
2014 }
2015
2016 using DescriptorClass = cvdescriptorset::DescriptorClass;
2017 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
2018 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
2019 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
2020 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
2021
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002022 for (const auto &stage_state : pipe->stage_state) {
2023 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
2024 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06002025 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002026 }
locke-lunarg61870c22020-06-09 14:51:50 -06002027 for (const auto &set_binding : stage_state.descriptor_uses) {
2028 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
2029 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
2030 set_binding.first.second);
2031 const auto descriptor_type = binding_it.GetType();
2032 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
2033 auto array_idx = 0;
2034
2035 if (binding_it.IsVariableDescriptorCount()) {
2036 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
2037 }
2038 SyncStageAccessIndex sync_index =
2039 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
2040
2041 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
2042 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
2043 switch (descriptor->GetClass()) {
2044 case DescriptorClass::ImageSampler:
2045 case DescriptorClass::Image: {
2046 const IMAGE_VIEW_STATE *img_view_state = nullptr;
2047 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
2048 img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
2049 } else {
2050 img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
2051 }
2052 if (!img_view_state) continue;
2053 const IMAGE_STATE *img_state = img_view_state->image_state.get();
2054 VkExtent3D extent = {};
2055 VkOffset3D offset = {};
2056 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
2057 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
2058 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
2059 } else {
2060 extent = img_state->createInfo.extent;
2061 }
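// Input attachments are subject to raster ordering rules; all other descriptor accesses use the non-attachment ordering.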
John Zulauf8e3c3e92021-01-06 11:19:36 -07002062 SyncOrdering ordering_rule = (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
2063 ? SyncOrdering::kRaster
2064 : SyncOrdering::kNonAttachment;
2065 current_context_->UpdateAccessState(*img_state, sync_index, ordering_rule,
2066 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002067 break;
2068 }
2069 case DescriptorClass::TexelBuffer: {
2070 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
2071 if (!buf_view_state) continue;
2072 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002073 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002074 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002075 break;
2076 }
2077 case DescriptorClass::GeneralBuffer: {
2078 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
2079 auto buf_state = buffer_descriptor->GetBufferState();
2080 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06002081 const ResourceAccessRange range =
2082 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07002083 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002084 break;
2085 }
2086 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2087 default:
2088 break;
2089 }
2090 }
2091 }
2092 }
2093}
2094
2095bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
2096 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002097 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2098 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002099 return skip;
2100 }
2101
2102 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2103 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002104 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002105
2106 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002107 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002108 if (binding_description.binding < binding_buffers_size) {
2109 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002110 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002111
locke-lunarg1ae57d62020-11-18 10:49:19 -07002112 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002113 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2114 vertexCount, binding_description.stride);
locke-lunarg61870c22020-06-09 14:51:50 -06002115 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_INPUT_VERTEX_ATTRIBUTE_READ, range);
2116 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002117 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002118 buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002119 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002120 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002121 }
2122 }
2123 }
2124 return skip;
2125}
2126
2127void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002128 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2129 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002130 return;
2131 }
2132 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2133 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002134 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002135
2136 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002137 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002138 if (binding_description.binding < binding_buffers_size) {
2139 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002140 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002141
locke-lunarg1ae57d62020-11-18 10:49:19 -07002142 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002143 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2144 vertexCount, binding_description.stride);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002145 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_INPUT_VERTEX_ATTRIBUTE_READ, SyncOrdering::kNonAttachment,
2146 range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002147 }
2148 }
2149}
2150
2151bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2152 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002153 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002154 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002155 }
locke-lunarg61870c22020-06-09 14:51:50 -06002156
locke-lunarg1ae57d62020-11-18 10:49:19 -07002157 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002158 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002159 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2160 firstIndex, indexCount, index_size);
locke-lunarg61870c22020-06-09 14:51:50 -06002161 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_VERTEX_INPUT_INDEX_READ, range);
2162 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002163 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002164 index_buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002165 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002166 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002167 }
2168
2169 // TODO: For now, we check the whole vertex buffer, since the index buffer contents can change up until queue submission.
2170 // We will detect a more accurate range in the future.
2171 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2172 return skip;
2173}
2174
2175void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag &tag) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002176 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002177
locke-lunarg1ae57d62020-11-18 10:49:19 -07002178 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002179 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002180 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2181 firstIndex, indexCount, index_size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002182 current_context_->UpdateAccessState(*index_buf_state, SYNC_VERTEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002183
2184 // TODO: For now, we check the whole vertex buffer, since the index buffer contents can change up until queue submission.
2185 // We will detect a more accurate range in the future.
2186 RecordDrawVertex(UINT32_MAX, 0, tag);
2187}
2188
2189bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002190 bool skip = false;
2191 if (!current_renderpass_context_) return skip;
John Zulauffaea0ee2021-01-14 14:01:32 -07002192 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(*this, *cb_state_.get(),
locke-lunarg7077d502020-06-18 21:37:26 -06002193 cb_state_->activeRenderPassBeginInfo.renderArea, func_name);
2194 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002195}
2196
2197void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002198 if (current_renderpass_context_) {
locke-lunarg7077d502020-06-18 21:37:26 -06002199 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), cb_state_->activeRenderPassBeginInfo.renderArea,
2200 tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002201 }
locke-lunarg61870c22020-06-09 14:51:50 -06002202}
2203
John Zulauf355e49b2020-04-24 15:11:15 -06002204bool CommandBufferAccessContext::ValidateNextSubpass(const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002205 bool skip = false;
locke-lunarg7077d502020-06-18 21:37:26 -06002206 if (!current_renderpass_context_) return skip;
John Zulauffaea0ee2021-01-14 14:01:32 -07002207 skip |= current_renderpass_context_->ValidateNextSubpass(*this, cb_state_->activeRenderPassBeginInfo.renderArea, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002208
2209 return skip;
2210}
2211
2212bool CommandBufferAccessContext::ValidateEndRenderpass(const char *func_name) const {
2213 // TODO: Things to add here.
John Zulauf7635de32020-05-29 17:14:15 -06002214 // Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002215 bool skip = false;
locke-lunarg7077d502020-06-18 21:37:26 -06002216 if (!current_renderpass_context_) return skip;
John Zulauffaea0ee2021-01-14 14:01:32 -07002217 skip |= current_renderpass_context_->ValidateEndRenderPass(*this, cb_state_->activeRenderPassBeginInfo.renderArea, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002218
2219 return skip;
2220}
2221
2222void CommandBufferAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
2223 assert(sync_state_);
2224 if (!cb_state_) return;
2225
2226 // Create an access context for the current renderpass.
John Zulauf1a224292020-06-30 14:52:13 -06002227 render_pass_contexts_.emplace_back();
John Zulauf16adfc92020-04-08 10:28:33 -06002228 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf1a224292020-06-30 14:52:13 -06002229 current_renderpass_context_->RecordBeginRenderPass(*sync_state_, *cb_state_, &cb_access_context_, queue_flags_, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002230 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf16adfc92020-04-08 10:28:33 -06002231}
2232
John Zulauffaea0ee2021-01-14 14:01:32 -07002233void CommandBufferAccessContext::RecordNextSubpass(const RENDER_PASS_STATE &rp_state, CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002234 assert(current_renderpass_context_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002235 auto prev_tag = NextCommandTag(command);
2236 auto next_tag = NextSubcommandTag(command);
2237 current_renderpass_context_->RecordNextSubpass(cb_state_->activeRenderPassBeginInfo.renderArea, prev_tag, next_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002238 current_context_ = &current_renderpass_context_->CurrentContext();
2239}
2240
John Zulauffaea0ee2021-01-14 14:01:32 -07002241void CommandBufferAccessContext::RecordEndRenderPass(const RENDER_PASS_STATE &render_pass, CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002242 assert(current_renderpass_context_);
2243 if (!current_renderpass_context_) return;
2244
John Zulauffaea0ee2021-01-14 14:01:32 -07002245 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, cb_state_->activeRenderPassBeginInfo.renderArea,
2246 NextCommandTag(command));
John Zulauf355e49b2020-04-24 15:11:15 -06002247 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002248 current_renderpass_context_ = nullptr;
2249}
2250
John Zulauf49beb112020-11-04 16:06:31 -07002251bool CommandBufferAccessContext::ValidateSetEvent(VkCommandBuffer commandBuffer, VkEvent event,
2252 VkPipelineStageFlags stageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002253 // I'll put this here just in case we need to pass this in for future extension support
2254 const auto cmd = CMD_SETEVENT;
2255 bool skip = false;
2256 const auto *sync_event = GetEventState(event);
2257 if (!sync_event) return false; // Core, Lifetimes, or Param check needs to catch invalid events.
2258
2259 const char *const reset_set =
2260 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
2261 "hazards.";
2262 const char *const wait =
2263 "%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";
2264
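// The event's first (source) execution scope is stageMask expanded to include logically earlier pipeline stages.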
2265 const auto exec_scope = WithEarlierPipelineStages(ExpandPipelineStages(GetQueueFlags(), stageMask));
2266 if (!sync_event->HasBarrier(stageMask, exec_scope)) {
2267 const char *vuid = nullptr;
2268 const char *message = nullptr;
2269 switch (sync_event->last_command) {
2270 case CMD_RESETEVENT:
2271 // Needs a barrier between reset and set
2272 vuid = "SYNC-vkCmdSetEvent-missingbarrier-reset";
2273 message = reset_set;
2274 break;
2275 case CMD_SETEVENT:
2276 // Needs a barrier between set and set
2277 vuid = "SYNC-vkCmdSetEvent-missingbarrier-set";
2278 message = reset_set;
2279 break;
2280 case CMD_WAITEVENTS:
2281 // Needs a barrier or is in second execution scope
2282 vuid = "SYNC-vkCmdSetEvent-missingbarrier-wait";
2283 message = wait;
2284 break;
2285 default:
2286 // The only other valid last_command value not handled above is CMD_NONE.
2287 assert(sync_event->last_command == CMD_NONE);
2288 break;
2289 }
2290 if (vuid) {
2291 assert(nullptr != message);
2292 const char *const cmd_name = CommandTypeString(cmd);
2293 skip |= sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2294 cmd_name, CommandTypeString(sync_event->last_command));
2295 }
2296 }
2297
2298 return skip;
John Zulauf49beb112020-11-04 16:06:31 -07002299}
2300
John Zulauf4a6105a2020-11-17 15:11:05 -07002301void CommandBufferAccessContext::RecordSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask,
2302 const ResourceUsageTag &tag) {
2303 auto *sync_event = GetEventState(event);
2304 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
2305
2306 // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined
2307 // and we're issuing errors re: missing barriers between event commands, which if the user fixes would fix
2308 // any issues caused by naive scope setting here.
2309
2310 // What happens with two SetEvents is that one cannot know which group of operations will be waited for.
2311 // Given:
2312 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
2313 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002314 auto scope = SyncExecScope::MakeSrc(GetQueueFlags(), stageMask);
John Zulauf4a6105a2020-11-17 15:11:05 -07002315
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002316 if (!sync_event->HasBarrier(stageMask, scope.exec_scope)) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002317 sync_event->unsynchronized_set = sync_event->last_command;
2318 sync_event->ResetFirstScope();
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002319 } else if (sync_event->scope.exec_scope == 0) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002320 // We only set the scope if there isn't one
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002321 sync_event->scope = scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002322
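// Snapshot every access currently in the source scope (or its chain) into the event's per-address-type first_scope map.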
2323 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
2324 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002325 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002326 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
2327 }
2328 };
2329 GetCurrentAccessContext()->ForAll(set_scope);
2330 sync_event->unsynchronized_set = CMD_NONE;
2331 sync_event->first_scope_tag = tag;
2332 }
2333 sync_event->last_command = CMD_SETEVENT;
2334 sync_event->barriers = 0U;
2335}
John Zulauf49beb112020-11-04 16:06:31 -07002336
2337bool CommandBufferAccessContext::ValidateResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
2338 VkPipelineStageFlags stageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002339 // I'll put this here just in case we need to pass this in for future extension support
2340 const auto cmd = CMD_RESETEVENT;
2341
2342 bool skip = false;
2343 // TODO: EVENTS:
2344 // What is it we need to check... that we've had a reset since a set? Set/Set seems ill formed...
2345 const auto *sync_event = GetEventState(event);
2346 if (!sync_event) return false; // Core, Lifetimes, or Param check needs to catch invalid events.
2347
2348 const char *const set_wait =
2349 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
2350 "hazards.";
2351 const char *message = set_wait; // Only one message this call.
2352 const auto exec_scope = WithEarlierPipelineStages(ExpandPipelineStages(GetQueueFlags(), stageMask));
2353 if (!sync_event->HasBarrier(stageMask, exec_scope)) {
2354 const char *vuid = nullptr;
2355 switch (sync_event->last_command) {
2356 case CMD_SETEVENT:
2357 // Needs a barrier between set and reset
2358 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
2359 break;
2360 case CMD_WAITEVENTS: {
2361 // Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask)
2362 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
2363 break;
2364 }
2365 default:
2366 // The only other valid last_command values not handled above are CMD_NONE and CMD_RESETEVENT.
2367 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT));
2368 break;
2369 }
2370 if (vuid) {
2371 const char *const cmd_name = CommandTypeString(cmd);
2372 skip |= sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2373 cmd_name, CommandTypeString(sync_event->last_command));
2374 }
2375 }
2376 return skip;
John Zulauf49beb112020-11-04 16:06:31 -07002377}
2378
John Zulauf4a6105a2020-11-17 15:11:05 -07002379void CommandBufferAccessContext::RecordResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
2380 const auto cmd = CMD_RESETEVENT;
2381 auto *sync_event = GetEventState(event);
2382 if (!sync_event) return;
John Zulauf49beb112020-11-04 16:06:31 -07002383
John Zulauf4a6105a2020-11-17 15:11:05 -07002384 // Clear out the first sync scope, any races vs. wait or set are reported, so we'll keep the bookkeeping simple assuming
2385 // the safe case
2386 for (const auto address_type : kAddressTypes) {
2387 sync_event->first_scope[static_cast<size_t>(address_type)].clear();
2388 }
2389
2390 // Update the event state
2391 sync_event->last_command = cmd;
2392 sync_event->unsynchronized_set = CMD_NONE;
2393 sync_event->ResetFirstScope();
2394 sync_event->barriers = 0U;
2395}
2396
2397bool CommandBufferAccessContext::ValidateWaitEvents(uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
2398 VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
2399 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
John Zulauf49beb112020-11-04 16:06:31 -07002400 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
2401 uint32_t imageMemoryBarrierCount,
2402 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002403 const auto cmd = CMD_WAITEVENTS;
2404 const char *const ignored = "Wait operation is ignored for this event.";
2405 bool skip = false;
2406
2407 if (srcStageMask & VK_PIPELINE_STAGE_HOST_BIT) {
2408 const char *const cmd_name = CommandTypeString(cmd);
2409 const char *const vuid = "SYNC-vkCmdWaitEvents-hostevent-unsupported";
John Zulauffe757512020-12-18 12:17:47 -07002410 skip = sync_state_->LogInfo(cb_state_->commandBuffer, vuid,
2411 "%s, srcStageMask includes %s, unsupported by synchronization validaton.", cmd_name,
2412 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT), ignored);
John Zulauf4a6105a2020-11-17 15:11:05 -07002413 }
2414
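// Union of the stageMask values recorded for each signaled event; used below to flag srcStageMask bits with no corresponding SetEvent.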
2415 VkPipelineStageFlags event_stage_masks = 0U;
John Zulauffe757512020-12-18 12:17:47 -07002416 bool events_not_found = false;
John Zulauf4a6105a2020-11-17 15:11:05 -07002417 for (uint32_t event_index = 0; event_index < eventCount; event_index++) {
2418 const auto event = pEvents[event_index];
2419 const auto *sync_event = GetEventState(event);
John Zulauffe757512020-12-18 12:17:47 -07002420 if (!sync_event) {
2421 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
2422 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives.
2423
2424 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
2425 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002426
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002427 event_stage_masks |= sync_event->scope.mask_param;
John Zulauf4a6105a2020-11-17 15:11:05 -07002428 const auto ignore_reason = sync_event->IsIgnoredByWait(srcStageMask);
2429 if (ignore_reason) {
2430 switch (ignore_reason) {
2431 case SyncEventState::ResetWaitRace: {
2432 const char *const cmd_name = CommandTypeString(cmd);
2433 const char *const vuid = "SYNC-vkCmdWaitEvents-missingbarrier-reset";
2434 const char *const message =
2435 "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
2436 skip |=
2437 sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2438 cmd_name, CommandTypeString(sync_event->last_command), ignored);
2439 break;
2440 }
2441 case SyncEventState::SetRace: {
2442 // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus ignored for this
2443 // event
2444 const char *const cmd_name = CommandTypeString(cmd);
2445 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
2446 const char *const message =
2447 "%s: %s Unsychronized %s calls result in race conditions w.r.t. event signalling, % %s";
2448 const char *const reason = "First synchronization scope is undefined.";
2449 skip |=
2450 sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2451 CommandTypeString(sync_event->last_command), reason, ignored);
2452 break;
2453 }
2454 case SyncEventState::MissingStageBits: {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002455 const VkPipelineStageFlags missing_bits = sync_event->scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07002456 // Issue error message that event waited for is not in wait events scope
2457 const char *const cmd_name = CommandTypeString(cmd);
2458 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
2459 const char *const message =
2460 "%s: %s stageMask 0x%" PRIx32 " includes bits not present in srcStageMask 0x%" PRIx32
2461 ". Bits missing from srcStageMask %s. %s";
2462 skip |= sync_state_->LogError(
2463 event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002464 sync_event->scope.mask_param, srcStageMask, string_VkPipelineStageFlags(missing_bits).c_str(), ignored);
John Zulauf4a6105a2020-11-17 15:11:05 -07002465 break;
2466 }
2467 default:
2468 assert(ignore_reason == SyncEventState::NotIgnored);
2469 }
2470 } else if (imageMemoryBarrierCount) {
2471 const auto *context = GetCurrentAccessContext();
2472 assert(context);
2473 for (uint32_t barrier_index = 0; barrier_index < imageMemoryBarrierCount; barrier_index++) {
2474 const auto &barrier = pImageMemoryBarriers[barrier_index];
2475 if (barrier.oldLayout == barrier.newLayout) continue;
2476 const auto *image_state = sync_state_->Get<IMAGE_STATE>(barrier.image);
2477 if (!image_state) continue;
2478 auto subresource_range = NormalizeSubresourceRange(image_state->createInfo, barrier.subresourceRange);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002479 const auto src_access_scope = SyncStageAccess::AccessScope(sync_event->scope.valid_accesses, barrier.srcAccessMask);
John Zulauf4a6105a2020-11-17 15:11:05 -07002480 const auto hazard =
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002481 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
2482 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
John Zulauf4a6105a2020-11-17 15:11:05 -07002483 if (hazard.hazard) {
2484 const char *const cmd_name = CommandTypeString(cmd);
2485 skip |= sync_state_->LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
2486 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", cmd_name,
2487 string_SyncHazard(hazard.hazard), barrier_index,
2488 sync_state_->report_data->FormatHandle(barrier.image).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002489 FormatUsage(hazard).c_str());
John Zulauf4a6105a2020-11-17 15:11:05 -07002490 break;
2491 }
2492 }
2493 }
2494 }
2495
2496 // Note that we can't check for HOST in pEvents as we don't track that set event type
2497 const auto extra_stage_bits = (srcStageMask & ~VK_PIPELINE_STAGE_HOST_BIT) & ~event_stage_masks;
2498 if (extra_stage_bits) {
2499 // Issue error message that event waited for is not in wait events scope
2500 const char *const cmd_name = CommandTypeString(cmd);
2501 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
2502 const char *const message =
John Zulauffe757512020-12-18 12:17:47 -07002503 "%s: srcStageMask 0x%" PRIx32 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
2504 if (events_not_found) {
2505 skip |= sync_state_->LogInfo(cb_state_->commandBuffer, vuid, message, cmd_name, srcStageMask,
2506 string_VkPipelineStageFlags(extra_stage_bits).c_str(),
2507 " vkCmdSetEvent may be in previously submitted command buffer.");
2508 } else {
2509 skip |= sync_state_->LogError(cb_state_->commandBuffer, vuid, message, cmd_name, srcStageMask,
2510 string_VkPipelineStageFlags(extra_stage_bits).c_str(), "");
2511 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002512 }
2513 return skip;
John Zulauf49beb112020-11-04 16:06:31 -07002514}
2515
2516void CommandBufferAccessContext::RecordWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
2517 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
2518 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
2519 uint32_t bufferMemoryBarrierCount,
2520 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
2521 uint32_t imageMemoryBarrierCount,
John Zulauf4a6105a2020-11-17 15:11:05 -07002522 const VkImageMemoryBarrier *pImageMemoryBarriers, const ResourceUsageTag &tag) {
2523 auto *access_context = GetCurrentAccessContext();
2524 assert(access_context);
2525 if (!access_context) return;
2526
2527 // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
2528 // all accesses. Can instead import for all first_scopes, or a union of them, if this becomes a performance/memory issue,
2529 // but with no idea of the performance of the union, nor of whether it even matters... take the simplest approach here,
John Zulauf4a6105a2020-11-17 15:11:05 -07002530 access_context->ResolvePreviousAccesses();
2531
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002532 auto dst = SyncExecScope::MakeDst(GetQueueFlags(), dstStageMask);
John Zulauf4a6105a2020-11-17 15:11:05 -07002533 for (uint32_t event_index = 0; event_index < eventCount; event_index++) {
2534 const auto event = pEvents[event_index];
2535 auto *sync_event = GetEventState(event);
2536 if (!sync_event) continue;
2537
2538 sync_event->last_command = CMD_WAITEVENTS;
2539
2540 if (!sync_event->IsIgnoredByWait(srcStageMask)) {
2541 // These apply barriers one at a time as they are restricted to the resource ranges specified per each barrier,
2542 // but do not update the dependency chain information (but set the "pending" state), so that the order independence
2543 // of the barriers is maintained.
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002544 ApplyBufferBarriers(*sync_event, dst, bufferMemoryBarrierCount, pBufferMemoryBarriers);
2545 ApplyImageBarriers(*sync_event, dst, imageMemoryBarrierCount, pImageMemoryBarriers, tag);
2546 ApplyGlobalBarriers(*sync_event, dst, memoryBarrierCount, pMemoryBarriers, tag);
John Zulauf4a6105a2020-11-17 15:11:05 -07002547 } else {
2548 // We ignored this wait, so we don't have any effective synchronization barriers for it.
2549 sync_event->barriers = 0U;
2550 }
2551 }
2552
2553 // Apply the pending barriers
2554 ResolvePendingBarrierFunctor apply_pending_action(tag);
2555 access_context->ApplyGlobalBarriers(apply_pending_action);
2556}
2557
2558void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
2559 // Erase is okay with the key not being present.
2560 event_state_.erase(event);
2561}
2562
2563SyncEventState *CommandBufferAccessContext::GetEventState(VkEvent event) {
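// operator[] default-constructs the entry on first lookup; create the SyncEventState lazily.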
2564 auto &event_up = event_state_[event];
2565 if (!event_up) {
2566 auto event_state = sync_state_->GetShared<EVENT_STATE>(event);
2567 event_up.reset(new SyncEventState(event_state));
2568 }
2569 return event_up.get();
2570}
2571
2572const SyncEventState *CommandBufferAccessContext::GetEventState(VkEvent event) const {
2573 auto event_it = event_state_.find(event);
2574 if (event_it == event_state_.cend()) {
2575 return nullptr;
2576 }
2577 return event_it->second.get();
2578}
John Zulauf49beb112020-11-04 16:06:31 -07002579
John Zulauffaea0ee2021-01-14 14:01:32 -07002580bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandBufferAccessContext &cb_context,
2581 const CMD_BUFFER_STATE &cmd, const VkRect2D &render_area,
2582 const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002583 bool skip = false;
John Zulauffaea0ee2021-01-14 14:01:32 -07002584 const auto &sync_state = cb_context.GetSyncState();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002585 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2586 if (!pipe ||
2587 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002588 return skip;
2589 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002590 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002591 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
2592 VkExtent3D extent = CastTo3D(render_area.extent);
2593 VkOffset3D offset = CastTo3D(render_area.offset);
locke-lunarg37047832020-06-12 13:44:45 -06002594
John Zulauf1a224292020-06-30 14:52:13 -06002595 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002596 // The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002597 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2598 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002599 if (location >= subpass.colorAttachmentCount ||
2600 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002601 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002602 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002603 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf1a224292020-06-30 14:52:13 -06002604 HazardResult hazard = current_context.DetectHazard(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002605 SyncOrdering::kColorAttachment, offset, extent);
locke-lunarg96dc9632020-06-10 17:22:18 -06002606 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002607 skip |= sync_state.LogError(img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002608 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002609 func_name, string_SyncHazard(hazard.hazard),
2610 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2611 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauffaea0ee2021-01-14 14:01:32 -07002612 location, cb_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002613 }
2614 }
2615 }
locke-lunarg37047832020-06-12 13:44:45 -06002616
2617 // PHASE1 TODO: Add layout-based read vs. write selection.
2618 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002619 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002620 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002621 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002622 bool depth_write = false, stencil_write = false;
2623
2624 // PHASE1 TODO: These validations should be in core_checks.
2625 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002626 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2627 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002628 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2629 depth_write = true;
2630 }
2631 // PHASE1 TODO: We still need to check whether stencil is writable.
2632 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2633 // If the depth test is disabled, it's considered to pass, and then depthFailOp doesn't run.
2634 // PHASE1 TODO: These validations should be in core_checks.
2635 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002636 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002637 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2638 stencil_write = true;
2639 }
2640
2641 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2642 if (depth_write) {
2643 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002644 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002645 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002646 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002647 skip |= sync_state.LogError(
2648 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002649 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002650 func_name, string_SyncHazard(hazard.hazard),
2651 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2652 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauffaea0ee2021-01-14 14:01:32 -07002653 cb_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002654 }
2655 }
2656 if (stencil_write) {
2657 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002658 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002659 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002660 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002661 skip |= sync_state.LogError(
2662 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002663 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002664 func_name, string_SyncHazard(hazard.hazard),
2665 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2666 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauffaea0ee2021-01-14 14:01:32 -07002667 cb_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002668 }
locke-lunarg61870c22020-06-09 14:51:50 -06002669 }
2670 }
2671 return skip;
2672}
2673
locke-lunarg96dc9632020-06-10 17:22:18 -06002674void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const VkRect2D &render_area,
2675 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002676 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2677 if (!pipe ||
2678 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002679 return;
2680 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002681 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002682 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
2683 VkExtent3D extent = CastTo3D(render_area.extent);
2684 VkOffset3D offset = CastTo3D(render_area.offset);
2685
John Zulauf1a224292020-06-30 14:52:13 -06002686 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002687 // The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002688 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2689 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002690 if (location >= subpass.colorAttachmentCount ||
2691 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002692 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002693 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002694 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf8e3c3e92021-01-06 11:19:36 -07002695 current_context.UpdateAccessState(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
2696 SyncOrdering::kColorAttachment, offset, extent, 0, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002697 }
2698 }
locke-lunarg37047832020-06-12 13:44:45 -06002699
2700 // PHASE1 TODO: Add layout-based read vs. write selection.
2701 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002702 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002703 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002704 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002705 bool depth_write = false, stencil_write = false;
2706
2707 // PHASE1 TODO: These validations should be in core_checks.
2708 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002709 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2710 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002711 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2712 depth_write = true;
2713 }
2714 // PHASE1 TODO: We still need to check whether stencil is writable.
2715 // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2716 // If the depth test is disabled, it's considered to pass, and then depthFailOp doesn't run.
2717 // PHASE1 TODO: These validations should be in core_checks.
2718 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002719 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002720 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2721 stencil_write = true;
2722 }
2723
2724 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2725 if (depth_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002726 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2727 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT,
2728 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002729 }
2730 if (stencil_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002731 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2732 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT,
2733 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002734 }
locke-lunarg61870c22020-06-09 14:51:50 -06002735 }
2736}
2737
John Zulauffaea0ee2021-01-14 14:01:32 -07002738bool RenderPassAccessContext::ValidateNextSubpass(const CommandBufferAccessContext &cb_context, const VkRect2D &render_area,
John Zulauf1507ee42020-05-18 11:33:09 -06002739 const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002740 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002741 bool skip = false;
John Zulauffaea0ee2021-01-14 14:01:32 -07002742 skip |= CurrentContext().ValidateResolveOperations(cb_context, *rp_state_, render_area, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002743 current_subpass_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002744 skip |= CurrentContext().ValidateStoreOperation(cb_context, *rp_state_, render_area, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002745 func_name);
2746
John Zulauf355e49b2020-04-24 15:11:15 -06002747 const auto next_subpass = current_subpass_ + 1;
John Zulauf1507ee42020-05-18 11:33:09 -06002748 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauffaea0ee2021-01-14 14:01:32 -07002749 skip |= next_context.ValidateLayoutTransitions(cb_context, *rp_state_, render_area, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002750 if (!skip) {
2751 // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2752 // on a copy of the (empty) next context.
2753 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2754 AccessContext temp_context(next_context);
2755 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
John Zulauffaea0ee2021-01-14 14:01:32 -07002756 skip |= temp_context.ValidateLoadOperation(cb_context, *rp_state_, render_area, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002757 }
John Zulauf7635de32020-05-29 17:14:15 -06002758 return skip;
2759}
John Zulauffaea0ee2021-01-14 14:01:32 -07002760bool RenderPassAccessContext::ValidateEndRenderPass(const CommandBufferAccessContext &cb_context, const VkRect2D &render_area,
John Zulauf7635de32020-05-29 17:14:15 -06002761 const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002762 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002763 bool skip = false;
John Zulauffaea0ee2021-01-14 14:01:32 -07002764 skip |= CurrentContext().ValidateResolveOperations(cb_context, *rp_state_, render_area, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002765 current_subpass_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002766 skip |= CurrentContext().ValidateStoreOperation(cb_context, *rp_state_, render_area, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002767 func_name);
John Zulauffaea0ee2021-01-14 14:01:32 -07002768 skip |= ValidateFinalSubpassLayoutTransitions(cb_context, render_area, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002769 return skip;
2770}
2771
John Zulauf7635de32020-05-29 17:14:15 -06002772AccessContext *RenderPassAccessContext::CreateStoreResolveProxy(const VkRect2D &render_area) const {
2773 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, render_area, attachment_views_);
2774}
2775
John Zulauffaea0ee2021-01-14 14:01:32 -07002776bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandBufferAccessContext &cb_context,
2777 const VkRect2D &render_area, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002778 bool skip = false;
2779
John Zulauf7635de32020-05-29 17:14:15 -06002780 // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2781 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2782 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2783 // to apply and only copy then, if this proves a hot spot.
2784 std::unique_ptr<AccessContext> proxy_for_current;
2785
John Zulauf355e49b2020-04-24 15:11:15 -06002786 // Validate the "finalLayout" transitions to external
2787 // Get them from where they're hiding in the extra entry.
2788 const auto &final_transitions = rp_state_->subpass_transitions.back();
2789 for (const auto &transition : final_transitions) {
2790 const auto &attach_view = attachment_views_[transition.attachment];
2791 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
2792 assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
John Zulauf7635de32020-05-29 17:14:15 -06002793 auto *context = trackback.context;
2794
2795 if (transition.prev_pass == current_subpass_) {
2796 if (!proxy_for_current) {
2797 // We haven't recorded resolve for the current_subpass, so we need to copy current and update it *as if* we had
2798 proxy_for_current.reset(CreateStoreResolveProxy(render_area));
2799 }
2800 context = proxy_for_current.get();
2801 }
2802
John Zulaufa0a98292020-09-18 09:30:10 -06002803 // Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
2804 const auto merged_barrier = MergeBarriers(trackback.barriers);
2805 auto hazard = context->DetectImageBarrierHazard(*attach_view->image_state, merged_barrier.src_exec_scope,
2806 merged_barrier.src_access_scope, attach_view->normalized_subresource_range,
2807 AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002808 if (hazard.hazard) {
John Zulauffaea0ee2021-01-14 14:01:32 -07002809 skip |= cb_context.GetSyncState().LogError(
2810 rp_state_->renderPass, string_SyncHazardVUID(hazard.hazard),
2811 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2812 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2813 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2814 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
2815 cb_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06002816 }
2817 }
2818 return skip;
2819}
2820
2821void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
2822 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002823 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002824}
2825
John Zulauf1507ee42020-05-18 11:33:09 -06002826void RenderPassAccessContext::RecordLoadOperations(const VkRect2D &render_area, const ResourceUsageTag &tag) {
2827 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2828 auto &subpass_context = subpass_contexts_[current_subpass_];
2829 VkExtent3D extent = CastTo3D(render_area.extent);
2830 VkOffset3D offset = CastTo3D(render_area.offset);
2831
2832 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2833 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
2834 if (attachment_views_[i] == nullptr) continue; // UNUSED
2835 const auto &view = *attachment_views_[i];
2836 const IMAGE_STATE *image = view.image_state.get();
2837 if (image == nullptr) continue;
2838
2839 const auto &ci = attachment_ci[i];
2840 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002841 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002842 const bool is_color = !(has_depth || has_stencil);
2843
2844 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002845 subpass_context.UpdateAccessState(*image, ColorLoadUsage(ci.loadOp), SyncOrdering::kColorAttachment,
2846 view.normalized_subresource_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002847 } else {
2848 auto update_range = view.normalized_subresource_range;
2849 if (has_depth) {
2850 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002851 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.loadOp),
2852 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002853 }
2854 if (has_stencil) {
2855 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002856 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.stencilLoadOp),
2857 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002858 }
2859 }
2860 }
2861 }
2862}
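// Note: per the Vulkan spec, VK_ATTACHMENT_LOAD_OP_LOAD behaves as a read access to the attachment, while
// VK_ATTACHMENT_LOAD_OP_CLEAR and VK_ATTACHMENT_LOAD_OP_DONT_CARE behave as writes; ColorLoadUsage() and
// DepthStencilLoadUsage() are expected to map loadOp/stencilLoadOp to the matching SYNC_* usage used above.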
2863
John Zulauf355e49b2020-04-24 15:11:15 -06002864void RenderPassAccessContext::RecordBeginRenderPass(const SyncValidator &state, const CMD_BUFFER_STATE &cb_state,
John Zulauf1a224292020-06-30 14:52:13 -06002865 const AccessContext *external_context, VkQueueFlags queue_flags,
2866 const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002867 current_subpass_ = 0;
locke-lunargaecf2152020-05-12 17:15:41 -06002868 rp_state_ = cb_state.activeRenderPass.get();
John Zulauf355e49b2020-04-24 15:11:15 -06002869 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
2870 // Add contexts for all subpasses here so that they exist during next subpass validation
2871 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002872 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulauf355e49b2020-04-24 15:11:15 -06002873 }
2874 attachment_views_ = state.GetCurrentAttachmentViews(cb_state);
2875
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002876 subpass_contexts_[current_subpass_].SetStartTag(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002877 RecordLayoutTransitions(tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002878 RecordLoadOperations(cb_state.activeRenderPassBeginInfo.renderArea, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002879}
John Zulauf1507ee42020-05-18 11:33:09 -06002880
John Zulauffaea0ee2021-01-14 14:01:32 -07002881void RenderPassAccessContext::RecordNextSubpass(const VkRect2D &render_area, const ResourceUsageTag &prev_subpass_tag,
2882 const ResourceUsageTag &next_subpass_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002883 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauffaea0ee2021-01-14 14:01:32 -07002884 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area, attachment_views_, current_subpass_, prev_subpass_tag);
2885 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area, attachment_views_, current_subpass_, prev_subpass_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002886
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002887 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2888 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002889 current_subpass_++;
2890 assert(current_subpass_ < subpass_contexts_.size());
John Zulauffaea0ee2021-01-14 14:01:32 -07002891 subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
2892 RecordLayoutTransitions(next_subpass_tag);
2893 RecordLoadOperations(render_area, next_subpass_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002894}
2895
John Zulauf1a224292020-06-30 14:52:13 -06002896void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const VkRect2D &render_area,
2897 const ResourceUsageTag &tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002898 // Add the resolve and store accesses
John Zulauf7635de32020-05-29 17:14:15 -06002899 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area, attachment_views_, current_subpass_, tag);
John Zulaufaff20662020-06-01 14:07:58 -06002900 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area, attachment_views_, current_subpass_, tag);
John Zulauf7635de32020-05-29 17:14:15 -06002901
John Zulauf355e49b2020-04-24 15:11:15 -06002902 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002903 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002904
2905 // Add the "finalLayout" transitions to external
2906 // Get them from where we're hiding them in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002907 // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2908 // TODO: With aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2909 // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002910 const auto &final_transitions = rp_state_->subpass_transitions.back();
2911 for (const auto &transition : final_transitions) {
2912 const auto &attachment = attachment_views_[transition.attachment];
2913 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufaa97d8b2020-07-14 10:58:13 -06002914 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
John Zulauf1e331ec2020-12-04 18:29:38 -07002915 std::vector<PipelineBarrierOp> barrier_ops;
2916 barrier_ops.reserve(last_trackback.barriers.size());
2917 for (const auto &barrier : last_trackback.barriers) {
2918 barrier_ops.emplace_back(barrier, true);
2919 }
2920 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, barrier_ops, tag);
2921 external_context->UpdateResourceAccess(*attachment->image_state, attachment->normalized_subresource_range, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002922 }
2923}
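// Note: RecordBeginRenderPass / RecordNextSubpass / RecordEndRenderPass are the record-side counterparts of
// vkCmdBeginRenderPass / vkCmdNextSubpass / vkCmdEndRenderPass: begin creates per-subpass access contexts and
// records the first subpass's transitions and load ops, next-subpass applies the prior subpass's resolve/store
// accesses before advancing, and end resolves all subpass contexts (plus the finalLayout transitions) back into
// the parent command buffer context.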
2924
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002925SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags mask_param) {
2926 SyncExecScope result;
2927 result.mask_param = mask_param;
2928 result.expanded_mask = ExpandPipelineStages(queue_flags, mask_param);
2929 result.exec_scope = WithEarlierPipelineStages(result.expanded_mask);
2930 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2931 return result;
2932}
2933
2934SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags mask_param) {
2935 SyncExecScope result;
2936 result.mask_param = mask_param;
2937 result.expanded_mask = ExpandPipelineStages(queue_flags, mask_param);
2938 result.exec_scope = WithLaterPipelineStages(result.expanded_mask);
2939 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2940 return result;
2941}
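// Illustrative note: MakeSrc/MakeDst model the two halves of an execution dependency. Per the Vulkan spec, a
// source stage mask implicitly covers logically earlier stages and a destination mask logically later ones, e.g.
// a srcStageMask of VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT yields an exec_scope that also includes the vertex,
// tessellation, and geometry stages, while the same mask used as a dstStageMask expands toward
// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT and later stages.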
2942
2943SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
2944 src_exec_scope = src.exec_scope;
2945 src_access_scope = 0;
2946 dst_exec_scope = dst.exec_scope;
2947 dst_access_scope = 0;
2948}
2949
2950template <typename Barrier>
2951SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
2952 src_exec_scope = src.exec_scope;
2953 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2954 dst_exec_scope = dst.exec_scope;
2955 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2956}
2957
2958SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
2959 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
2960 src_exec_scope = src.exec_scope;
2961 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2962
2963 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
2964 dst_exec_scope = dst.exec_scope;
2965 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002966}
2967
John Zulaufb02c1eb2020-10-06 16:33:36 -06002968// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2969void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2970 for (const auto &barrier : barriers) {
2971 ApplyBarrier(barrier, layout_transition);
2972 }
2973}
2974
John Zulauf89311b42020-09-29 16:28:47 -06002975 // ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. The intended use is for
2976 // inter-subpass barriers for lazy evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2977 // lazily, so no previous access reports should need layout transitions.
John Zulaufb02c1eb2020-10-06 16:33:36 -06002978void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag &tag) {
2979 assert(!pending_layout_transition); // This should never be called in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002980 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002981 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002982 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002983 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002984 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002985 ApplyPendingBarriers(tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002986}
John Zulauf9cb530d2019-09-30 14:14:10 -06002987HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2988 HazardResult hazard;
2989 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002990 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002991 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002992 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002993 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002994 }
2995 } else {
John Zulauf361fb532020-07-22 10:45:39 -06002996 // Write operation:
2997 // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads).
2998 // If reads exist -- test only against them because either:
2999 // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
3000 // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
3001 // the current write happens after the reads, so just test the write against the reads
3002 // Otherwise test against last_write
3003 //
3004 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07003005 if (last_reads.size()) {
3006 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06003007 if (IsReadHazard(usage_stage, read_access)) {
3008 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3009 break;
3010 }
3011 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003012 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06003013 // Write-After-Write check -- if we have a previous write to test against
3014 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003015 }
3016 }
3017 return hazard;
3018}
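// Illustrative (hypothetical) back-to-back transfers with overlapping ranges and no intervening barrier that
// this check is intended to flag:
//   copy a->b; copy b->c; // READ_AFTER_WRITE on b (b written, then read)
//   copy a->b; copy c->a; // WRITE_AFTER_READ on a (a read, then written)
//   copy a->b; copy c->b; // WRITE_AFTER_WRITE on b (b written twice)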
3019
John Zulauf8e3c3e92021-01-06 11:19:36 -07003020HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering &ordering_rule) const {
3021 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06003022 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
3023 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06003024 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06003025 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003026 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
3027 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06003028 if (IsRead(usage_bit)) {
3029 // Exclude RAW if there is no write, or the write is not the most recent operation w.r.t. usage;
3030 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
3031 if (is_raw_hazard) {
3032 // NOTE: we know last_write is non-zero
3033 // See if the ordering rules save us from the simple RAW check above
3034 // First check to see if the current usage is covered by the ordering rules
3035 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
3036 const bool usage_is_ordered =
3037 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
3038 if (usage_is_ordered) {
3039 // Now see if the most recent write (or a subsequent read) is ordered
3040 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
3041 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06003042 }
3043 }
John Zulauf4285ee92020-09-23 10:20:52 -06003044 if (is_raw_hazard) {
3045 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
3046 }
John Zulauf361fb532020-07-22 10:45:39 -06003047 } else {
3048 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003049 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07003050 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06003051 // Look for any WAR hazards outside the ordered set of stages
John Zulauf4285ee92020-09-23 10:20:52 -06003052 VkPipelineStageFlags ordered_stages = 0;
3053 if (usage_write_is_ordered) {
3054 // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
3055 ordered_stages = GetOrderedStages(ordering);
3056 }
3057 // If we're tracking any reads that aren't ordered against the current write, we've got to check 'em all.
3058 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003059 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06003060 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
3061 if (IsReadHazard(usage_stage, read_access)) {
3062 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3063 break;
3064 }
John Zulaufd14743a2020-07-03 09:42:39 -06003065 }
3066 }
John Zulauf4285ee92020-09-23 10:20:52 -06003067 } else if (!(last_write_is_ordered && usage_write_is_ordered)) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003068 if (last_write.any() && IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003069 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06003070 }
John Zulauf69133422020-05-20 14:55:53 -06003071 }
3072 }
3073 return hazard;
3074}
3075
John Zulauf2f952d22020-02-10 11:34:51 -07003076// Asynchronous Hazards occur between subpasses with no connection through the DAG
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003077HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag &start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07003078 HazardResult hazard;
3079 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003080 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
3081 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
3082 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07003083 if (IsRead(usage)) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003084 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06003085 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07003086 }
3087 } else {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003088 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06003089 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07003090 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003091 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07003092 for (const auto &read_access : last_reads) {
3093 if (read_access.tag.index >= start_tag.index) {
3094 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003095 break;
3096 }
3097 }
John Zulauf2f952d22020-02-10 11:34:51 -07003098 }
3099 }
3100 return hazard;
3101}
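// Illustrative example: two subpasses with no VkSubpassDependency connecting them that both write the same
// attachment region should be reported here as WRITE_RACING_WRITE, since neither subpass's accesses are ordered
// against the other's.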
3102
John Zulauf36bcf6a2020-02-03 15:12:52 -07003103HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003104 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07003105 // Only supporting image layout transitions for now
3106 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3107 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06003108 // Only test for WAW if there are no intervening read operations.
3109 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07003110 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06003111 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07003112 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003113 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06003114 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07003115 break;
3116 }
3117 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003118 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3119 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3120 }
3121
3122 return hazard;
3123}
3124
3125HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
3126 const SyncStageAccessFlags &src_access_scope,
3127 const ResourceUsageTag &event_tag) const {
3128 // Only supporting image layout transitions for now
3129 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3130 HazardResult hazard;
3131 // Only test for WAW if there are no intervening read operations.
3132 // See DetectHazard(SyncStageAccessIndex) above for more details.
3133
John Zulaufab7756b2020-12-29 16:10:16 -07003134 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003135 // Look at the reads if any... if reads exist, they are either the reason the access is in the event
3136 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07003137 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003138 if (read_access.tag.IsBefore(event_tag)) {
3139 // The read is in the event's first synchronization scope, so we use a barrier hazard check
3140 // If the read stage is not in the src sync scope
3141 // *AND* not execution chained with an existing sync barrier (that's the or)
3142 // then the barrier access is unsafe (R/W after R)
3143 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
3144 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3145 break;
3146 }
3147 } else {
3148 // The read is not in the event's first sync scope and so is a hazard vs. the layout transition
3149 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3150 }
3151 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003152 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003153 // If there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
3154 if (write_tag.IsBefore(event_tag)) {
3155 // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
3156 // So do a normal barrier hazard check
3157 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3158 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3159 }
3160 } else {
3161 // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06003162 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3163 }
John Zulaufd14743a2020-07-03 09:42:39 -06003164 }
John Zulauf361fb532020-07-22 10:45:39 -06003165
John Zulauf0cb5be22020-01-23 12:18:22 -07003166 return hazard;
3167}
3168
John Zulauf5f13a792020-03-10 07:31:21 -06003169 // The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
3170 // transitive hazard can exist without a hazard between the earlier operations. Yes, an early hazard can mask that another
3171// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
3172void ResourceAccessState::Resolve(const ResourceAccessState &other) {
3173 if (write_tag.IsBefore(other.write_tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003174 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
3175 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06003176 *this = other;
3177 } else if (!other.write_tag.IsBefore(write_tag)) {
3178 // This is the *equals* case for write operations, we merged the write barriers and the read state (but without the
3179 // dependency chaining logic or any stage expansion)
3180 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003181 pending_write_barriers |= other.pending_write_barriers;
3182 pending_layout_transition |= other.pending_layout_transition;
3183 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003184
John Zulaufd14743a2020-07-03 09:42:39 -06003185 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07003186 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06003187 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07003188 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003189 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06003190 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06003191 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06003192 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
3193 // but we should wait on profiling data for that.
3194 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003195 auto &my_read = last_reads[my_read_index];
3196 if (other_read.stage == my_read.stage) {
3197 if (my_read.tag.IsBefore(other_read.tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003198 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06003199 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06003200 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003201 my_read.pending_dep_chain = other_read.pending_dep_chain;
3202 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
3203 // May require tracking more than one access per stage.
3204 my_read.barriers = other_read.barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003205 if (my_read.stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
3206 // Since I'm overwriting the fragment stage read, also update the input attachment info
3207 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06003208 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003209 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003210 } else if (other_read.tag.IsBefore(my_read.tag)) {
3211 // The read tags match so merge the barriers
3212 my_read.barriers |= other_read.barriers;
3213 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003214 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003215
John Zulauf5f13a792020-03-10 07:31:21 -06003216 break;
3217 }
3218 }
3219 } else {
3220 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07003221 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06003222 last_read_stages |= other_read.stage;
John Zulauf4285ee92020-09-23 10:20:52 -06003223 if (other_read.stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
John Zulauff51fbb62020-10-02 14:43:24 -06003224 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003225 }
John Zulauf5f13a792020-03-10 07:31:21 -06003226 }
3227 }
John Zulauf361fb532020-07-22 10:45:39 -06003228 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003229 } // the else clause would be that the other write is before this write... in which case we supersede the other state and
3230 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07003231
3232 // Merge first access information by making a copy of this first_accesses_ and reconstructing with a shuffle
3233 // of the copy and other into this using the UpdateFirst logic.
3234 // NOTE: All sorts of additional cleverness could be put into short circuits (for example, back is a write and is before the front
3235 // of the other first_accesses...)
3236 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
3237 FirstAccesses firsts(std::move(first_accesses_));
3238 first_accesses_.clear();
3239 first_read_stages_ = 0U;
3240 auto a = firsts.begin();
3241 auto a_end = firsts.end();
3242 for (auto &b : other.first_accesses_) {
3243 // TODO: Determine whether "IsBefore" or "IsGloballyBefore" is needed...
3244 while (a != a_end && a->tag.IsBefore(b.tag)) {
3245 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3246 ++a;
3247 }
3248 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
3249 }
3250 for (; a != a_end; ++a) {
3251 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3252 }
3253 }
John Zulauf5f13a792020-03-10 07:31:21 -06003254}
3255
John Zulauf8e3c3e92021-01-06 11:19:36 -07003256void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003257 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
3258 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003259 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003260 // Multiple outstanding reads may be of interest and do dependency chains independently
3261 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3262 const auto usage_stage = PipelineStageBit(usage_index);
3263 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003264 for (auto &read_access : last_reads) {
3265 if (read_access.stage == usage_stage) {
3266 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003267 break;
3268 }
3269 }
3270 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07003271 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003272 last_read_stages |= usage_stage;
3273 }
John Zulauf4285ee92020-09-23 10:20:52 -06003274
3275 // Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
3276 if (usage_stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
John Zulauff51fbb62020-10-02 14:43:24 -06003277 // TODO Revisit re: multiple reads for a given stage
3278 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003279 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003280 } else {
3281 // Assume write
3282 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003283 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003284 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003285 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003286}
John Zulauf5f13a792020-03-10 07:31:21 -06003287
John Zulauf89311b42020-09-29 16:28:47 -06003288// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3289 // if the last_reads/last_write were unsafe, we've reported them; in either case the prior access is irrelevant.
3290// We can overwrite them as *this* write is now after them.
3291//
3292// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003293void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag &tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003294 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06003295 last_read_stages = 0;
3296 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06003297 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06003298
3299 write_barriers = 0;
3300 write_dependency_chain = 0;
3301 write_tag = tag;
3302 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003303}
3304
John Zulauf89311b42020-09-29 16:28:47 -06003305// Apply the memory barrier without updating the existing barriers. The execution barrier
3306// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3307// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3308// replace the current write barriers or add to them, so accumulate to pending as well.
3309void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
3310 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3311 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003312 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
3313 // transition, under the theory of "most recent access". If the read/write *isn't* safe
3314 // vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
3315 // transition *as* a write and in scope with the barrier (it's before visibility).
John Zulauf4a6105a2020-11-17 15:11:05 -07003316 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06003317 pending_write_barriers |= barrier.dst_access_scope;
3318 pending_write_dep_chain |= barrier.dst_exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003319 }
John Zulauf89311b42020-09-29 16:28:47 -06003320 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3321 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003322
John Zulauf89311b42020-09-29 16:28:47 -06003323 if (!pending_layout_transition) {
3324 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3325 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003326 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003327 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufab7756b2020-12-29 16:10:16 -07003328 if (barrier.src_exec_scope & (read_access.stage | read_access.barriers)) {
3329 read_access.pending_dep_chain |= barrier.dst_exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003330 }
3331 }
John Zulaufa0a98292020-09-18 09:30:10 -06003332 }
John Zulaufa0a98292020-09-18 09:30:10 -06003333}
3334
John Zulauf4a6105a2020-11-17 15:11:05 -07003335// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
3336 // changes the "chaining" state, but to keep barriers independent we defer committing it. See discussion above.
3337void ResourceAccessState::ApplyBarrier(const ResourceUsageTag &scope_tag, const SyncBarrier &barrier, bool layout_transition) {
3338 // The scope logic for events is: if we're here, the resource usage was flagged as "in the first execution scope" at
3339 // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
3340 // in order to know if it's in the execution scope
3341 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
3342 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
3343 // errors w.r.t. "most recent" accesses.
3344 if (layout_transition || ((write_tag.IsBefore(scope_tag)) && (barrier.src_access_scope & last_write).any())) {
3345 pending_write_barriers |= barrier.dst_access_scope;
3346 pending_write_dep_chain |= barrier.dst_exec_scope;
3347 }
3348 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3349 pending_layout_transition |= layout_transition;
3350
3351 if (!pending_layout_transition) {
3352 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3353 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003354 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003355 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
3356 // NOTE: That's not really correct... this read stage might *not* have been included in the SetEvent, and the barriers
3357 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
3358 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
3359 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
3360 // capture (the specific write and read stages that *were* in scope at the moment of the SetEvent).
3361 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulaufab7756b2020-12-29 16:10:16 -07003362 if (read_access.tag.IsBefore(scope_tag) && (barrier.src_exec_scope & (read_access.stage | read_access.barriers))) {
3363 read_access.pending_dep_chain |= barrier.dst_exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07003364 }
3365 }
3366 }
3367}
John Zulauf89311b42020-09-29 16:28:47 -06003368void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag &tag) {
3369 if (pending_layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06003370 // SetWrite clobbers the read count, and thus we don't have to clear the read_state out.
3371 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003372 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf89311b42020-09-29 16:28:47 -06003373 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003374 }
John Zulauf89311b42020-09-29 16:28:47 -06003375
3376 // Apply the accumulated execution barriers (and thus update chaining information)
3377 // for a layout transition, the read count is zeroed by SetWrite, so this will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003378 for (auto &read_access : last_reads) {
3379 read_access.barriers |= read_access.pending_dep_chain;
3380 read_execution_barriers |= read_access.barriers;
3381 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003382 }
3383
3384 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3385 write_dependency_chain |= pending_write_dep_chain;
3386 write_barriers |= pending_write_barriers;
3387 pending_write_dep_chain = 0;
3388 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003389}
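// Sketch of the intended two-phase flow described above (state is a hypothetical ResourceAccessState instance):
//   for (const auto &b : barriers) state.ApplyBarrier(b, false);  // accumulate; barriers stay independent
//   state.ApplyPendingBarriers(tag);                              // commit chaining/visibility updates once
// This mirrors what ApplyBarriers(barriers, tag) does above.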
3390
John Zulauf59e25072020-07-17 10:55:21 -06003391// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003392VkPipelineStageFlags ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
John Zulauf59e25072020-07-17 10:55:21 -06003393 VkPipelineStageFlags barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003394
John Zulaufab7756b2020-12-29 16:10:16 -07003395 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003396 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003397 barriers = read_access.barriers;
3398 break;
John Zulauf59e25072020-07-17 10:55:21 -06003399 }
3400 }
John Zulauf4285ee92020-09-23 10:20:52 -06003401
John Zulauf59e25072020-07-17 10:55:21 -06003402 return barriers;
3403}
3404
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003405inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlagBits usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003406 assert(IsRead(usage));
3407 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3408 // * the previous reads are not hazards, and thus last_write must be visible and available to
3409 // any reads that happen after.
3410 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3411 // the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003412 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003413}
3414
John Zulauf8e3c3e92021-01-06 11:19:36 -07003415VkPipelineStageFlags ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003416 // Whether the stages are in the ordering scope only matters if the current write is ordered
3417 VkPipelineStageFlags ordered_stages = last_read_stages & ordering.exec_scope;
3418 // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003419 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003420 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003421 // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
3422 ordered_stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
3423 }
3424
3425 return ordered_stages;
3426}
3427
John Zulauffaea0ee2021-01-14 14:01:32 -07003428void ResourceAccessState::UpdateFirst(const ResourceUsageTag &tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
3429 // Only record until we record a write.
3430 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
3431 const VkPipelineStageFlags usage_stage = IsRead(usage_index) ? PipelineStageBit(usage_index) : 0U;
3432 if (0 == (usage_stage & first_read_stages_)) {
3433 // If this is a read we haven't seen or a write, record.
3434 first_read_stages_ |= usage_stage;
3435 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3436 }
3437 }
3438}
3439
John Zulaufd1f85d42020-04-15 12:23:15 -06003440void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003441 auto *access_context = GetAccessContextNoInsert(command_buffer);
3442 if (access_context) {
3443 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003444 }
3445}
3446
John Zulaufd1f85d42020-04-15 12:23:15 -06003447void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3448 auto access_found = cb_access_state.find(command_buffer);
3449 if (access_found != cb_access_state.end()) {
3450 access_found->second->Reset();
3451 cb_access_state.erase(access_found);
3452 }
3453}
3454
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003455void SyncValidator::ApplyGlobalBarriers(AccessContext *context, const SyncExecScope &src, const SyncExecScope &dst,
3456 uint32_t memory_barrier_count, const VkMemoryBarrier *pMemoryBarriers,
3457 const ResourceUsageTag &tag) {
John Zulauf1e331ec2020-12-04 18:29:38 -07003458 std::vector<PipelineBarrierOp> barrier_ops;
3459 barrier_ops.reserve(std::max<uint32_t>(1, memory_barrier_count)); // reserve for all barriers, or for the single forced exec barrier below
John Zulauf89311b42020-09-29 16:28:47 -06003460 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
3461 const auto &barrier = pMemoryBarriers[barrier_index];
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003462 SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf1e331ec2020-12-04 18:29:38 -07003463 barrier_ops.emplace_back(sync_barrier, false);
John Zulauf89311b42020-09-29 16:28:47 -06003464 }
3465 if (0 == memory_barrier_count) {
3466 // If there are no global memory barriers, force an exec barrier
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003467 barrier_ops.emplace_back(SyncBarrier(src, dst), false);
John Zulauf89311b42020-09-29 16:28:47 -06003468 }
John Zulauf1e331ec2020-12-04 18:29:38 -07003469 ApplyBarrierOpsFunctor<PipelineBarrierOp> barriers_functor(true /* resolve */, barrier_ops, tag);
John Zulauf540266b2020-04-06 18:54:53 -06003470 context->ApplyGlobalBarriers(barriers_functor);
John Zulauf9cb530d2019-09-30 14:14:10 -06003471}
3472
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003473void SyncValidator::ApplyBufferBarriers(AccessContext *context, const SyncExecScope &src, const SyncExecScope &dst,
3474 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003475 for (uint32_t index = 0; index < barrier_count; index++) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003476 auto barrier = barriers[index]; // barrier is a copy
John Zulauf9cb530d2019-09-30 14:14:10 -06003477 const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
3478 if (!buffer) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06003479 barrier.size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
3480 const ResourceAccessRange range = MakeRange(barrier);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003481 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07003482 const ApplyBarrierFunctor<PipelineBarrierOp> update_action({sync_barrier, false /* layout_transition */});
John Zulauf89311b42020-09-29 16:28:47 -06003483 context->UpdateResourceAccess(*buffer, range, update_action);
John Zulauf9cb530d2019-09-30 14:14:10 -06003484 }
3485}
3486
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003487void SyncValidator::ApplyImageBarriers(AccessContext *context, const SyncExecScope &src, const SyncExecScope &dst,
3488 uint32_t barrier_count, const VkImageMemoryBarrier *barriers, const ResourceUsageTag &tag) {
John Zulauf5c5e88d2019-12-26 11:22:02 -07003489 for (uint32_t index = 0; index < barrier_count; index++) {
3490 const auto &barrier = barriers[index];
3491 const auto *image = Get<IMAGE_STATE>(barrier.image);
3492 if (!image) continue;
John Zulauf540266b2020-04-06 18:54:53 -06003493 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
John Zulauf355e49b2020-04-24 15:11:15 -06003494 bool layout_transition = barrier.oldLayout != barrier.newLayout;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003495 const SyncBarrier sync_barrier(barrier, src, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07003496 const ApplyBarrierFunctor<PipelineBarrierOp> barrier_action({sync_barrier, layout_transition});
John Zulauf89311b42020-09-29 16:28:47 -06003497 context->UpdateResourceAccess(*image, subresource_range, barrier_action);
John Zulauf9cb530d2019-09-30 14:14:10 -06003498 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003499}
3500
3501bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3502 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3503 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003504 const auto *cb_context = GetAccessContext(commandBuffer);
3505 assert(cb_context);
3506 if (!cb_context) return skip;
3507 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003508
John Zulauf3d84f1b2020-03-09 13:33:25 -06003509 // If we have no previous accesses, we have no hazards
John Zulauf3d84f1b2020-03-09 13:33:25 -06003510 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003511 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003512
3513 for (uint32_t region = 0; region < regionCount; region++) {
3514 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003515 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003516 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf16adfc92020-04-08 10:28:33 -06003517 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003518 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003519 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003520 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003521 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003522 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003523 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003524 }
John Zulauf16adfc92020-04-08 10:28:33 -06003525 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003526 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf355e49b2020-04-24 15:11:15 -06003527 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003528 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003529 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003530 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003531 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003532 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003533 }
3534 }
3535 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003536 }
3537 return skip;
3538}
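// Note: commands intercepted by the sync validator are handled as a PreCallValidate*/PreCallRecord* pair --
// validation only reports hazards against the accesses recorded so far, while the record step below updates the
// access context so that subsequent commands are checked against this command's reads and writes.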
3539
3540void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3541 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003542 auto *cb_context = GetAccessContext(commandBuffer);
3543 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003544 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003545 auto *context = cb_context->GetCurrentAccessContext();
3546
John Zulauf9cb530d2019-09-30 14:14:10 -06003547 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003548 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003549
3550 for (uint32_t region = 0; region < regionCount; region++) {
3551 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003552 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003553 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003554 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003555 }
John Zulauf16adfc92020-04-08 10:28:33 -06003556 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003557 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003558 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003559 }
3560 }
3561}
3562
John Zulauf4a6105a2020-11-17 15:11:05 -07003563void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3564 // Clear out events from the command buffer contexts
3565 for (auto &cb_context : cb_access_state) {
3566 cb_context.second->RecordDestroyEvent(event);
3567 }
3568}
3569
Jeff Leger178b1e52020-10-05 12:22:23 -04003570bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3571 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3572 bool skip = false;
3573 const auto *cb_context = GetAccessContext(commandBuffer);
3574 assert(cb_context);
3575 if (!cb_context) return skip;
3576 const auto *context = cb_context->GetCurrentAccessContext();
3577
3578 // If we have no previous accesses, we have no hazards
3579 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3580 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3581
3582 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3583 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3584 if (src_buffer) {
3585 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
3586 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
3587 if (hazard.hazard) {
3588 // TODO -- add tag information to log msg when useful.
3589 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
3590 "vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
3591 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003592 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003593 }
3594 }
3595 if (dst_buffer && !skip) {
3596 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
3597 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
3598 if (hazard.hazard) {
3599 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
3600 "vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
3601 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003602 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003603 }
3604 }
3605 if (skip) break;
3606 }
3607 return skip;
3608}
3609
3610void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3611 auto *cb_context = GetAccessContext(commandBuffer);
3612 assert(cb_context);
3613 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
3614 auto *context = cb_context->GetCurrentAccessContext();
3615
3616 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3617 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3618
3619 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3620 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3621 if (src_buffer) {
3622 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003623 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003624 }
3625 if (dst_buffer) {
3626 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003627 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003628 }
3629 }
3630}
3631
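// Validation entry point for vkCmdCopyImage. Each region is checked for transfer-read hazards on the source
// subresource and transfer-write hazards on the destination subresource, using the format-adjusted destination extent.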
John Zulauf5c5e88d2019-12-26 11:22:02 -07003632bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3633 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3634 const VkImageCopy *pRegions) const {
3635 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003636 const auto *cb_access_context = GetAccessContext(commandBuffer);
3637 assert(cb_access_context);
3638 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003639
John Zulauf3d84f1b2020-03-09 13:33:25 -06003640 const auto *context = cb_access_context->GetCurrentAccessContext();
3641 assert(context);
3642 if (!context) return skip;
3643
3644 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3645 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003646 for (uint32_t region = 0; region < regionCount; region++) {
3647 const auto &copy_region = pRegions[region];
3648 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06003649 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003650 copy_region.srcOffset, copy_region.extent);
3651 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003652 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003653 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003654 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003655 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003656 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003657 }
3658
3659 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003660 VkExtent3D dst_copy_extent =
3661 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06003662 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07003663 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003664 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003665 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003666 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003667 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003668 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003669 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003670 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003671 }
3672 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003673
John Zulauf5c5e88d2019-12-26 11:22:02 -07003674 return skip;
3675}
3676
3677void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3678 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3679 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003680 auto *cb_access_context = GetAccessContext(commandBuffer);
3681 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003682 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003683 auto *context = cb_access_context->GetCurrentAccessContext();
3684 assert(context);
3685
John Zulauf5c5e88d2019-12-26 11:22:02 -07003686 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003687 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003688
3689 for (uint32_t region = 0; region < regionCount; region++) {
3690 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003691 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07003692 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
3693 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003694 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003695 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003696 VkExtent3D dst_copy_extent =
3697 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003698 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
3699 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003700 }
3701 }
3702}
3703
Jeff Leger178b1e52020-10-05 12:22:23 -04003704bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3705 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3706 bool skip = false;
3707 const auto *cb_access_context = GetAccessContext(commandBuffer);
3708 assert(cb_access_context);
3709 if (!cb_access_context) return skip;
3710
3711 const auto *context = cb_access_context->GetCurrentAccessContext();
3712 assert(context);
3713 if (!context) return skip;
3714
3715 const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3716 const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3717 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3718 const auto &copy_region = pCopyImageInfo->pRegions[region];
3719 if (src_image) {
3720 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
3721 copy_region.srcOffset, copy_region.extent);
3722 if (hazard.hazard) {
3723 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
3724 "vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
3725 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003726 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003727 }
3728 }
3729
3730 if (dst_image) {
3731 VkExtent3D dst_copy_extent =
3732 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
3733 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
3734 copy_region.dstOffset, dst_copy_extent);
3735 if (hazard.hazard) {
3736 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
3737 "vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
3738 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003739 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003740 }
3741 if (skip) break;
3742 }
3743 }
3744
3745 return skip;
3746}
3747
3748void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3749 auto *cb_access_context = GetAccessContext(commandBuffer);
3750 assert(cb_access_context);
3751 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
3752 auto *context = cb_access_context->GetCurrentAccessContext();
3753 assert(context);
3754
3755 auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3756 auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3757
3758 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3759 const auto &copy_region = pCopyImageInfo->pRegions[region];
3760 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07003761 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
3762 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003763 }
3764 if (dst_image) {
3765 VkExtent3D dst_copy_extent =
3766 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003767 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
3768 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003769 }
3770 }
3771}
3772
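// Validation entry point for vkCmdPipelineBarrier. The source stage mask is expanded over the queue's supported
// stages and widened to include logically earlier stages, and each image barrier that requests a layout transition
// is checked for a hazard against that source scope; only image layout transitions are validated here.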
John Zulauf9cb530d2019-09-30 14:14:10 -06003773bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3774 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3775 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3776 uint32_t bufferMemoryBarrierCount,
3777 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3778 uint32_t imageMemoryBarrierCount,
3779 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3780 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003781 const auto *cb_access_context = GetAccessContext(commandBuffer);
3782 assert(cb_access_context);
3783 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003784
John Zulauf3d84f1b2020-03-09 13:33:25 -06003785 const auto *context = cb_access_context->GetCurrentAccessContext();
3786 assert(context);
3787 if (!context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003788
John Zulauf3d84f1b2020-03-09 13:33:25 -06003789 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07003790 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
3791 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf0cb5be22020-01-23 12:18:22 -07003792 // Validate Image Layout transitions
3793 for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
3794 const auto &barrier = pImageMemoryBarriers[index];
3795 if (barrier.newLayout == barrier.oldLayout) continue; // Only interested in layout transitions at this point.
3796 const auto *image_state = Get<IMAGE_STATE>(barrier.image);
3797 if (!image_state) continue;
John Zulauf16adfc92020-04-08 10:28:33 -06003798 const auto hazard = context->DetectImageBarrierHazard(*image_state, src_exec_scope, src_stage_accesses, barrier);
John Zulauf0cb5be22020-01-23 12:18:22 -07003799 if (hazard.hazard) {
John Zulauf7635de32020-05-29 17:14:15 -06003800 // PHASE1 TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06003801 skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003802 "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003803 string_SyncHazard(hazard.hazard), index, report_data->FormatHandle(barrier.image).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003804 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf0cb5be22020-01-23 12:18:22 -07003805 }
3806 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003807
3808 return skip;
3809}
3810
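// Record side of vkCmdPipelineBarrier: source and destination execution scopes are built from the queue flags and
// stage masks, the buffer, image, and global barriers are applied to the access context in that order (see the
// ordering notes below), and the resulting global barriers are also applied to any recorded events.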
3811void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3812 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3813 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3814 uint32_t bufferMemoryBarrierCount,
3815 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3816 uint32_t imageMemoryBarrierCount,
3817 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003818 auto *cb_access_context = GetAccessContext(commandBuffer);
3819 assert(cb_access_context);
3820 if (!cb_access_context) return;
John Zulauf2b151bf2020-04-24 15:37:44 -06003821 const auto tag = cb_access_context->NextCommandTag(CMD_PIPELINEBARRIER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003822 auto access_context = cb_access_context->GetCurrentAccessContext();
3823 assert(access_context);
3824 if (!access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003825
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003826 auto src = SyncExecScope::MakeSrc(cb_access_context->GetQueueFlags(), srcStageMask);
3827 auto dst = SyncExecScope::MakeDst(cb_access_context->GetQueueFlags(), dstStageMask);
John Zulauf89311b42020-09-29 16:28:47 -06003828
3829 // These two apply barriers one at a time as the are restricted to the resource ranges specified per each barrier,
3830 // but do not update the dependency chain information (but set the "pending" state) // s.t. the order independence
3831 // of the barriers is maintained.
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003832 ApplyBufferBarriers(access_context, src, dst, bufferMemoryBarrierCount, pBufferMemoryBarriers);
3833 ApplyImageBarriers(access_context, src, dst, imageMemoryBarrierCount, pImageMemoryBarriers, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003834
John Zulauf89311b42020-09-29 16:28:47 -06003835    // Apply the global barriers last; since that pass walks all memory, it can also clean up the "pending" state without
                    3836    // requiring an additional pass, updating the dependency chains *last* as it goes along.
                    3837    // This is needed to guarantee order independence of the three lists.
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003838 ApplyGlobalBarriers(access_context, src, dst, memoryBarrierCount, pMemoryBarriers, tag);
John Zulauf4a6105a2020-11-17 15:11:05 -07003839
Jeremy Gebben9893daf2021-01-04 10:40:50 -07003840 cb_access_context->ApplyGlobalBarriersToEvents(src, dst);
John Zulauf9cb530d2019-09-30 14:14:10 -06003841}
3842
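// Device-creation hook. After the state tracker sets up device state, the SyncValidator intercept object is looked
// up through the layer dispatch data and command-buffer reset/free callbacks are registered so that per-command-buffer
// access state can be reset or freed along with the command buffer.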
3843void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3844 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
3845 // The state tracker sets up the device state
3846 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
3847
John Zulauf5f13a792020-03-10 07:31:21 -06003848    // Add the callback hooks for the functions that are either broadly or deeply used, and without which the
                    3849    // ValidationStateTracker refactor would be messier.
John Zulauf9cb530d2019-09-30 14:14:10 -06003850 // TODO: Find a good way to do this hooklessly.
3851 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3852 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
3853 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
3854
John Zulaufd1f85d42020-04-15 12:23:15 -06003855 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3856 sync_device_state->ResetCommandBufferCallback(command_buffer);
3857 });
3858 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3859 sync_device_state->FreeCommandBufferCallback(command_buffer);
3860 });
John Zulauf9cb530d2019-09-30 14:14:10 -06003861}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003862
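// Shared helper for the vkCmdBeginRenderPass/2/2KHR validation entry points; it resolves the render pass state and
// the command buffer access context, then defers the begin-render-pass hazard checks to that context.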
John Zulauf355e49b2020-04-24 15:11:15 -06003863bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003864 const VkSubpassBeginInfo *pSubpassBeginInfo, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003865 bool skip = false;
3866 const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
3867 auto cb_context = GetAccessContext(commandBuffer);
3868
3869 if (rp_state && cb_context) {
3870 skip |= cb_context->ValidateBeginRenderPass(*rp_state, pRenderPassBegin, pSubpassBeginInfo, func_name);
3871 }
3872
3873 return skip;
3874}
3875
3876bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3877 VkSubpassContents contents) const {
3878 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003879 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003880 subpass_begin_info.contents = contents;
3881 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, "vkCmdBeginRenderPass");
3882 return skip;
3883}
3884
3885bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003886 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003887 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
3888 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, "vkCmdBeginRenderPass2");
3889 return skip;
3890}
3891
3892bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3893 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003894 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003895 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
3896 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, "vkCmdBeginRenderPass2KHR");
3897 return skip;
3898}
3899
John Zulauf3d84f1b2020-03-09 13:33:25 -06003900void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3901 VkResult result) {
3902 // The state tracker sets up the command buffer state
3903 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3904
                    3905    // Create/initialize the structure that tracks accesses at the command buffer scope.
3906 auto cb_access_context = GetAccessContext(commandBuffer);
3907 assert(cb_access_context);
3908 cb_access_context->Reset();
3909}
3910
3911void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf355e49b2020-04-24 15:11:15 -06003912 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE command) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003913 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003914 if (cb_context) {
3915 cb_context->RecordBeginRenderPass(cb_context->NextCommandTag(command));
John Zulauf3d84f1b2020-03-09 13:33:25 -06003916 }
3917}
3918
3919void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3920 VkSubpassContents contents) {
3921 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003922 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003923 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003924 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003925}
3926
3927void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3928 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3929 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003930 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003931}
3932
3933void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3934 const VkRenderPassBeginInfo *pRenderPassBegin,
3935 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3936 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003937 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
3938}
3939
Mike Schuchardt2df08912020-12-15 16:28:09 -08003940bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3941 const VkSubpassEndInfo *pSubpassEndInfo, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003942 bool skip = false;
3943
3944 auto cb_context = GetAccessContext(commandBuffer);
3945 assert(cb_context);
3946 auto cb_state = cb_context->GetCommandBufferState();
3947 if (!cb_state) return skip;
3948
3949 auto rp_state = cb_state->activeRenderPass;
3950 if (!rp_state) return skip;
3951
3952 skip |= cb_context->ValidateNextSubpass(func_name);
3953
3954 return skip;
3955}
3956
3957bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3958 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003959 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003960 subpass_begin_info.contents = contents;
3961 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, "vkCmdNextSubpass");
3962 return skip;
3963}
3964
Mike Schuchardt2df08912020-12-15 16:28:09 -08003965bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3966 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003967 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
3968 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, "vkCmdNextSubpass2KHR");
3969 return skip;
3970}
3971
3972bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3973 const VkSubpassEndInfo *pSubpassEndInfo) const {
3974 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
3975 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, "vkCmdNextSubpass2");
3976 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003977}
3978
3979void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf355e49b2020-04-24 15:11:15 -06003980 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE command) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003981 auto cb_context = GetAccessContext(commandBuffer);
3982 assert(cb_context);
3983 auto cb_state = cb_context->GetCommandBufferState();
3984 if (!cb_state) return;
3985
3986 auto rp_state = cb_state->activeRenderPass;
3987 if (!rp_state) return;
3988
John Zulauffaea0ee2021-01-14 14:01:32 -07003989 cb_context->RecordNextSubpass(*rp_state, command);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003990}
3991
3992void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
3993 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003994 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003995 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003996 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003997}
3998
3999void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
4000 const VkSubpassEndInfo *pSubpassEndInfo) {
4001 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06004002 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004003}
4004
4005void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
4006 const VkSubpassEndInfo *pSubpassEndInfo) {
4007 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06004008 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004009}
4010
Mike Schuchardt2df08912020-12-15 16:28:09 -08004011bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
John Zulauf355e49b2020-04-24 15:11:15 -06004012 const char *func_name) const {
4013 bool skip = false;
4014
4015 auto cb_context = GetAccessContext(commandBuffer);
4016 assert(cb_context);
4017 auto cb_state = cb_context->GetCommandBufferState();
4018 if (!cb_state) return skip;
4019
4020 auto rp_state = cb_state->activeRenderPass;
4021 if (!rp_state) return skip;
4022
4023 skip |= cb_context->ValidateEndRenderpass(func_name);
4024 return skip;
4025}
4026
4027bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
4028 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
                    4029    skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, "vkCmdEndRenderPass");
4030 return skip;
4031}
4032
Mike Schuchardt2df08912020-12-15 16:28:09 -08004033bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06004034 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
                    4035    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, "vkCmdEndRenderPass2");
4036 return skip;
4037}
4038
4039bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08004040 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06004041 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
                    4042    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, "vkCmdEndRenderPass2KHR");
4043 return skip;
4044}
4045
4046void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
4047 CMD_TYPE command) {
John Zulaufe5da6e52020-03-18 15:32:18 -06004048    // Resolve all of the subpass contexts to the command buffer context
4049 auto cb_context = GetAccessContext(commandBuffer);
4050 assert(cb_context);
4051 auto cb_state = cb_context->GetCommandBufferState();
4052 if (!cb_state) return;
4053
locke-lunargaecf2152020-05-12 17:15:41 -06004054 const auto *rp_state = cb_state->activeRenderPass.get();
John Zulaufe5da6e52020-03-18 15:32:18 -06004055 if (!rp_state) return;
4056
John Zulauffaea0ee2021-01-14 14:01:32 -07004057 cb_context->RecordEndRenderPass(*rp_state, command);
John Zulaufe5da6e52020-03-18 15:32:18 -06004058}
John Zulauf3d84f1b2020-03-09 13:33:25 -06004059
John Zulauf33fc1d52020-07-17 11:01:10 -06004060// Simple heuristic rule to detect WAW operations representing algorithmically safe or incremental
                    4061// updates to a resource which do not conflict at the byte level.
                    4062// TODO: Revisit this rule to see if it needs to be tighter or looser
                    4063// TODO: Add programmatic control over suppression heuristics
4064bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
4065 return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
4066}
4067
John Zulauf3d84f1b2020-03-09 13:33:25 -06004068void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06004069 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06004070 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004071}
4072
4073void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06004074 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06004075 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004076}
4077
4078void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06004079 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06004080 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004081}
locke-lunarga19c71d2020-03-02 18:17:04 -07004082
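// Shared, region-type-templated validation for vkCmdCopyBufferToImage and vkCmdCopyBufferToImage2KHR. For each
// region, the buffer range implied by the image extent and format is checked for transfer-read hazards and the
// image subresource for transfer-write hazards.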
Jeff Leger178b1e52020-10-05 12:22:23 -04004083template <typename BufferImageCopyRegionType>
4084bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4085 VkImageLayout dstImageLayout, uint32_t regionCount,
4086 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004087 bool skip = false;
4088 const auto *cb_access_context = GetAccessContext(commandBuffer);
4089 assert(cb_access_context);
4090 if (!cb_access_context) return skip;
4091
Jeff Leger178b1e52020-10-05 12:22:23 -04004092 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4093 const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
4094
locke-lunarga19c71d2020-03-02 18:17:04 -07004095 const auto *context = cb_access_context->GetCurrentAccessContext();
4096 assert(context);
4097 if (!context) return skip;
4098
4099 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07004100 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4101
4102 for (uint32_t region = 0; region < regionCount; region++) {
4103 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07004104 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07004105 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004106 if (src_buffer) {
4107 ResourceAccessRange src_range =
4108 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
4109 hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
4110 if (hazard.hazard) {
4111 // PHASE1 TODO -- add tag information to log msg when useful.
4112 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
4113 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4114 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004115 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004116 }
4117 }
4118
4119 hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
4120 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004121 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004122 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004123 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004124 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004125 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004126 }
4127 if (skip) break;
4128 }
4129 if (skip) break;
4130 }
4131 return skip;
4132}
4133
Jeff Leger178b1e52020-10-05 12:22:23 -04004134bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4135 VkImageLayout dstImageLayout, uint32_t regionCount,
4136 const VkBufferImageCopy *pRegions) const {
4137 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
4138 COPY_COMMAND_VERSION_1);
4139}
4140
4141bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4142 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
4143 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4144 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4145 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
4146}
4147
4148template <typename BufferImageCopyRegionType>
4149void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4150 VkImageLayout dstImageLayout, uint32_t regionCount,
4151 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004152 auto *cb_access_context = GetAccessContext(commandBuffer);
4153 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004154
4155 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4156 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
4157
4158 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004159 auto *context = cb_access_context->GetCurrentAccessContext();
4160 assert(context);
4161
4162 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06004163 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004164
4165 for (uint32_t region = 0; region < regionCount; region++) {
4166 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07004167 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004168 if (src_buffer) {
4169 ResourceAccessRange src_range =
4170 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
4171 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
4172 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07004173 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
4174 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004175 }
4176 }
4177}
4178
Jeff Leger178b1e52020-10-05 12:22:23 -04004179void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4180 VkImageLayout dstImageLayout, uint32_t regionCount,
4181 const VkBufferImageCopy *pRegions) {
4182 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
4183 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
4184}
4185
4186void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4187 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
4188 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
4189 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4190 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4191 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
4192}
4193
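// Shared, region-type-templated validation for vkCmdCopyImageToBuffer and vkCmdCopyImageToBuffer2KHR: the mirror of
// the buffer-to-image case, checking transfer reads on the source image and transfer writes on the destination
// buffer range derived from the image extent and format.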
4194template <typename BufferImageCopyRegionType>
4195bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4196 VkBuffer dstBuffer, uint32_t regionCount,
4197 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004198 bool skip = false;
4199 const auto *cb_access_context = GetAccessContext(commandBuffer);
4200 assert(cb_access_context);
4201 if (!cb_access_context) return skip;
4202
Jeff Leger178b1e52020-10-05 12:22:23 -04004203 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4204 const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
4205
locke-lunarga19c71d2020-03-02 18:17:04 -07004206 const auto *context = cb_access_context->GetCurrentAccessContext();
4207 assert(context);
4208 if (!context) return skip;
4209
4210 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4211 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4212 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
4213 for (uint32_t region = 0; region < regionCount; region++) {
4214 const auto &copy_region = pRegions[region];
4215 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06004216 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07004217 copy_region.imageOffset, copy_region.imageExtent);
4218 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004219 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004220 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004221 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004222 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004223 }
John Zulauf477700e2021-01-06 11:41:49 -07004224 if (dst_mem) {
4225 ResourceAccessRange dst_range =
4226 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
4227 hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
4228 if (hazard.hazard) {
4229 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4230 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4231 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004232 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004233 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004234 }
4235 }
4236 if (skip) break;
4237 }
4238 return skip;
4239}
4240
Jeff Leger178b1e52020-10-05 12:22:23 -04004241bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
4242 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
4243 const VkBufferImageCopy *pRegions) const {
4244 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
4245 COPY_COMMAND_VERSION_1);
4246}
4247
4248bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4249 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
4250 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4251 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4252 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
4253}
4254
4255template <typename BufferImageCopyRegionType>
4256void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4257 VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
4258 CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004259 auto *cb_access_context = GetAccessContext(commandBuffer);
4260 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004261
4262 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4263 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
4264
4265 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004266 auto *context = cb_access_context->GetCurrentAccessContext();
4267 assert(context);
4268
4269 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004270 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4271 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06004272 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07004273
4274 for (uint32_t region = 0; region < regionCount; region++) {
4275 const auto &copy_region = pRegions[region];
4276 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07004277 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
4278 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004279 if (dst_buffer) {
4280 ResourceAccessRange dst_range =
4281 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
4282 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
4283 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004284 }
4285 }
4286}
4287
Jeff Leger178b1e52020-10-05 12:22:23 -04004288void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4289 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
4290 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
4291 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
4292}
4293
4294void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4295 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
4296 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
4297 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4298 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4299 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
4300}
4301
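// Shared, region-type-templated validation for vkCmdBlitImage and vkCmdBlitImage2KHR. Blit offsets may be given in
// either order, so each region's minimum corner and absolute extent are computed before detecting read hazards on
// the source image and write hazards on the destination image.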
4302template <typename RegionType>
4303bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4304 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4305 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004306 bool skip = false;
4307 const auto *cb_access_context = GetAccessContext(commandBuffer);
4308 assert(cb_access_context);
4309 if (!cb_access_context) return skip;
4310
4311 const auto *context = cb_access_context->GetCurrentAccessContext();
4312 assert(context);
4313 if (!context) return skip;
4314
4315 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4316 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4317
4318 for (uint32_t region = 0; region < regionCount; region++) {
4319 const auto &blit_region = pRegions[region];
4320 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004321 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4322 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4323 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4324 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4325 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4326 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
4327 auto hazard =
4328 context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004329 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004330 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004331 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004332 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004333 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004334 }
4335 }
4336
4337 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004338 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4339 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4340 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4341 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4342 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4343 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
4344 auto hazard =
4345 context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004346 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004347 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004348 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004349 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004350 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004351 }
4352 if (skip) break;
4353 }
4354 }
4355
4356 return skip;
4357}
4358
Jeff Leger178b1e52020-10-05 12:22:23 -04004359bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4360 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4361 const VkImageBlit *pRegions, VkFilter filter) const {
4362 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
4363 "vkCmdBlitImage");
4364}
4365
4366bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
4367 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
4368 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4369 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4370 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
4371}
4372
4373template <typename RegionType>
4374void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4375 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4376 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004377 auto *cb_access_context = GetAccessContext(commandBuffer);
4378 assert(cb_access_context);
4379 auto *context = cb_access_context->GetCurrentAccessContext();
4380 assert(context);
4381
4382 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004383 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004384
4385 for (uint32_t region = 0; region < regionCount; region++) {
4386 const auto &blit_region = pRegions[region];
4387 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004388 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4389 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4390 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4391 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4392 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4393 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
John Zulauf8e3c3e92021-01-06 11:19:36 -07004394 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
4395 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004396 }
4397 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004398 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4399 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4400 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4401 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4402 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4403 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
John Zulauf8e3c3e92021-01-06 11:19:36 -07004404 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
4405 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004406 }
4407 }
4408}
locke-lunarg36ba2592020-04-03 09:42:04 -06004409
Jeff Leger178b1e52020-10-05 12:22:23 -04004410void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4411 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4412 const VkImageBlit *pRegions, VkFilter filter) {
4413 auto *cb_access_context = GetAccessContext(commandBuffer);
4414 assert(cb_access_context);
4415 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
4416 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4417 pRegions, filter);
4418 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
4419}
4420
4421void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
4422 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
4423 auto *cb_access_context = GetAccessContext(commandBuffer);
4424 assert(cb_access_context);
4425 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
4426 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4427 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4428 pBlitImageInfo->filter, tag);
4429}
4430
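// Checks the indirect parameter buffer for hazards against indirect-command reads. When the draw structs are tightly
// packed (a single draw, or stride equal to the struct size) one contiguous range is checked; otherwise each
// stride-separated struct is checked individually and checking stops at the first hazard.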
John Zulauffaea0ee2021-01-14 14:01:32 -07004431bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4432 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
4433 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
4434 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004435 bool skip = false;
4436 if (drawCount == 0) return skip;
4437
4438 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4439 VkDeviceSize size = struct_size;
4440 if (drawCount == 1 || stride == size) {
4441 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004442 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06004443 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4444 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004445 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004446 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004447 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004448 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004449 }
4450 } else {
4451 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004452 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06004453 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4454 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004455 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004456 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
4457 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004458 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004459 break;
4460 }
4461 }
4462 }
4463 return skip;
4464}
4465
locke-lunarg61870c22020-06-09 14:51:50 -06004466void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag &tag, const VkDeviceSize struct_size,
4467 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
4468 uint32_t stride) {
locke-lunargff255f92020-05-13 18:53:52 -06004469 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4470 VkDeviceSize size = struct_size;
4471 if (drawCount == 1 || stride == size) {
4472 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004473 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004474 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004475 } else {
4476 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004477 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004478 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
4479 tag);
locke-lunargff255f92020-05-13 18:53:52 -06004480 }
4481 }
4482}
4483
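// Checks the 4-byte draw count read by the *Count indirect draw commands for a hazard against indirect-command reads.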
John Zulauffaea0ee2021-01-14 14:01:32 -07004484bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4485 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4486 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004487 bool skip = false;
4488
4489 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004490 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06004491 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4492 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004493 skip |= LogError(count_buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004494 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004495 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004496 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004497 }
4498 return skip;
4499}
4500
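// Record the corresponding 4-byte count buffer read so later commands can detect hazards against it.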
locke-lunarg61870c22020-06-09 14:51:50 -06004501void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag &tag, VkBuffer buffer, VkDeviceSize offset) {
locke-lunargff255f92020-05-13 18:53:52 -06004502 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004503 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004504 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004505}
4506
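// vkCmdDispatch reads no indirect buffer; only the descriptor resources bound at the compute bind point need
// hazard checks here.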
locke-lunarg36ba2592020-04-03 09:42:04 -06004507bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004508 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004509 const auto *cb_access_context = GetAccessContext(commandBuffer);
4510 assert(cb_access_context);
4511 if (!cb_access_context) return skip;
4512
locke-lunarg61870c22020-06-09 14:51:50 -06004513 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004514 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004515}
4516
4517void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004518 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004519 auto *cb_access_context = GetAccessContext(commandBuffer);
4520 assert(cb_access_context);
4521 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004522
locke-lunarg61870c22020-06-09 14:51:50 -06004523 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004524}
locke-lunarge1a67022020-04-29 00:15:36 -06004525
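// vkCmdDispatchIndirect additionally reads one VkDispatchIndirectCommand from 'buffer' at 'offset', which is
// validated and recorded through the indirect buffer helpers above.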
4526bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004527 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004528 const auto *cb_access_context = GetAccessContext(commandBuffer);
4529 assert(cb_access_context);
4530 if (!cb_access_context) return skip;
4531
4532 const auto *context = cb_access_context->GetCurrentAccessContext();
4533 assert(context);
4534 if (!context) return skip;
4535
locke-lunarg61870c22020-06-09 14:51:50 -06004536 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004537 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4538 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004539 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004540}
4541
4542void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004543 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004544 auto *cb_access_context = GetAccessContext(commandBuffer);
4545 assert(cb_access_context);
4546 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4547 auto *context = cb_access_context->GetCurrentAccessContext();
4548 assert(context);
4549
locke-lunarg61870c22020-06-09 14:51:50 -06004550 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4551 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004552}
4553
4554bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4555 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004556 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004557 const auto *cb_access_context = GetAccessContext(commandBuffer);
4558 assert(cb_access_context);
4559 if (!cb_access_context) return skip;
4560
locke-lunarg61870c22020-06-09 14:51:50 -06004561 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4562 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4563 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004564 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004565}
4566
4567void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4568 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004569 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004570 auto *cb_access_context = GetAccessContext(commandBuffer);
4571 assert(cb_access_context);
4572 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004573
locke-lunarg61870c22020-06-09 14:51:50 -06004574 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4575 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4576 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004577}
4578
4579bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4580 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004581 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004582 const auto *cb_access_context = GetAccessContext(commandBuffer);
4583 assert(cb_access_context);
4584 if (!cb_access_context) return skip;
4585
locke-lunarg61870c22020-06-09 14:51:50 -06004586 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4587 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4588 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004589 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004590}
4591
4592void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4593 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004594 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004595 auto *cb_access_context = GetAccessContext(commandBuffer);
4596 assert(cb_access_context);
4597 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004598
locke-lunarg61870c22020-06-09 14:51:50 -06004599 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4600 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4601 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004602}
4603
4604bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4605 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004606 bool skip = false;
4607 if (drawCount == 0) return skip;
4608
locke-lunargff255f92020-05-13 18:53:52 -06004609 const auto *cb_access_context = GetAccessContext(commandBuffer);
4610 assert(cb_access_context);
4611 if (!cb_access_context) return skip;
4612
4613 const auto *context = cb_access_context->GetCurrentAccessContext();
4614 assert(context);
4615 if (!context) return skip;
4616
locke-lunarg61870c22020-06-09 14:51:50 -06004617 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4618 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004619 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4620 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004621
 4622    // TODO: For now, we validate the whole vertex buffer. This might cause some false positives.
 4623    // The VkDrawIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4624    // We will validate the vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004625 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004626 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004627}
4628
4629void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4630 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004631 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004632 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004633 auto *cb_access_context = GetAccessContext(commandBuffer);
4634 assert(cb_access_context);
4635 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4636 auto *context = cb_access_context->GetCurrentAccessContext();
4637 assert(context);
4638
locke-lunarg61870c22020-06-09 14:51:50 -06004639 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4640 cb_access_context->RecordDrawSubpassAttachment(tag);
4641 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004642
 4643    // TODO: For now, we record the whole vertex buffer. This might cause some false positives.
 4644    // The VkDrawIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4645    // We will record the vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004646 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004647}
4648
4649bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4650 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004651 bool skip = false;
4652 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004653 const auto *cb_access_context = GetAccessContext(commandBuffer);
4654 assert(cb_access_context);
4655 if (!cb_access_context) return skip;
4656
4657 const auto *context = cb_access_context->GetCurrentAccessContext();
4658 assert(context);
4659 if (!context) return skip;
4660
locke-lunarg61870c22020-06-09 14:51:50 -06004661 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4662 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004663 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4664 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004665
 4666    // TODO: For now, we validate the whole index and vertex buffer. This might cause some false positives.
 4667    // The VkDrawIndexedIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4668    // We will validate the index and vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004669 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004670 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004671}
4672
4673void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4674 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004675 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004676 auto *cb_access_context = GetAccessContext(commandBuffer);
4677 assert(cb_access_context);
4678 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4679 auto *context = cb_access_context->GetCurrentAccessContext();
4680 assert(context);
4681
locke-lunarg61870c22020-06-09 14:51:50 -06004682 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4683 cb_access_context->RecordDrawSubpassAttachment(tag);
4684 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004685
 4686    // TODO: For now, we record the whole index and vertex buffer. This might cause some false positives.
 4687    // The VkDrawIndexedIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4688    // We will record the index and vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004689 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004690}
4691
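// Common validation for vkCmdDrawIndirectCount and its KHR/AMD aliases. The real draw count lives in the count
// buffer and is unknown at record time, so the indirect buffer is checked for up to maxDrawCount commands and the
// count buffer read itself is checked as well.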
4692bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4693 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4694 uint32_t stride, const char *function) const {
4695 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004696 const auto *cb_access_context = GetAccessContext(commandBuffer);
4697 assert(cb_access_context);
4698 if (!cb_access_context) return skip;
4699
4700 const auto *context = cb_access_context->GetCurrentAccessContext();
4701 assert(context);
4702 if (!context) return skip;
4703
locke-lunarg61870c22020-06-09 14:51:50 -06004704 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4705 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004706 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4707 maxDrawCount, stride, function);
4708 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004709
 4710    // TODO: For now, we validate the whole vertex buffer. This might cause some false positives.
 4711    // The VkDrawIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4712    // We will validate the vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004713 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004714 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004715}
4716
4717bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4718 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4719 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004720 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4721 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004722}
4723
4724void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4725 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4726 uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004727 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4728 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004729 auto *cb_access_context = GetAccessContext(commandBuffer);
4730 assert(cb_access_context);
4731 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECTCOUNT);
4732 auto *context = cb_access_context->GetCurrentAccessContext();
4733 assert(context);
4734
locke-lunarg61870c22020-06-09 14:51:50 -06004735 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4736 cb_access_context->RecordDrawSubpassAttachment(tag);
4737 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4738 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004739
 4740    // TODO: For now, we record the whole vertex buffer. This might cause some false positives.
 4741    // The VkDrawIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4742    // We will record the vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004743 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004744}
4745
4746bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4747 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4748 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004749 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4750 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004751}
4752
4753void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4754 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4755 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004756 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4757 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004758 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004759}
4760
4761bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4762 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4763 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004764 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4765 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004766}
4767
4768void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4769 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4770 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004771 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4772 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004773 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4774}
4775
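// Common validation for vkCmdDrawIndexedIndirectCount and its KHR/AMD aliases; identical to the non-indexed version
// above except for the VkDrawIndexedIndirectCommand struct size and the index buffer check.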
4776bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4777 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4778 uint32_t stride, const char *function) const {
4779 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004780 const auto *cb_access_context = GetAccessContext(commandBuffer);
4781 assert(cb_access_context);
4782 if (!cb_access_context) return skip;
4783
4784 const auto *context = cb_access_context->GetCurrentAccessContext();
4785 assert(context);
4786 if (!context) return skip;
4787
locke-lunarg61870c22020-06-09 14:51:50 -06004788 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4789 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004790 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4791 offset, maxDrawCount, stride, function);
4792 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004793
 4794    // TODO: For now, we validate the whole index and vertex buffer. This might cause some false positives.
 4795    // The VkDrawIndexedIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
 4796    // We will validate the index and vertex buffer at queue submit time in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004797 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004798 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004799}
4800
4801bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4802 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4803 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004804 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4805 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004806}
4807
4808void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4809 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4810 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004811 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4812 maxDrawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004813 auto *cb_access_context = GetAccessContext(commandBuffer);
4814 assert(cb_access_context);
4815 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECTCOUNT);
4816 auto *context = cb_access_context->GetCurrentAccessContext();
4817 assert(context);
4818
locke-lunarg61870c22020-06-09 14:51:50 -06004819 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4820 cb_access_context->RecordDrawSubpassAttachment(tag);
4821 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4822 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004823
 4824    // TODO: For now, we record the whole index and vertex buffer. This might cause some false positives.
 4825    // The VkDrawIndexedIndirectCommand buffer contents can still be changed up until vkQueueSubmit.
locke-lunarg61870c22020-06-09 14:51:50 -06004826    // We will record the index and vertex buffer at queue submit time in the future.
4827 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004828}
4829
4830bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4831 VkDeviceSize offset, VkBuffer countBuffer,
4832 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4833 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004834 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4835 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004836}
4837
4838void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4839 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4840 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004841 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4842 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004843 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4844}
4845
4846bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4847 VkDeviceSize offset, VkBuffer countBuffer,
4848 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4849 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004850 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4851 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004852}
4853
4854void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4855 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4856 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004857 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4858 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004859 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4860}
4861
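// Clears are modeled as transfer-stage writes: each requested VkImageSubresourceRange of the destination image is
// checked against the full image extent.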
4862bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4863 const VkClearColorValue *pColor, uint32_t rangeCount,
4864 const VkImageSubresourceRange *pRanges) const {
4865 bool skip = false;
4866 const auto *cb_access_context = GetAccessContext(commandBuffer);
4867 assert(cb_access_context);
4868 if (!cb_access_context) return skip;
4869
4870 const auto *context = cb_access_context->GetCurrentAccessContext();
4871 assert(context);
4872 if (!context) return skip;
4873
4874 const auto *image_state = Get<IMAGE_STATE>(image);
4875
4876 for (uint32_t index = 0; index < rangeCount; index++) {
4877 const auto &range = pRanges[index];
4878 if (image_state) {
4879 auto hazard =
4880 context->DetectHazard(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
4881 if (hazard.hazard) {
4882 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004883 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004884 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004885 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004886 }
4887 }
4888 }
4889 return skip;
4890}
4891
4892void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4893 const VkClearColorValue *pColor, uint32_t rangeCount,
4894 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004895 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004896 auto *cb_access_context = GetAccessContext(commandBuffer);
4897 assert(cb_access_context);
4898 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4899 auto *context = cb_access_context->GetCurrentAccessContext();
4900 assert(context);
4901
4902 const auto *image_state = Get<IMAGE_STATE>(image);
4903
4904 for (uint32_t index = 0; index < rangeCount; index++) {
4905 const auto &range = pRanges[index];
4906 if (image_state) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07004907 context->UpdateAccessState(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
4908 image_state->createInfo.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004909 }
4910 }
4911}
4912
4913bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4914 VkImageLayout imageLayout,
4915 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4916 const VkImageSubresourceRange *pRanges) const {
4917 bool skip = false;
4918 const auto *cb_access_context = GetAccessContext(commandBuffer);
4919 assert(cb_access_context);
4920 if (!cb_access_context) return skip;
4921
4922 const auto *context = cb_access_context->GetCurrentAccessContext();
4923 assert(context);
4924 if (!context) return skip;
4925
4926 const auto *image_state = Get<IMAGE_STATE>(image);
4927
4928 for (uint32_t index = 0; index < rangeCount; index++) {
4929 const auto &range = pRanges[index];
4930 if (image_state) {
4931 auto hazard =
4932 context->DetectHazard(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
4933 if (hazard.hazard) {
4934 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004935 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004936 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004937 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004938 }
4939 }
4940 }
4941 return skip;
4942}
4943
4944void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4945 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4946 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004947 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004948 auto *cb_access_context = GetAccessContext(commandBuffer);
4949 assert(cb_access_context);
4950 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
4951 auto *context = cb_access_context->GetCurrentAccessContext();
4952 assert(context);
4953
4954 const auto *image_state = Get<IMAGE_STATE>(image);
4955
4956 for (uint32_t index = 0; index < rangeCount; index++) {
4957 const auto &range = pRanges[index];
4958 if (image_state) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07004959 context->UpdateAccessState(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
4960 image_state->createInfo.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004961 }
4962 }
4963}
4964
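// Only the destination buffer is tracked here; stride * queryCount is used as an approximation of the written
// footprint (the exact size also depends on 'flags'). Query pool accesses are not tracked yet (see TODO below).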
4965bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
4966 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
4967 VkDeviceSize dstOffset, VkDeviceSize stride,
4968 VkQueryResultFlags flags) const {
4969 bool skip = false;
4970 const auto *cb_access_context = GetAccessContext(commandBuffer);
4971 assert(cb_access_context);
4972 if (!cb_access_context) return skip;
4973
4974 const auto *context = cb_access_context->GetCurrentAccessContext();
4975 assert(context);
4976 if (!context) return skip;
4977
4978 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4979
4980 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004981 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
locke-lunarge1a67022020-04-29 00:15:36 -06004982 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
4983 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004984 skip |=
4985 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4986 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004987 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004988 }
4989 }
locke-lunargff255f92020-05-13 18:53:52 -06004990
 4991    // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004992 return skip;
4993}
4994
4995void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
4996 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4997 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004998 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
4999 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06005000 auto *cb_access_context = GetAccessContext(commandBuffer);
5001 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06005002 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06005003 auto *context = cb_access_context->GetCurrentAccessContext();
5004 assert(context);
5005
5006 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5007
5008 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005009 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
John Zulauf8e3c3e92021-01-06 11:19:36 -07005010 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005011 }
locke-lunargff255f92020-05-13 18:53:52 -06005012
 5013    // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06005014}
5015
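// vkCmdFillBuffer is a transfer-stage write. Unlike vkCmdUpdateBuffer, VK_WHOLE_SIZE is a legal size here, so the
// buffer-aware MakeRange overload is used to bound the range to the buffer's actual size.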
5016bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5017 VkDeviceSize size, uint32_t data) const {
5018 bool skip = false;
5019 const auto *cb_access_context = GetAccessContext(commandBuffer);
5020 assert(cb_access_context);
5021 if (!cb_access_context) return skip;
5022
5023 const auto *context = cb_access_context->GetCurrentAccessContext();
5024 assert(context);
5025 if (!context) return skip;
5026
5027 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5028
5029 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005030 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
locke-lunarge1a67022020-04-29 00:15:36 -06005031 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
5032 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005033 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005034 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005035 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005036 }
5037 }
5038 return skip;
5039}
5040
5041void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5042 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005043 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06005044 auto *cb_access_context = GetAccessContext(commandBuffer);
5045 assert(cb_access_context);
5046 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
5047 auto *context = cb_access_context->GetCurrentAccessContext();
5048 assert(context);
5049
5050 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5051
5052 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005053 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07005054 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005055 }
5056}
5057
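// vkCmdResolveImage reads each source region and writes each destination region at the transfer stage; regions are
// checked individually so the error message can report the offending region index.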
5058bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5059 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5060 const VkImageResolve *pRegions) const {
5061 bool skip = false;
5062 const auto *cb_access_context = GetAccessContext(commandBuffer);
5063 assert(cb_access_context);
5064 if (!cb_access_context) return skip;
5065
5066 const auto *context = cb_access_context->GetCurrentAccessContext();
5067 assert(context);
5068 if (!context) return skip;
5069
5070 const auto *src_image = Get<IMAGE_STATE>(srcImage);
5071 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
5072
5073 for (uint32_t region = 0; region < regionCount; region++) {
5074 const auto &resolve_region = pRegions[region];
5075 if (src_image) {
5076 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, resolve_region.srcSubresource,
5077 resolve_region.srcOffset, resolve_region.extent);
5078 if (hazard.hazard) {
5079 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005080 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005081 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005082 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005083 }
5084 }
5085
5086 if (dst_image) {
5087 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, resolve_region.dstSubresource,
5088 resolve_region.dstOffset, resolve_region.extent);
5089 if (hazard.hazard) {
5090 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005091 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005092 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005093 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005094 }
5095 if (skip) break;
5096 }
5097 }
5098
5099 return skip;
5100}
5101
5102void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5103 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5104 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005105 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5106 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06005107 auto *cb_access_context = GetAccessContext(commandBuffer);
5108 assert(cb_access_context);
5109 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
5110 auto *context = cb_access_context->GetCurrentAccessContext();
5111 assert(context);
5112
5113 auto *src_image = Get<IMAGE_STATE>(srcImage);
5114 auto *dst_image = Get<IMAGE_STATE>(dstImage);
5115
5116 for (uint32_t region = 0; region < regionCount; region++) {
5117 const auto &resolve_region = pRegions[region];
5118 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005119 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
5120 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005121 }
5122 if (dst_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005123 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
5124 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005125 }
5126 }
5127}
5128
Jeff Leger178b1e52020-10-05 12:22:23 -04005129bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5130 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
5131 bool skip = false;
5132 const auto *cb_access_context = GetAccessContext(commandBuffer);
5133 assert(cb_access_context);
5134 if (!cb_access_context) return skip;
5135
5136 const auto *context = cb_access_context->GetCurrentAccessContext();
5137 assert(context);
5138 if (!context) return skip;
5139
5140 const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5141 const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
5142
5143 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5144 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5145 if (src_image) {
5146 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, resolve_region.srcSubresource,
5147 resolve_region.srcOffset, resolve_region.extent);
5148 if (hazard.hazard) {
5149 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
5150 "vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
5151 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005152 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005153 }
5154 }
5155
5156 if (dst_image) {
5157 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, resolve_region.dstSubresource,
5158 resolve_region.dstOffset, resolve_region.extent);
5159 if (hazard.hazard) {
5160 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
5161 "vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
5162 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005163 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005164 }
5165 if (skip) break;
5166 }
5167 }
5168
5169 return skip;
5170}
5171
5172void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5173 const VkResolveImageInfo2KHR *pResolveImageInfo) {
5174 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
5175 auto *cb_access_context = GetAccessContext(commandBuffer);
5176 assert(cb_access_context);
5177 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
5178 auto *context = cb_access_context->GetCurrentAccessContext();
5179 assert(context);
5180
5181 auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5182 auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
5183
5184 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5185 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5186 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005187 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
5188 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005189 }
5190 if (dst_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005191 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
5192 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005193 }
5194 }
5195}
5196
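// vkCmdUpdateBuffer writes dataSize bytes at dstOffset; VK_WHOLE_SIZE is not a valid size for this command, so the
// plain offset/size range is sufficient.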
locke-lunarge1a67022020-04-29 00:15:36 -06005197bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5198 VkDeviceSize dataSize, const void *pData) const {
5199 bool skip = false;
5200 const auto *cb_access_context = GetAccessContext(commandBuffer);
5201 assert(cb_access_context);
5202 if (!cb_access_context) return skip;
5203
5204 const auto *context = cb_access_context->GetCurrentAccessContext();
5205 assert(context);
5206 if (!context) return skip;
5207
5208 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5209
5210 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005211 // VK_WHOLE_SIZE not allowed
5212 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
locke-lunarge1a67022020-04-29 00:15:36 -06005213 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
5214 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005215 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005216 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005217 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005218 }
5219 }
5220 return skip;
5221}
5222
5223void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5224 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005225 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06005226 auto *cb_access_context = GetAccessContext(commandBuffer);
5227 assert(cb_access_context);
5228 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
5229 auto *context = cb_access_context->GetCurrentAccessContext();
5230 assert(context);
5231
5232 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5233
5234 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005235 // VK_WHOLE_SIZE not allowed
5236 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
John Zulauf8e3c3e92021-01-06 11:19:36 -07005237 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005238 }
5239}
locke-lunargff255f92020-05-13 18:53:52 -06005240
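// vkCmdWriteBufferMarkerAMD stores a single 32-bit marker, hence the fixed 4-byte range.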
5241bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5242 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5243 bool skip = false;
5244 const auto *cb_access_context = GetAccessContext(commandBuffer);
5245 assert(cb_access_context);
5246 if (!cb_access_context) return skip;
5247
5248 const auto *context = cb_access_context->GetCurrentAccessContext();
5249 assert(context);
5250 if (!context) return skip;
5251
5252 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5253
5254 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005255 const ResourceAccessRange range = MakeRange(dstOffset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06005256 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
5257 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005258 skip |=
5259 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5260 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005261 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06005262 }
5263 }
5264 return skip;
5265}
5266
5267void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5268 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005269 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06005270 auto *cb_access_context = GetAccessContext(commandBuffer);
5271 assert(cb_access_context);
5272 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5273 auto *context = cb_access_context->GetCurrentAccessContext();
5274 assert(context);
5275
5276 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5277
5278 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005279 const ResourceAccessRange range = MakeRange(dstOffset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07005280 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005281 }
5282}
John Zulauf49beb112020-11-04 16:06:31 -07005283
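// The event commands delegate to the command buffer access context, which owns the per-event SyncEventState used to
// validate set/reset/wait ordering.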
5284bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
5285 bool skip = false;
5286 const auto *cb_context = GetAccessContext(commandBuffer);
5287 assert(cb_context);
5288 if (!cb_context) return skip;
5289
5290 return cb_context->ValidateSetEvent(commandBuffer, event, stageMask);
5291}
5292
5293void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5294 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
5295 auto *cb_context = GetAccessContext(commandBuffer);
5296 assert(cb_context);
5297 if (!cb_context) return;
John Zulauf4a6105a2020-11-17 15:11:05 -07005298 const auto tag = cb_context->NextCommandTag(CMD_SETEVENT);
5299 cb_context->RecordSetEvent(commandBuffer, event, stageMask, tag);
John Zulauf49beb112020-11-04 16:06:31 -07005300}
5301
5302bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
5303 VkPipelineStageFlags stageMask) const {
5304 bool skip = false;
5305 const auto *cb_context = GetAccessContext(commandBuffer);
5306 assert(cb_context);
5307 if (!cb_context) return skip;
5308
5309 return cb_context->ValidateResetEvent(commandBuffer, event, stageMask);
5310}
5311
5312void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5313 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
5314 auto *cb_context = GetAccessContext(commandBuffer);
5315 assert(cb_context);
5316 if (!cb_context) return;
5317
5318 cb_context->RecordResetEvent(commandBuffer, event, stageMask);
5319}
5320
5321bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5322 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5323 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5324 uint32_t bufferMemoryBarrierCount,
5325 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5326 uint32_t imageMemoryBarrierCount,
5327 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
5328 bool skip = false;
5329 const auto *cb_context = GetAccessContext(commandBuffer);
5330 assert(cb_context);
5331 if (!cb_context) return skip;
5332
John Zulauf4a6105a2020-11-17 15:11:05 -07005333 return cb_context->ValidateWaitEvents(eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
5334 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
John Zulauf49beb112020-11-04 16:06:31 -07005335 pImageMemoryBarriers);
5336}
5337
5338void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5339 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5340 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5341 uint32_t bufferMemoryBarrierCount,
5342 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5343 uint32_t imageMemoryBarrierCount,
5344 const VkImageMemoryBarrier *pImageMemoryBarriers) {
5345 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5346 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
5347 imageMemoryBarrierCount, pImageMemoryBarriers);
5348
5349 auto *cb_context = GetAccessContext(commandBuffer);
5350 assert(cb_context);
5351 if (!cb_context) return;
5352
John Zulauf4a6105a2020-11-17 15:11:05 -07005353 const auto tag = cb_context->NextCommandTag(CMD_WAITEVENTS);
John Zulauf49beb112020-11-04 16:06:31 -07005354 cb_context->RecordWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5355 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
John Zulauf4a6105a2020-11-17 15:11:05 -07005356 pImageMemoryBarriers, tag);
5357}
5358
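// Clear the accesses captured in the event's first synchronization scope for every address type and reset the
// execution scope, returning the event to its default (unsignaled) tracking state.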
5359void SyncEventState::ResetFirstScope() {
5360 for (const auto address_type : kAddressTypes) {
5361 first_scope[static_cast<size_t>(address_type)].clear();
5362 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005363 scope = SyncExecScope();
John Zulauf4a6105a2020-11-17 15:11:05 -07005364}
5365
5366// Keep the "ignore this event" logic in same place for ValidateWait and RecordWait to use
5367SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(VkPipelineStageFlags srcStageMask) const {
5368 IgnoreReason reason = NotIgnored;
5369
5370 if (last_command == CMD_RESETEVENT && !HasBarrier(0U, 0U)) {
5371 reason = ResetWaitRace;
5372 } else if (unsynchronized_set) {
5373 reason = SetRace;
5374 } else {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005375 const VkPipelineStageFlags missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07005376 if (missing_bits) reason = MissingStageBits;
5377 }
5378
5379 return reason;
5380}
5381
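// True when ordering with the last event command is already guaranteed: there was no prior command, the caller's
// stageMask includes ALL_COMMANDS, or a recorded barrier covers the requested execution scope (or ALL_COMMANDS).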
5382bool SyncEventState::HasBarrier(VkPipelineStageFlags stageMask, VkPipelineStageFlags exec_scope_arg) const {
5383 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
5384 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
5385 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07005386}