/* Copyright (c) 2019-2021 The Khronos Group Inc.
 * Copyright (c) 2019-2021 Valve Corporation
 * Copyright (c) 2019-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Locke Lin <locke@lunarg.com>
 * Author: Jeremy Gebben <jeremyg@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"

const static std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
    AccessAddressType::kLinear, AccessAddressType::kIdealized};

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
            break;
        case SyncHazard::READ_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
            break;
        case SyncHazard::WRITE_RACING_READ:
            return true;
            break;
        default:
            assert(0);
    }
    return false;
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
    // Return the info for the first bit found
    const SyncStageAccessInfoType *info = nullptr;
    for (size_t i = 0; i < flags.size(); i++) {
        if (flags.test(i)) {
            info = &syncStageAccessInfoByStageAccessIndex[i];
            break;
        }
    }
    return info;
}

static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
    std::string out_str;
    if (flags.none()) {
        out_str = "0";
    } else {
        for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
            const auto &info = syncStageAccessInfoByStageAccessIndex[i];
            if ((flags & info.stage_access_bit).any()) {
                if (!out_str.empty()) {
                    out_str.append(sep);
                }
                out_str.append(info.name);
            }
        }
        if (out_str.length() == 0) {
            out_str.append("Unhandled SyncStageAccess");
        }
    }
    return out_str;
}

static std::string string_UsageTag(const ResourceUsageTag &tag) {
    std::stringstream out;

    out << "command: " << CommandTypeString(tag.command);
    out << ", seq_no: " << tag.seq_num;
    if (tag.sub_command != 0) {
        out << ", subcmd: " << tag.sub_command;
    }
    return out.str();
}

std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
    const auto &tag = hazard.tag;
    assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
    const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
    std::stringstream out;
    const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
    const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
    out << "(usage: " << usage_info.name << ", prior_usage: " << stage_access_name;
    if (IsHazardVsRead(hazard.hazard)) {
        const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
        out << ", read_barriers: " << string_VkPipelineStageFlags(barriers);
    } else {
        SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
        out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
    }

    // PHASE2 TODO -- add command buffer and reset from secondary if applicable
    out << ", " << string_UsageTag(tag) << ", reset_no: " << reset_count_;
    return out.str();
}

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags kColorAttachmentExecScope = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT;  // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;

ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
    {{0U, SyncStageAccessFlags()},
     {kColorAttachmentExecScope, kColorAttachmentAccessScope},
     {kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
     {kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};
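// Presumably indexed by SyncOrdering: the zero entry covers accesses with no ordering guarantees, and the remaining
// entries correspond to SyncOrdering::kColorAttachment, SyncOrdering::kDepthStencilAttachment, and SyncOrdering::kRaster
// as used by the resolve/load/store helpers below.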

// Sometimes we have an internal access conflict, and we use kCurrentCommandTag to set and detect in temporary/proxy contexts
static const ResourceUsageTag kCurrentCommandTag(ResourceUsageTag::kMaxIndex, ResourceUsageTag::kMaxCount,
                                                 ResourceUsageTag::kMaxCount, CMD_NONE);

static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) {
    return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
}

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.binding.mem_state; }

inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}

static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
    return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}

static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
    return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}

// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
// Constructor() -- initializes the generator to point to the begin of the space declared.
// * -- the current range of the generator; an empty range signifies end
// ++ -- advance to the next non-empty range (or end)
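//
// A minimal usage sketch (illustrative only; names and values are hypothetical):
//     EventSimpleRangeGenerator gen(event_scope_map, ResourceAccessRange(0, 256));
//     for (; gen->non_empty(); ++gen) {
//         Process(*gen);  // each dereference yields a non-empty intersection range
//     }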

// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
  public:
    SingleRangeGenerator(const KeyType &range) : current_(range) {}
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    SingleRangeGenerator &operator++() {
        current_ = KeyType();  // just one real range
        return *this;
    }

    bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }

  private:
    SingleRangeGenerator() = default;
    const KeyType range_;
    KeyType current_;
};

// Generate the ranges that are the intersection of range and the entries in the FilterMap
template <typename FilterMap, typename KeyType = typename FilterMap::key_type>
class FilteredRangeGenerator {
  public:
    FilteredRangeGenerator(const FilterMap &filter, const KeyType &range)
        : range_(range), filter_(&filter), filter_pos_(), current_() {
        SeekBegin();
    }
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredRangeGenerator &operator++() {
        ++filter_pos_;
        UpdateCurrent();
        return *this;
    }

    bool operator==(const FilteredRangeGenerator &other) const { return current_ == other.current_; }

  private:
    FilteredRangeGenerator() = default;
    void UpdateCurrent() {
        if (filter_pos_ != filter_->cend()) {
            current_ = range_ & filter_pos_->first;
        } else {
            current_ = KeyType();
        }
    }
    void SeekBegin() {
        filter_pos_ = filter_->lower_bound(range_);
        UpdateCurrent();
    }
    const KeyType range_;
    const FilterMap *filter_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};
using EventSimpleRangeGenerator = FilteredRangeGenerator<SyncEventState::ScopeMap>;

// Templated to allow for different Range generators or map sources...

// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
template <typename FilterMap, typename RangeGen, typename KeyType = typename FilterMap::key_type>
class FilteredGeneratorGenerator {
  public:
    FilteredGeneratorGenerator(const FilterMap &filter, RangeGen &gen) : filter_(&filter), gen_(&gen), filter_pos_(), current_() {
        SeekBegin();
    }
    const KeyType &operator*() const { return current_; }
    const KeyType *operator->() const { return &current_; }
    FilteredGeneratorGenerator &operator++() {
        KeyType gen_range = GenRange();
        KeyType filter_range = FilterRange();
        current_ = KeyType();
        while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
            if (gen_range.end > filter_range.end) {
                // if the generated range is beyond the filter_range, advance the filter range
                filter_range = AdvanceFilter();
            } else {
                gen_range = AdvanceGen();
            }
            current_ = gen_range & filter_range;
        }
        return *this;
    }

    bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }

  private:
    KeyType AdvanceFilter() {
        ++filter_pos_;
        auto filter_range = FilterRange();
        if (filter_range.valid()) {
            FastForwardGen(filter_range);
        }
        return filter_range;
    }
    KeyType AdvanceGen() {
        ++(*gen_);
        auto gen_range = GenRange();
        if (gen_range.valid()) {
            FastForwardFilter(gen_range);
        }
        return gen_range;
    }

    KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
    KeyType GenRange() const { return *(*gen_); }

    KeyType FastForwardFilter(const KeyType &range) {
        auto filter_range = FilterRange();
        int retry_count = 0;
        const static int kRetryLimit = 2;  // TODO -- determine whether this limit is optimal
        while (!filter_range.empty() && (filter_range.end <= range.begin)) {
            if (retry_count < kRetryLimit) {
                ++filter_pos_;
                filter_range = FilterRange();
                retry_count++;
            } else {
                // Okay we've tried walking, do a seek.
                filter_pos_ = filter_->lower_bound(range);
                break;
            }
        }
        return FilterRange();
    }

    // TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk
    // faster.
    KeyType FastForwardGen(const KeyType &range) {
        auto gen_range = GenRange();
        while (!gen_range.empty() && (gen_range.end <= range.begin)) {
            ++(*gen_);
            gen_range = GenRange();
        }
        return gen_range;
    }

    void SeekBegin() {
        auto gen_range = GenRange();
        if (gen_range.empty()) {
            current_ = KeyType();
            filter_pos_ = filter_->cend();
        } else {
            filter_pos_ = filter_->lower_bound(gen_range);
            current_ = gen_range & FilterRange();
        }
    }

    FilteredGeneratorGenerator() = default;
    const FilterMap *filter_;
    RangeGen *const gen_;
    typename FilterMap::const_iterator filter_pos_;
    KeyType current_;
};

using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;

// Expand the pipeline stages without regard to whether they are valid w.r.t. queue or extension
VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
    VkPipelineStageFlags expanded = stage_mask;
    if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
            if (all_commands.first & queue_flags) {
                expanded |= all_commands.second;
            }
        }
    }
    if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
    }
    return expanded;
}
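// For example (illustrative; the exact expansion comes from syncAllCommandStagesByQueueFlags): on a graphics-capable
// queue, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is replaced by the union of the individual graphics stages (minus HOST),
// and VK_PIPELINE_STAGE_ALL_COMMANDS_BIT by all stages supported by the queue's flags.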

VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
                                           const std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
    VkPipelineStageFlags unscanned = stage_mask;
    VkPipelineStageFlags related = 0;
    for (const auto &entry : map) {
        const auto &stage = entry.first;
        if (stage & unscanned) {
            related = related | entry.second;
            unscanned = unscanned & ~stage;
            if (!unscanned) break;
        }
    }
    return related;
}

VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
}

VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
}

static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());

ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
                                   VkDeviceSize stride) {
    VkDeviceSize range_start = offset + first_index * stride;
    VkDeviceSize range_size = 0;
    if (count == UINT32_MAX) {
        range_size = buf_whole_size - range_start;
    } else {
        range_size = count * stride;
    }
    return MakeRange(range_start, range_size);
}
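// Worked example (illustrative values): offset=256, first_index=2, count=4, stride=16 gives
//     range_start = 256 + 2 * 16 = 288 and range_size = 4 * 16 = 64, i.e. the range [288, 352);
// count == UINT32_MAX instead extends the range to the end of the buffer (buf_whole_size).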

SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
                                                             VkShaderStageFlagBits stage_flag) {
    if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
        assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
        return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
    }
    auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
    if (stage_access == syncStageAccessMaskByShaderStage.end()) {
        assert(0);
    }
    if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
        return stage_access->second.uniform_read;
    }

    // If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
    // Because if a write hazard happens, a read hazard might or might not happen.
    // But if a write hazard doesn't happen, a read hazard cannot happen either.
    if (descriptor_data.is_writable) {
        return stage_access->second.shader_write;
    }
    return stage_access->second.shader_read;
}
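// For example (illustrative): a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER used from the fragment stage maps to that stage's
// uniform_read access, while a writable descriptor (e.g. a storage buffer) maps to shader_write, since per the
// reasoning above the corresponding read hazard is subsumed by the write hazard check.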

bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
    return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
            image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL)
               ? true
               : false;
}

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                Action &action) {
    // At this point the "apply over range" logic only supports a single memory binding
    if (!SimpleBinding(image_state)) return;
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    const auto base_address = ResourceBaseAddress(image_state);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent, base_address);
    for (; range_gen->non_empty(); ++range_gen) {
        action(*range_gen);
    }
}

// Traverse the attachment resolves for a specific subpass, and do action() to them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
                      const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass) {
    VkExtent3D extent = CastTo3D(render_area.extent);
    VkOffset3D offset = CastTo3D(render_area.offset);
    const auto &rp_ci = rp_state.createInfo;
    const auto *attachment_ci = rp_ci.pAttachments;
    const auto &subpass_ci = rp_ci.pSubpasses[subpass];

    // Color resolves -- require an inuse color attachment and a matching inuse resolve attachment
    const auto *color_attachments = subpass_ci.pColorAttachments;
    const auto *color_resolve = subpass_ci.pResolveAttachments;
    if (color_resolve && color_attachments) {
        for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
            const auto &color_attach = color_attachments[i].attachment;
            const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
            if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
                action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kColorAttachment, offset, extent, 0);
                action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
                       SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment, offset, extent, 0);
            }
        }
    }

    // Depth stencil resolve only if the extension is present
    const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
    if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
        (ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
        (subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
        const auto src_ci = attachment_ci[src_at];
        // The formats are required to match so we can pick either
        const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
        const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
        const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
        VkImageAspectFlags aspect_mask = 0u;

        // Figure out which aspects are actually touched during resolve operations
        const char *aspect_string = nullptr;
        if (resolve_depth && resolve_stencil) {
            // Validate all aspects together
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "depth/stencil";
        } else if (resolve_depth) {
            // Validate depth only
            aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
            aspect_string = "depth";
        } else if (resolve_stencil) {
            // Validate all stencil only
            aspect_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
            aspect_string = "stencil";
        }

        if (aspect_mask) {
            action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster, offset, extent, aspect_mask);
            action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at],
                   SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, offset, extent, aspect_mask);
        }
    }
}

// Action for validating resolve operations
class ValidateResolveAction {
  public:
    ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
                          const CommandBufferAccessContext &cb_context, const char *func_name)
        : render_pass_(render_pass),
          subpass_(subpass),
          context_(context),
          cb_context_(cb_context),
          func_name_(func_name),
          skip_(false) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        HazardResult hazard;
        hazard = context_.DetectHazard(view, current_usage, ordering_rule, offset, extent, aspect_mask);
        if (hazard.hazard) {
            skip_ |=
                cb_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
                                                    "%s: Hazard %s in subpass %" PRIu32 " during %s %s, from attachment %" PRIu32
                                                    " to resolve attachment %" PRIu32 ". Access info %s.",
                                                    func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
                                                    attachment_name, src_at, dst_at, cb_context_.FormatUsage(hazard).c_str());
        }
    }
    // Providing a mechanism for the constructing caller to get the result of the validation
    bool GetSkip() const { return skip_; }

  private:
    VkRenderPass render_pass_;
    const uint32_t subpass_;
    const AccessContext &context_;
    const CommandBufferAccessContext &cb_context_;
    const char *func_name_;
    bool skip_;
};

// Update action for resolve operations
class UpdateStateResolveAction {
  public:
    UpdateStateResolveAction(AccessContext &context, const ResourceUsageTag &tag) : context_(context), tag_(tag) {}
    void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
                    const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
                    const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
        // Ignores validation only arguments...
        context_.UpdateAccessState(view, current_usage, ordering_rule, offset, extent, aspect_mask, tag_);
    }

  private:
    AccessContext &context_;
    const ResourceUsageTag &tag_;
};

void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
                       const SyncStageAccessFlags &prior_, const ResourceUsageTag &tag_) {
    access_state = std::unique_ptr<const ResourceAccessState>(new ResourceAccessState(*access_state_));
    usage_index = usage_index_;
    hazard = hazard_;
    prior_access = prior_;
    tag = tag_;
}

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        const auto prev_pass = prev_dep.first->pass;
        const auto &prev_barriers = prev_dep.second;
        assert(prev_dep.second.size());
        prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
        prev_by_subpass_[prev_pass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(&contexts[async_subpass]);
    }
    if (subpass_dep.barrier_from_external.size()) {
        src_external_ = TrackBack(external_context, queue_flags, subpass_dep.barrier_from_external);
    }
    if (subpass_dep.barrier_to_external.size()) {
        dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResolvePreviousAccess(type, range, &descent_map, nullptr);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

template <typename Action>
void AccessContext::ForAll(Action &&action) {
    for (const auto address_type : kAddressTypes) {
        auto &accesses = GetAccessStateMap(address_type);
        for (const auto &access : accesses) {
            action(address_type, access);
        }
    }
}

// A recursive range walker for hazard detection, first for the current context and then recursively (DetectHazardRecur)
// to walk the DAG of the contexts (for example subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;

    const auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);
    ResourceAccessRange gap = {range.begin, range.begin};

    for (auto pos = from; pos != to; ++pos) {
        // Cover any leading gap, or gap between entries
        if (detect_prev) {
            // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
            // Cover any leading gap, or gap between entries
            gap.end = pos->first.begin;  // We know this begin is < range.end
            if (gap.non_empty()) {
                // Recur on all gaps
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
            // Set up for the next gap. If pos..end is >= range.end, loop will exit, and trailing gap will be empty
            gap.begin = pos->first.end;
        }

        hazard = detector.Detect(pos);
        if (hazard.hazard) return hazard;
    }

    if (detect_prev) {
        // Detect in the trailing empty as needed
        gap.end = range.end;
        if (gap.non_empty()) {
            hazard = DetectPreviousHazard(type, detector, gap);
        }
    }

    return hazard;
}

// A non-recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);

    HazardResult hazard;
    for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
        hazard = detector.DetectAsync(pos, start_tag_);
    }

    return hazard;
}

struct ApplySubpassTransitionBarriersAction {
    explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        access->ApplyBarriers(barriers, true);
    }
    const std::vector<SyncBarrier> &barriers;
};

struct ApplyTrackbackBarriersAction {
    explicit ApplyTrackbackBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
    void operator()(ResourceAccessState *access) const {
        assert(access);
        assert(!access->HasPendingState());
        access->ApplyBarriers(barriers, false);
        access->ApplyPendingBarriers(kCurrentCommandTag);
    }
    const std::vector<SyncBarrier> &barriers;
};

// Splits a single map entry into pieces matching the entries in [first, last); the total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing to dest; first and last must be iterators pointing to a
// *different* map from dest.
// Returns the position past the last resolved range -- the entry covering the remainder of entry->first not included in the
// range [first, last)
template <typename BarrierAction>
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
                              ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
                              BarrierAction &barrier_action) {
    auto at = entry;
    for (auto pos = first; pos != last; ++pos) {
        // Every member of the input iterator range must fit within the remaining portion of entry
        assert(at->first.includes(pos->first));
        assert(at != dest->end());
        // Trim up at to the same size as the entry to resolve
        at = sparse_container::split(at, *dest, pos->first);
        auto access = pos->second;  // intentional copy
        barrier_action(&access);
        at->second.Resolve(access);
        ++at;  // Go to the remaining unused section of entry
    }
}

static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
    SyncBarrier merged = {};
    for (const auto &barrier : barriers) {
        merged.Merge(barrier);
    }
    return merged;
}

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
                                       ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
                                       bool recur_to_infill) const {
    if (!range.non_empty()) return;

    ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
    while (current->range.non_empty() && range.includes(current->range.begin)) {
        const auto current_range = current->range & range;
        if (current->pos_B->valid) {
            const auto &src_pos = current->pos_B->lower_bound;
            auto access = src_pos->second;  // intentional copy
            barrier_action(&access);

            if (current->pos_A->valid) {
                const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
                trimmed->second.Resolve(access);
                current.invalidate_A(trimmed);
            } else {
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the insert segment
            }
        } else {
            // we have to descend to fill this gap
            if (recur_to_infill) {
                if (current->pos_A->valid) {
                    // Dest is valid, so we need to accumulate along the DAG and then resolve... in an N-to-1 resolve operation
                    ResourceAccessRangeMap gap_map;
                    ResolvePreviousAccess(type, current_range, &gap_map, infill_state);
                    ResolveMapToEntry(resolve_map, current->pos_A->lower_bound, gap_map.begin(), gap_map.end(), barrier_action);
                } else {
                    // There isn't anything in dest in current_range, so we can accumulate directly into it.
                    ResolvePreviousAccess(type, current_range, resolve_map, infill_state);
                    // Need to apply the barrier to the accesses we accumulated, noting that we haven't updated current
                    for (auto pos = resolve_map->lower_bound(current_range); pos != current->pos_A->lower_bound; ++pos) {
                        barrier_action(&pos->second);
                    }
                }
                // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
                // iteration of the outer while.

                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
                // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
                // we stepped on the dest map
                const auto seek_to = current_range.end - 1;  // The subtraction is safe as range can't be empty (loop condition)
                current.invalidate_A();  // Changes current->range
                current.seek(seek_to);
            } else if (!current->pos_A->valid && infill_state) {
                // If we didn't find anything in the current range, and we aren't recurring... we infill if required
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the correct segment after insert
            }
        }
        ++current;
    }

    // Infill if range goes past both the current and resolve map prior contents
    if (recur_to_infill && (current->range.end < range.end)) {
        ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
        ResourceAccessRangeMap gap_map;
        const auto the_end = resolve_map->end();
        ResolvePreviousAccess(type, trailing_fill_range, &gap_map, infill_state);
        for (auto &access : gap_map) {
            barrier_action(&access.second);
            resolve_map->insert(the_end, access);
        }
    }
}

void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
                                          ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            const ApplyTrackbackBarriersAction barrier_action(prev_dep.barriers);
            prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }

        if (src_external_.context) {
            const ApplyTrackbackBarriersAction barrier_action(src_external_.barriers);
            src_external_.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
        }
    }
}

// Non-lazy import of all accesses, WaitEvents needs this.
void AccessContext::ResolvePreviousAccesses() {
    ResourceAccessState default_state;
    for (const auto address_type : kAddressTypes) {
        ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
    }
}

AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
    return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
}

static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
                                                                      : SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE;
    return stage_access;
}
static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
    const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
                                                                      : SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE;
    return stage_access;
}

// Caller must manage returned pointer
static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
                                                     uint32_t subpass, const VkRect2D &render_area,
                                                     std::vector<const IMAGE_VIEW_STATE *> attachment_views) {
    auto *proxy = new AccessContext(context);
    proxy->UpdateAttachmentResolveAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    proxy->UpdateAttachmentStoreAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
    return proxy;
}

template <typename BarrierAction>
class ResolveAccessRangeFunctor {
  public:
    ResolveAccessRangeFunctor(const AccessContext &context, AccessAddressType address_type, ResourceAccessRangeMap *descent_map,
                              const ResourceAccessState *infill_state, BarrierAction &barrier_action)
        : context_(context),
          address_type_(address_type),
          descent_map_(descent_map),
          infill_state_(infill_state),
          barrier_action_(barrier_action) {}
    ResolveAccessRangeFunctor() = delete;
    void operator()(const ResourceAccessRange &range) const {
        context_.ResolveAccessRange(address_type_, range, barrier_action_, descent_map_, infill_state_);
    }

  private:
    const AccessContext &context_;
    const AccessAddressType address_type_;
    ResourceAccessRangeMap *const descent_map_;
    const ResourceAccessState *infill_state_;
    BarrierAction &barrier_action_;
};

template <typename BarrierAction>
void AccessContext::ResolveAccessRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range,
                                       BarrierAction &barrier_action, AccessAddressType address_type,
                                       ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    const ResolveAccessRangeFunctor<BarrierAction> action(*this, address_type, descent_map, infill_state, barrier_action);
    ApplyOverImageRange(image_state, subresource_range, action);
}

// Layout transitions are handled as if they were occurring at the beginning of the next subpass
bool AccessContext::ValidateLayoutTransitions(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
                                              const VkRect2D &render_area, uint32_t subpass,
                                              const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
                                              const char *func_name) const {
    bool skip = false;
    // As validation methods are const and precede the record/update phase, for any transitions from the immediately
    // previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
    // those effects have not been recorded yet.
    //
    // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
    // to apply and only copy then, if this proves a hot spot.
    std::unique_ptr<AccessContext> proxy_for_prev;
    TrackBack proxy_track_back;

    const auto &transitions = rp_state.subpass_transitions[subpass];
    for (const auto &transition : transitions) {
        const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);

        const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
        if (prev_needs_proxy) {
            if (!proxy_for_prev) {
                proxy_for_prev.reset(CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass,
                                                                    render_area, attachment_views));
                proxy_track_back = *track_back;
                proxy_track_back.context = proxy_for_prev.get();
            }
            track_back = &proxy_track_back;
        }
        auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
        if (hazard.hazard) {
            skip |= cb_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                                       "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
                                                       " image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
                                                       func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
                                                       string_VkImageLayout(transition.old_layout),
                                                       string_VkImageLayout(transition.new_layout),
                                                       cb_context.FormatUsage(hazard).c_str());
        }
    }
    return skip;
}

John Zulauffaea0ee2021-01-14 14:01:32 -07001013bool AccessContext::ValidateLoadOperation(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
John Zulauf7635de32020-05-29 17:14:15 -06001014 const VkRect2D &render_area, uint32_t subpass,
1015 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1016 const char *func_name) const {
John Zulauf1507ee42020-05-18 11:33:09 -06001017 bool skip = false;
1018 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1019 VkExtent3D extent = CastTo3D(render_area.extent);
1020 VkOffset3D offset = CastTo3D(render_area.offset);
John Zulaufa0a98292020-09-18 09:30:10 -06001021
John Zulauf1507ee42020-05-18 11:33:09 -06001022 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1023 if (subpass == rp_state.attachment_first_subpass[i]) {
1024 if (attachment_views[i] == nullptr) continue;
1025 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1026 const IMAGE_STATE *image = view.image_state.get();
1027 if (image == nullptr) continue;
1028 const auto &ci = attachment_ci[i];
John Zulauf1507ee42020-05-18 11:33:09 -06001029
1030 // Need check in the following way
1031 // 1) if the usage bit isn't in the dest_access_scope, and there is layout traniition for initial use, report hazard
1032 // vs. transition
1033 // 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
1034 // for each aspect loaded.
1035
1036 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06001037 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06001038 const bool is_color = !(has_depth || has_stencil);
1039
1040 const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
John Zulauf1507ee42020-05-18 11:33:09 -06001041 const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
John Zulauf1507ee42020-05-18 11:33:09 -06001042
John Zulaufaff20662020-06-01 14:07:58 -06001043 HazardResult hazard;
John Zulauf1507ee42020-05-18 11:33:09 -06001044 const char *aspect = nullptr;
John Zulauf1507ee42020-05-18 11:33:09 -06001045
John Zulaufb02c1eb2020-10-06 16:33:36 -06001046 auto hazard_range = view.normalized_subresource_range;
1047 bool checked_stencil = false;
1048 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001049 hazard = DetectHazard(*image, load_index, view.normalized_subresource_range, SyncOrdering::kColorAttachment, offset,
John Zulauf859089b2020-10-29 17:37:03 -06001050 extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001051 aspect = "color";
1052 } else {
1053 if (has_depth) {
1054 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001055 hazard = DetectHazard(*image, load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset, extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001056 aspect = "depth";
1057 }
1058 if (!hazard.hazard && has_stencil) {
1059 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001060 hazard = DetectHazard(*image, stencil_load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset,
1061 extent);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001062 aspect = "stencil";
1063 checked_stencil = true;
1064 }
1065 }
1066
1067 if (hazard.hazard) {
1068 auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
John Zulauffaea0ee2021-01-14 14:01:32 -07001069 const auto &sync_state = cb_context.GetSyncState();
John Zulaufb02c1eb2020-10-06 16:33:36 -06001070 if (hazard.tag == kCurrentCommandTag) {
1071 // Hazard vs. ILT
1072 skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1073 "%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
1074 " aspect %s during load with loadOp %s.",
1075 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
1076 } else {
John Zulauf1507ee42020-05-18 11:33:09 -06001077 skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1078 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
John Zulauf59e25072020-07-17 10:55:21 -06001079 " aspect %s during load with loadOp %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06001080 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
John Zulauffaea0ee2021-01-14 14:01:32 -07001081 cb_context.FormatUsage(hazard).c_str());
John Zulauf1507ee42020-05-18 11:33:09 -06001082 }
1083 }
1084 }
1085 }
1086 return skip;
1087}
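// Note on the loadOp-to-usage mapping above (a summary; ColorLoadUsage/DepthStencilLoadUsage hold the authoritative
// table): VK_ATTACHMENT_LOAD_OP_LOAD is treated as an attachment *read*, while CLEAR and DONT_CARE are treated as
// attachment *writes*, so the same prior access can surface as READ_AFTER_WRITE for LOAD but WRITE_AFTER_WRITE for
// CLEAR/DONT_CARE.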
1088
John Zulaufaff20662020-06-01 14:07:58 -06001089// Store operation validation can ignore resolve (before it) and layout transitions (after it). The former is ignored
1090// because of the ordering guarantees w.r.t. sample access, and because the resolve validation hasn't altered the state
1091// (store is part of the same Next/End operation).
1092// The latter is handled directly in layout transition validation.
John Zulauffaea0ee2021-01-14 14:01:32 -07001093bool AccessContext::ValidateStoreOperation(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
John Zulaufaff20662020-06-01 14:07:58 -06001094 const VkRect2D &render_area, uint32_t subpass,
1095 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1096 const char *func_name) const {
1097 bool skip = false;
1098 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1099 VkExtent3D extent = CastTo3D(render_area.extent);
1100 VkOffset3D offset = CastTo3D(render_area.offset);
1101
1102 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1103 if (subpass == rp_state.attachment_last_subpass[i]) {
1104 if (attachment_views[i] == nullptr) continue;
1105 const IMAGE_VIEW_STATE &view = *attachment_views[i];
1106 const IMAGE_STATE *image = view.image_state.get();
1107 if (image == nullptr) continue;
1108 const auto &ci = attachment_ci[i];
1109
1110            // The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1111            // so we assume that an implementation is *free* to write in that case, meaning that for correctness'
1112            // sake, we treat DONT_CARE as writing (see the note after this function).
1113 const bool has_depth = FormatHasDepth(ci.format);
1114 const bool has_stencil = FormatHasStencil(ci.format);
1115 const bool is_color = !(has_depth || has_stencil);
1116 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1117 if (!has_stencil && !store_op_stores) continue;
1118
1119 HazardResult hazard;
1120 const char *aspect = nullptr;
1121 bool checked_stencil = false;
1122 if (is_color) {
1123 hazard = DetectHazard(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001124 view.normalized_subresource_range, SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001125 aspect = "color";
1126 } else {
1127 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1128 auto hazard_range = view.normalized_subresource_range;
1129 if (has_depth && store_op_stores) {
1130 hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
1131 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001132 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001133 aspect = "depth";
1134 }
1135 if (!hazard.hazard && has_stencil && stencil_op_stores) {
1136 hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
1137 hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001138 SyncOrdering::kRaster, offset, extent);
John Zulaufaff20662020-06-01 14:07:58 -06001139 aspect = "stencil";
1140 checked_stencil = true;
1141 }
1142 }
1143
1144 if (hazard.hazard) {
1145 const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
1146 const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
John Zulauffaea0ee2021-01-14 14:01:32 -07001147 skip |= cb_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
1148 "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
1149 " %s aspect during store with %s %s. Access info %s",
1150 func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
1151 op_type_string, store_op_string, cb_context.FormatUsage(hazard).c_str());
John Zulaufaff20662020-06-01 14:07:58 -06001152 }
1153 }
1154 }
1155 return skip;
1156}
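// Note on the DONT_CARE-as-write rule above: an attachment whose last subpass uses storeOp (or stencilStoreOp) of
// VK_ATTACHMENT_STORE_OP_DONT_CARE is checked, and later recorded, with the same SYNC_*_ATTACHMENT_WRITE usage as
// STORE_OP_STORE; only VK_ATTACHMENT_STORE_OP_NONE_QCOM skips the check.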
1157
John Zulauffaea0ee2021-01-14 14:01:32 -07001158bool AccessContext::ValidateResolveOperations(const CommandBufferAccessContext &cb_context, const RENDER_PASS_STATE &rp_state,
John Zulaufb027cdb2020-05-21 14:25:22 -06001159 const VkRect2D &render_area,
1160 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, const char *func_name,
1161 uint32_t subpass) const {
John Zulauffaea0ee2021-01-14 14:01:32 -07001162 ValidateResolveAction validate_action(rp_state.renderPass, subpass, *this, cb_context, func_name);
John Zulauf7635de32020-05-29 17:14:15 -06001163 ResolveOperation(validate_action, rp_state, render_area, attachment_views, subpass);
1164 return validate_action.GetSkip();
John Zulaufb027cdb2020-05-21 14:25:22 -06001165}
1166
John Zulauf3d84f1b2020-03-09 13:33:25 -06001167class HazardDetector {
1168 SyncStageAccessIndex usage_index_;
1169
1170 public:
John Zulauf5f13a792020-03-10 07:31:21 -06001171 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001172 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1173 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001174 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001175 explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001176};
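// Detect() checks a map position against synchronous prior accesses in the same context, while DetectAsync() checks it
// against accesses recorded in unsynchronized (async) contexts relative to start_tag, producing the *_RACING_* hazards.
// A minimal usage sketch, mirroring the buffer overload below ('usage_index' and 'range' are illustrative names):
//
//   HazardDetector detector(usage_index);
//   HazardResult hazard = DetectHazard(AccessAddressType::kLinear, detector, range, DetectOptions::kDetectAll);
//   if (hazard.hazard) { /* report using string_SyncHazardVUID()/string_SyncHazard() */ }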
1177
John Zulauf69133422020-05-20 14:55:53 -06001178class HazardDetectorWithOrdering {
1179 const SyncStageAccessIndex usage_index_;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001180 const SyncOrdering ordering_rule_;
John Zulauf69133422020-05-20 14:55:53 -06001181
1182 public:
1183 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001184 return pos->second.DetectHazard(usage_index_, ordering_rule_);
John Zulauf69133422020-05-20 14:55:53 -06001185 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001186 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1187 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf69133422020-05-20 14:55:53 -06001188 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001189 HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
John Zulauf69133422020-05-20 14:55:53 -06001190};
1191
John Zulauf16adfc92020-04-08 10:28:33 -06001192HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
John Zulauf355e49b2020-04-24 15:11:15 -06001193 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -06001194 if (!SimpleBinding(buffer)) return HazardResult();
John Zulauf150e5332020-12-03 08:52:52 -07001195 const auto base_address = ResourceBaseAddress(buffer);
1196 HazardDetector detector(usage_index);
1197 return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
John Zulaufe5da6e52020-03-18 15:32:18 -06001198}
1199
John Zulauf69133422020-05-20 14:55:53 -06001200template <typename Detector>
1201HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
1202 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1203 const VkExtent3D &extent, DetectOptions options) const {
1204 if (!SimpleBinding(image)) return HazardResult();
John Zulauf69133422020-05-20 14:55:53 -06001205 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001206 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1207 base_address);
1208 const auto address_type = ImageAddressType(image);
John Zulauf69133422020-05-20 14:55:53 -06001209 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001210 HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
John Zulauf69133422020-05-20 14:55:53 -06001211 if (hazard.hazard) return hazard;
1212 }
1213 return HazardResult();
1214}
1215
John Zulauf540266b2020-04-06 18:54:53 -06001216HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1217 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1218 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001219 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1220 subresource.layerCount};
John Zulauf1507ee42020-05-18 11:33:09 -06001221 return DetectHazard(image, current_usage, subresource_range, offset, extent);
1222}
1223
1224HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
1225 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
1226 const VkExtent3D &extent) const {
John Zulauf69133422020-05-20 14:55:53 -06001227 HazardDetector detector(current_usage);
1228 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
1229}
1230
1231HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001232 const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
John Zulauf69133422020-05-20 14:55:53 -06001233 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001234 HazardDetectorWithOrdering detector(current_usage, ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06001235 return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
John Zulauf9cb530d2019-09-30 14:14:10 -06001236}
1237
John Zulaufb027cdb2020-05-21 14:25:22 -06001238// Some common code for looking at attachments. If there's anything wrong, we return no hazard; core validation
1239// should already have reported the issue regarding an invalid attachment entry.
1240HazardResult AccessContext::DetectHazard(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001241 SyncOrdering ordering_rule, const VkOffset3D &offset, const VkExtent3D &extent,
John Zulaufb027cdb2020-05-21 14:25:22 -06001242 VkImageAspectFlags aspect_mask) const {
1243 if (view != nullptr) {
1244 const IMAGE_STATE *image = view->image_state.get();
1245 if (image != nullptr) {
1246 auto *detect_range = &view->normalized_subresource_range;
1247 VkImageSubresourceRange masked_range;
1248 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1249 masked_range = view->normalized_subresource_range;
1250 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1251 detect_range = &masked_range;
1252 }
1253
1254 // NOTE: The range encoding code is not robust to invalid ranges, so we protect it from our change
1255 if (detect_range->aspectMask) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001256 return DetectHazard(*image, current_usage, *detect_range, ordering_rule, offset, extent);
John Zulaufb027cdb2020-05-21 14:25:22 -06001257 }
1258 }
1259 }
1260 return HazardResult();
1261}
John Zulauf43cc7462020-12-03 12:33:12 -07001262
John Zulauf3d84f1b2020-03-09 13:33:25 -06001263class BarrierHazardDetector {
1264 public:
1265 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
1266 SyncStageAccessFlags src_access_scope)
1267 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
1268
John Zulauf5f13a792020-03-10 07:31:21 -06001269 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1270 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -07001271 }
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001272 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001273        // Async barrier hazard detection can use the same path, as the usage index is a write (not a read)
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07001274 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001275 }
1276
1277 private:
1278 SyncStageAccessIndex usage_index_;
1279 VkPipelineStageFlags src_exec_scope_;
1280 SyncStageAccessFlags src_access_scope_;
1281};
1282
John Zulauf4a6105a2020-11-17 15:11:05 -07001283class EventBarrierHazardDetector {
1284 public:
1285 EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
1286 SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
1287 const ResourceUsageTag &scope_tag)
1288 : usage_index_(usage_index),
1289 src_exec_scope_(src_exec_scope),
1290 src_access_scope_(src_access_scope),
1291 event_scope_(event_scope),
1292 scope_pos_(event_scope.cbegin()),
1293 scope_end_(event_scope.cend()),
1294 scope_tag_(scope_tag) {}
1295
1296 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
1297 // TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
1298        // Need to find a more efficient search, since we know pos->first is strictly increasing from call to call
1299 // NOTE: "cached_lower_bound_impl" with upgrades could do this.
1300 if (scope_pos_ == scope_end_) return HazardResult();
1301 if (!scope_pos_->first.intersects(pos->first)) {
1302            scope_pos_ = event_scope_.lower_bound(pos->first);
1303 if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
1304 }
1305
1306 // Some portion of this pos is in the event_scope, so check for a barrier hazard
1307 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
1308 }
1309 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
1310        // Async barrier hazard detection can use the same path, as the usage index is a write (not a read)
1311 return pos->second.DetectAsyncHazard(usage_index_, start_tag);
1312 }
1313
1314 private:
1315 SyncStageAccessIndex usage_index_;
1316 VkPipelineStageFlags src_exec_scope_;
1317 SyncStageAccessFlags src_access_scope_;
1318 const SyncEventState::ScopeMap &event_scope_;
1319    mutable SyncEventState::ScopeMap::const_iterator scope_pos_;
1320 SyncEventState::ScopeMap::const_iterator scope_end_;
1321 const ResourceUsageTag &scope_tag_;
1322};
1323
1324HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
1325 const SyncStageAccessFlags &src_access_scope,
1326 const VkImageSubresourceRange &subresource_range,
1327 const SyncEventState &sync_event, DetectOptions options) const {
1328 // It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
1329 // first access scope map to use, and there's no easy way to plumb it in below.
1330 const auto address_type = ImageAddressType(image);
1331 const auto &event_scope = sync_event.FirstScope(address_type);
1332
1333 EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
1334 event_scope, sync_event.first_scope_tag);
1335 VkOffset3D zero_offset = {0, 0, 0};
1336 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
1337}
1338
John Zulauf16adfc92020-04-08 10:28:33 -06001339HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001340 const SyncStageAccessFlags &src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001341 const VkImageSubresourceRange &subresource_range,
John Zulauf43cc7462020-12-03 12:33:12 -07001342 const DetectOptions options) const {
John Zulauf69133422020-05-20 14:55:53 -06001343 BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
1344 VkOffset3D zero_offset = {0, 0, 0};
1345 return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
John Zulauf0cb5be22020-01-23 12:18:22 -07001346}
1347
John Zulauf355e49b2020-04-24 15:11:15 -06001348HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07001349 const SyncStageAccessFlags &src_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001350 const VkImageMemoryBarrier &barrier) const {
1351 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
1352 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
1353 return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
1354}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07001355HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
1356 return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope,
1357 image_barrier.barrier.src_access_scope, image_barrier.subresource_range, kDetectAll);
1358}
John Zulauf355e49b2020-04-24 15:11:15 -06001359
John Zulauf9cb530d2019-09-30 14:14:10 -06001360template <typename Flags, typename Map>
1361SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
1362 SyncStageAccessFlags scope = 0;
1363 for (const auto &bit_scope : map) {
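        // Assumes the map is ordered by ascending bit value: once the key exceeds flag_mask, no remaining key can be set in it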
1364 if (flag_mask < bit_scope.first) break;
1365
1366 if (flag_mask & bit_scope.first) {
1367 scope |= bit_scope.second;
1368 }
1369 }
1370 return scope;
1371}
1372
1373SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
1374 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
1375}
1376
1377SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
1378 return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
1379}
1380
1381 // Getting from a stage mask and an access mask to stage/access masks is something we need to be good at...
1382SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -06001383    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
1384    // accesses. (After factoring out common terms, the union of the per-stage/per-access intersections equals the
1385    // intersection of the union of all stage/access types for the stages with the same union for the access mask; see the worked example after this function.)
John Zulauf9cb530d2019-09-30 14:14:10 -06001386 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
1387}
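// Worked example (illustrative): for
//
//   const SyncStageAccessFlags scope =
//       SyncStageAccess::AccessScope(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
//
// AccessScopeByStage() contributes every stage/access bit the fragment shader stage can generate, AccessScopeByAccess()
// contributes every stage/access bit any stage can generate for a shader read, and their intersection leaves only the
// "shader reads performed by the fragment shader" bits -- reads by other stages and non-read fragment accesses drop out.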
1388
1389template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -07001390void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf7635de32020-05-29 17:14:15 -06001391    // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs. READ usages
1392    // that do incremental updates). A worked example of the walk below follows this function.
John Zulauf4a6105a2020-11-17 15:11:05 -07001393 assert(accesses);
John Zulauf9cb530d2019-09-30 14:14:10 -06001394 auto pos = accesses->lower_bound(range);
1395 if (pos == accesses->end() || !pos->first.intersects(range)) {
1396 // The range is empty, fill it with a default value.
1397 pos = action.Infill(accesses, pos, range);
1398 } else if (range.begin < pos->first.begin) {
1399 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -07001400 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -06001401 } else if (pos->first.begin < range.begin) {
1402 // Trim the beginning if needed
1403 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
1404 ++pos;
1405 }
1406
1407 const auto the_end = accesses->end();
1408 while ((pos != the_end) && pos->first.intersects(range)) {
1409 if (pos->first.end > range.end) {
1410 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
1411 }
1412
1413 pos = action(accesses, pos);
1414 if (pos == the_end) break;
1415
1416 auto next = pos;
1417 ++next;
1418 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
1419 // Need to infill if next is disjoint
1420 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001421 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -06001422 next = action.Infill(accesses, next, new_range);
1423 }
1424 pos = next;
1425 }
1426}
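// Worked example of the walk above (illustrative): given existing map entries [0,100) and [120,200) and an update
// range of [50,150):
//   - [0,100) is split at 50 and the action is applied to [50,100); [0,50) is left untouched
//   - the gap [100,120) is filled via action.Infill() and then visited by the action
//   - [120,200) is split at 150 and the action is applied to [120,150); [150,200) is left untouched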
John Zulauf4a6105a2020-11-17 15:11:05 -07001427template <typename Action, typename RangeGen>
1428void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
1429 assert(range_gen_arg);
1430 auto &range_gen = *range_gen_arg; // Non-const references must be * by style requirement but deref-ing * iterator is a pain
1431 for (; range_gen->non_empty(); ++range_gen) {
1432 UpdateMemoryAccessState(accesses, *range_gen, action);
1433 }
1434}
John Zulauf9cb530d2019-09-30 14:14:10 -06001435
1436struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001437 using Iterator = ResourceAccessRangeMap::iterator;
1438 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -06001439 // this is only called on gaps, and never returns a gap.
1440 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -06001441 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -06001442 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -06001443 }
John Zulauf5f13a792020-03-10 07:31:21 -06001444
John Zulauf5c5e88d2019-12-26 11:22:02 -07001445 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001446 auto &access_state = pos->second;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001447 access_state.Update(usage, ordering_rule, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001448 return pos;
1449 }
1450
John Zulauf43cc7462020-12-03 12:33:12 -07001451 UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001452 SyncOrdering ordering_rule_, const ResourceUsageTag &tag_)
1453 : type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
John Zulauf43cc7462020-12-03 12:33:12 -07001454 const AccessAddressType type;
John Zulauf540266b2020-04-06 18:54:53 -06001455 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -06001456 const SyncStageAccessIndex usage;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001457 const SyncOrdering ordering_rule;
John Zulauf9cb530d2019-09-30 14:14:10 -06001458 const ResourceUsageTag &tag;
1459};
1460
John Zulauf4a6105a2020-11-17 15:11:05 -07001461// The barrier operation for pipeline and subpass dependencies
John Zulauf1e331ec2020-12-04 18:29:38 -07001462struct PipelineBarrierOp {
1463 SyncBarrier barrier;
1464 bool layout_transition;
1465 PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
1466 : barrier(barrier_), layout_transition(layout_transition_) {}
1467 PipelineBarrierOp() = default;
1468 void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
1469};
John Zulauf4a6105a2020-11-17 15:11:05 -07001470// The barrier operation for wait events
1471struct WaitEventBarrierOp {
1472 const ResourceUsageTag *scope_tag;
1473 SyncBarrier barrier;
1474 bool layout_transition;
1475 WaitEventBarrierOp(const ResourceUsageTag &scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
1476 : scope_tag(&scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
1477 WaitEventBarrierOp() = default;
1478 void operator()(ResourceAccessState *access_state) const {
1479 assert(scope_tag); // Not valid to have a non-scope op executed, default construct included for std::vector support
1480 access_state->ApplyBarrier(*scope_tag, barrier, layout_transition);
1481 }
1482};
John Zulauf1e331ec2020-12-04 18:29:38 -07001483
John Zulauf4a6105a2020-11-17 15:11:05 -07001484// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
1485// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
1486// of a collection is known/present.
John Zulauf1e331ec2020-12-04 18:29:38 -07001487template <typename BarrierOp>
John Zulauf89311b42020-09-29 16:28:47 -06001488class ApplyBarrierOpsFunctor {
1489 public:
John Zulauf5c5e88d2019-12-26 11:22:02 -07001490 using Iterator = ResourceAccessRangeMap::iterator;
1491 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -06001492
John Zulauf5c5e88d2019-12-26 11:22:02 -07001493 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -06001494 auto &access_state = pos->second;
John Zulauf1e331ec2020-12-04 18:29:38 -07001495 for (const auto &op : barrier_ops_) {
1496 op(&access_state);
John Zulauf89311b42020-09-29 16:28:47 -06001497 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001498
John Zulauf89311b42020-09-29 16:28:47 -06001499 if (resolve_) {
1500 // If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
1501 // another walk
1502 access_state.ApplyPendingBarriers(tag_);
John Zulauf9cb530d2019-09-30 14:14:10 -06001503 }
1504 return pos;
1505 }
1506
John Zulauf89311b42020-09-29 16:28:47 -06001507 // A valid tag is required IFF layout_transition is true, as transitions are write ops
John Zulauf1e331ec2020-12-04 18:29:38 -07001508 ApplyBarrierOpsFunctor(bool resolve, const std::vector<BarrierOp> &barrier_ops, const ResourceUsageTag &tag)
1509 : resolve_(resolve), barrier_ops_(barrier_ops), tag_(tag) {}
John Zulauf89311b42020-09-29 16:28:47 -06001510
1511 private:
1512 bool resolve_;
John Zulauf1e331ec2020-12-04 18:29:38 -07001513 const std::vector<BarrierOp> &barrier_ops_;
1514 const ResourceUsageTag &tag_;
1515};
1516
John Zulauf4a6105a2020-11-17 15:11:05 -07001517// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
1518// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events.
1519template <typename BarrierOp>
1520class ApplyBarrierFunctor {
1521 public:
1522 using Iterator = ResourceAccessRangeMap::iterator;
1523 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1524
1525 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1526 auto &access_state = pos->second;
1527 barrier_op_(&access_state);
1528 return pos;
1529 }
1530
1531 ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
1532
1533 private:
1534 const BarrierOp barrier_op_;
1535};
1536
John Zulauf1e331ec2020-12-04 18:29:38 -07001537// This functor resolves the pending state. (The sketch after this class shows how the three functors combine.)
1538class ResolvePendingBarrierFunctor {
1539 public:
1540 using Iterator = ResourceAccessRangeMap::iterator;
1541 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
1542
1543 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
1544 auto &access_state = pos->second;
1545 access_state.ApplyPendingBarriers(tag_);
1546 return pos;
1547 }
1548
1549 ResolvePendingBarrierFunctor(const ResourceUsageTag &tag) : tag_(tag) {}
1550
1551 private:
John Zulauf89311b42020-09-29 16:28:47 -06001552 const ResourceUsageTag &tag_;
John Zulauf9cb530d2019-09-30 14:14:10 -06001553};
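// Taken together, the functors above implement a two-phase barrier application. A minimal sketch, assuming an access
// state map 'map', a 'range', a 'sync_barrier', a 'layout_transition' flag, and a 'tag' (names are illustrative):
//
//   // Phase 1: each image/buffer barrier updates only the *pending* state of the ranges it touches
//   ApplyBarrierFunctor<PipelineBarrierOp> per_barrier(PipelineBarrierOp(sync_barrier, layout_transition));
//   UpdateMemoryAccessState(&map, range, per_barrier);
//   // Phase 2: a single full-range pass folds the accumulated pending state into the effective access state
//   ResolvePendingBarrierFunctor resolve(tag);
//   UpdateMemoryAccessState(&map, kFullRange, resolve);
//
// ApplyBarrierOpsFunctor(resolve == true, ...) fuses both phases, which is how global memory barriers are handled.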
1554
John Zulauf8e3c3e92021-01-06 11:19:36 -07001555void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1556 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
1557 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001558 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001559}
1560
John Zulauf8e3c3e92021-01-06 11:19:36 -07001561void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001562 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001563 if (!SimpleBinding(buffer)) return;
1564 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001565 UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06001566}
John Zulauf355e49b2020-04-24 15:11:15 -06001567
John Zulauf8e3c3e92021-01-06 11:19:36 -07001568void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001569 const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
John Zulauf540266b2020-04-06 18:54:53 -06001570 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -06001571 if (!SimpleBinding(image)) return;
John Zulauf16adfc92020-04-08 10:28:33 -06001572 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001573 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
1574 base_address);
1575 const auto address_type = ImageAddressType(image);
John Zulauf8e3c3e92021-01-06 11:19:36 -07001576 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
John Zulauf5f13a792020-03-10 07:31:21 -06001577 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001578 UpdateMemoryAccessState(&GetAccessStateMap(address_type), *range_gen, action);
John Zulauf5f13a792020-03-10 07:31:21 -06001579 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001580}
John Zulauf8e3c3e92021-01-06 11:19:36 -07001581void AccessContext::UpdateAccessState(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
1582 const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask,
1583 const ResourceUsageTag &tag) {
John Zulauf7635de32020-05-29 17:14:15 -06001584 if (view != nullptr) {
1585 const IMAGE_STATE *image = view->image_state.get();
1586 if (image != nullptr) {
1587 auto *update_range = &view->normalized_subresource_range;
1588 VkImageSubresourceRange masked_range;
1589 if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
1590 masked_range = view->normalized_subresource_range;
1591 masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
1592 update_range = &masked_range;
1593 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07001594 UpdateAccessState(*image, current_usage, ordering_rule, *update_range, offset, extent, tag);
John Zulauf7635de32020-05-29 17:14:15 -06001595 }
1596 }
1597}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001598
John Zulauf8e3c3e92021-01-06 11:19:36 -07001599void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
John Zulauf355e49b2020-04-24 15:11:15 -06001600 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
1601 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06001602 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
1603 subresource.layerCount};
John Zulauf8e3c3e92021-01-06 11:19:36 -07001604 UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06001605}
1606
John Zulauf540266b2020-04-06 18:54:53 -06001607template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001608void AccessContext::UpdateResourceAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001609 if (!SimpleBinding(buffer)) return;
1610 const auto base_address = ResourceBaseAddress(buffer);
John Zulauf43cc7462020-12-03 12:33:12 -07001611 UpdateMemoryAccessState(&GetAccessStateMap(AccessAddressType::kLinear), (range + base_address), action);
John Zulauf540266b2020-04-06 18:54:53 -06001612}
1613
1614template <typename Action>
John Zulauf89311b42020-09-29 16:28:47 -06001615void AccessContext::UpdateResourceAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
1616 const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -06001617 if (!SimpleBinding(image)) return;
1618 const auto address_type = ImageAddressType(image);
1619 auto *accesses = &GetAccessStateMap(address_type);
John Zulauf540266b2020-04-06 18:54:53 -06001620
John Zulauf16adfc92020-04-08 10:28:33 -06001621 const auto base_address = ResourceBaseAddress(image);
John Zulauf150e5332020-12-03 08:52:52 -07001622 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
1623 image.createInfo.extent, base_address);
1624
John Zulauf540266b2020-04-06 18:54:53 -06001625 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf150e5332020-12-03 08:52:52 -07001626 UpdateMemoryAccessState(accesses, *range_gen, action);
John Zulauf540266b2020-04-06 18:54:53 -06001627 }
1628}
1629
John Zulauf7635de32020-05-29 17:14:15 -06001630void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1631 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1632 const ResourceUsageTag &tag) {
1633 UpdateStateResolveAction update(*this, tag);
1634 ResolveOperation(update, rp_state, render_area, attachment_views, subpass);
1635}
1636
John Zulaufaff20662020-06-01 14:07:58 -06001637void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
1638 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
1639 const ResourceUsageTag &tag) {
1640 const auto *attachment_ci = rp_state.createInfo.pAttachments;
1641 VkExtent3D extent = CastTo3D(render_area.extent);
1642 VkOffset3D offset = CastTo3D(render_area.offset);
1643
1644 for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
1645 if (rp_state.attachment_last_subpass[i] == subpass) {
1646 if (attachment_views[i] == nullptr) continue; // UNUSED
1647 const auto &view = *attachment_views[i];
1648 const IMAGE_STATE *image = view.image_state.get();
1649 if (image == nullptr) continue;
1650
1651 const auto &ci = attachment_ci[i];
1652 const bool has_depth = FormatHasDepth(ci.format);
1653 const bool has_stencil = FormatHasStencil(ci.format);
1654 const bool is_color = !(has_depth || has_stencil);
1655 const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1656
1657 if (is_color && store_op_stores) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07001658 UpdateAccessState(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1659 view.normalized_subresource_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001660 } else {
1661 auto update_range = view.normalized_subresource_range;
1662 if (has_depth && store_op_stores) {
1663 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001664 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1665 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001666 }
1667 const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
1668 if (has_stencil && stencil_op_stores) {
1669 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07001670 UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
1671 update_range, offset, extent, tag);
John Zulaufaff20662020-06-01 14:07:58 -06001672 }
1673 }
1674 }
1675 }
1676}
1677
John Zulauf540266b2020-04-06 18:54:53 -06001678template <typename Action>
1679void AccessContext::ApplyGlobalBarriers(const Action &barrier_action) {
1680    // Note: Barriers do *not* cross context boundaries, applying only to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -06001681 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001682 UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -06001683 }
1684}
1685
1686void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -06001687 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
1688 auto &context = contexts[subpass_index];
John Zulaufb02c1eb2020-10-06 16:33:36 -06001689 ApplyTrackbackBarriersAction barrier_action(context.GetDstExternalTrackBack().barriers);
John Zulauf16adfc92020-04-08 10:28:33 -06001690 for (const auto address_type : kAddressTypes) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001691 context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -06001692 }
1693 }
1694}
1695
John Zulauf355e49b2020-04-24 15:11:15 -06001696// Suitable only for *subpass* access contexts
John Zulauf7635de32020-05-29 17:14:15 -06001697HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const IMAGE_VIEW_STATE *attach_view) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001698 if (!attach_view) return HazardResult();
1699 const auto image_state = attach_view->image_state.get();
1700 if (!image_state) return HazardResult();
1701
John Zulauf355e49b2020-04-24 15:11:15 -06001702 // We should never ask for a transition from a context we don't have
John Zulauf7635de32020-05-29 17:14:15 -06001703 assert(track_back.context);
John Zulauf355e49b2020-04-24 15:11:15 -06001704
1705 // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
John Zulaufa0a98292020-09-18 09:30:10 -06001706    // Hazard detection for the transition can be done against the merge of the barriers (it only uses the src_... scopes)
1707 const auto merged_barrier = MergeBarriers(track_back.barriers);
1708 HazardResult hazard =
1709 track_back.context->DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope, merged_barrier.src_access_scope,
1710 attach_view->normalized_subresource_range, kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06001711 if (!hazard.hazard) {
1712 // The Async hazard check is against the current context's async set.
John Zulaufa0a98292020-09-18 09:30:10 -06001713 hazard = DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope, merged_barrier.src_access_scope,
John Zulauf355e49b2020-04-24 15:11:15 -06001714 attach_view->normalized_subresource_range, kDetectAsync);
1715 }
John Zulaufa0a98292020-09-18 09:30:10 -06001716
John Zulauf355e49b2020-04-24 15:11:15 -06001717 return hazard;
1718}
1719
John Zulaufb02c1eb2020-10-06 16:33:36 -06001720void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
1721 const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
1722 const ResourceUsageTag &tag) {
1723 const auto &transitions = rp_state.subpass_transitions[subpass];
John Zulauf646cc292020-10-23 09:16:45 -06001724 const ResourceAccessState empty_infill;
John Zulaufb02c1eb2020-10-06 16:33:36 -06001725 for (const auto &transition : transitions) {
1726 const auto prev_pass = transition.prev_pass;
1727 const auto attachment_view = attachment_views[transition.attachment];
1728 if (!attachment_view) continue;
1729 const auto *image = attachment_view->image_state.get();
1730 if (!image) continue;
1731 if (!SimpleBinding(*image)) continue;
1732
1733 const auto *trackback = GetTrackBackFromSubpass(prev_pass);
1734 assert(trackback);
1735
1736 // Import the attachments into the current context
1737 const auto *prev_context = trackback->context;
1738 assert(prev_context);
1739 const auto address_type = ImageAddressType(*image);
1740 auto &target_map = GetAccessStateMap(address_type);
1741 ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
1742 prev_context->ResolveAccessRange(*image, attachment_view->normalized_subresource_range, barrier_action, address_type,
John Zulauf646cc292020-10-23 09:16:45 -06001743 &target_map, &empty_infill);
John Zulaufb02c1eb2020-10-06 16:33:36 -06001744 }
1745
John Zulauf86356ca2020-10-19 11:46:41 -06001746 // If there were no transitions skip this global map walk
1747 if (transitions.size()) {
John Zulauf1e331ec2020-12-04 18:29:38 -07001748 ResolvePendingBarrierFunctor apply_pending_action(tag);
John Zulauf86356ca2020-10-19 11:46:41 -06001749 ApplyGlobalBarriers(apply_pending_action);
1750 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06001751}
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001752
1753void CommandBufferAccessContext::ApplyBufferBarriers(const SyncEventState &sync_event, const SyncExecScope &dst,
1754 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001755 const auto &scope_tag = sync_event.first_scope_tag;
1756 auto *access_context = GetCurrentAccessContext();
1757 const auto address_type = AccessAddressType::kLinear;
1758 for (uint32_t index = 0; index < barrier_count; index++) {
1759 auto barrier = barriers[index]; // barrier is a copy
1760 const auto *buffer = sync_state_->Get<BUFFER_STATE>(barrier.buffer);
1761 if (!buffer) continue;
1762 const auto base_address = ResourceBaseAddress(*buffer);
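        // Resolve VK_WHOLE_SIZE to the actual remaining size so that MakeRange() below produces a bounded range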
1763 barrier.size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
1764 const ResourceAccessRange range = MakeRange(barrier) + base_address;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001765 const SyncBarrier sync_barrier(barrier, sync_event.scope, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001766 const ApplyBarrierFunctor<WaitEventBarrierOp> barrier_action({scope_tag, sync_barrier, false /* layout_transition */});
1767 EventSimpleRangeGenerator filtered_range_gen(sync_event.FirstScope(address_type), range);
1768 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barrier_action, &filtered_range_gen);
1769 }
1770}
1771
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001772void CommandBufferAccessContext::ApplyGlobalBarriers(SyncEventState &sync_event, const SyncExecScope &dst,
1773 uint32_t memory_barrier_count, const VkMemoryBarrier *pMemoryBarriers,
1774 const ResourceUsageTag &tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001775 std::vector<WaitEventBarrierOp> barrier_ops;
1776 barrier_ops.reserve(std::min<uint32_t>(memory_barrier_count, 1));
1777 const auto &scope_tag = sync_event.first_scope_tag;
1778 auto *access_context = GetCurrentAccessContext();
1779 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
1780 const auto &barrier = pMemoryBarriers[barrier_index];
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001781 SyncBarrier sync_barrier(barrier, sync_event.scope, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001782 barrier_ops.emplace_back(scope_tag, sync_barrier, false);
1783 }
1784 if (0 == memory_barrier_count) {
1785 // If there are no global memory barriers, force an exec barrier
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001786 barrier_ops.emplace_back(scope_tag, SyncBarrier(sync_event.scope, dst), false);
John Zulauf4a6105a2020-11-17 15:11:05 -07001787 }
1788 ApplyBarrierOpsFunctor<WaitEventBarrierOp> barriers_functor(false /* don't resolve */, barrier_ops, tag);
1789 for (const auto address_type : kAddressTypes) {
1790 EventSimpleRangeGenerator filtered_range_gen(sync_event.FirstScope(address_type), kFullRange);
1791 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &filtered_range_gen);
1792 }
1793
1794 // Apply the global barrier to the event itself (for race condition tracking)
1795    // Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS bit, if set, for inter-event calls (see the note after this function)
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001796 sync_event.barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1797 sync_event.barriers |= dst.exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07001798}
1799
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001800void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
1801 const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
John Zulauf4a6105a2020-11-17 15:11:05 -07001802 for (auto &event_pair : event_state_) {
1803 assert(event_pair.second); // Shouldn't be storing empty
1804 auto &sync_event = *event_pair.second;
1805        // Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS bit, if set, for inter-event calls
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001806 if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
1807 sync_event.barriers |= dst.exec_scope;
1808 sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
John Zulauf4a6105a2020-11-17 15:11:05 -07001809 }
1810 }
1811}
1812
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001813void CommandBufferAccessContext::ApplyImageBarriers(const SyncEventState &sync_event, const SyncExecScope &dst,
1814 uint32_t barrier_count, const VkImageMemoryBarrier *barriers,
1815 const ResourceUsageTag &tag) {
John Zulauf4a6105a2020-11-17 15:11:05 -07001816 const auto &scope_tag = sync_event.first_scope_tag;
1817 auto *access_context = GetCurrentAccessContext();
1818 for (uint32_t index = 0; index < barrier_count; index++) {
1819 const auto &barrier = barriers[index];
1820 const auto *image = sync_state_->Get<IMAGE_STATE>(barrier.image);
1821 if (!image) continue;
1822 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
1823 bool layout_transition = barrier.oldLayout != barrier.newLayout;
Jeremy Gebben9893daf2021-01-04 10:40:50 -07001824 const SyncBarrier sync_barrier(barrier, sync_event.scope, dst);
John Zulauf4a6105a2020-11-17 15:11:05 -07001825 const ApplyBarrierFunctor<WaitEventBarrierOp> barrier_action({scope_tag, sync_barrier, layout_transition});
1826 const auto base_address = ResourceBaseAddress(*image);
1827 subresource_adapter::ImageRangeGenerator range_gen(*image->fragment_encoder.get(), subresource_range, {0, 0, 0},
1828 image->createInfo.extent, base_address);
1829 const auto address_type = AccessContext::ImageAddressType(*image);
1830 EventImageRangeGenerator filtered_range_gen(sync_event.FirstScope(address_type), range_gen);
1831 UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barrier_action, &filtered_range_gen);
1832 }
1833}
John Zulaufb02c1eb2020-10-06 16:33:36 -06001834
John Zulauf355e49b2020-04-24 15:11:15 -06001835// Class CommandBufferAccessContext: Keep track of resource access state information for a specific command buffer
1836bool CommandBufferAccessContext::ValidateBeginRenderPass(const RENDER_PASS_STATE &rp_state,
1837
1838 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08001839 const VkSubpassBeginInfo *pSubpassBeginInfo, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06001840    // Check if any of the layout transitions are hazardous.... but we don't have the renderpass context to work with, so we construct a temporary proxy context below
1841 bool skip = false;
John Zulauf355e49b2020-04-24 15:11:15 -06001842
John Zulauf86356ca2020-10-19 11:46:41 -06001843 assert(pRenderPassBegin);
1844 if (nullptr == pRenderPassBegin) return skip;
John Zulauf355e49b2020-04-24 15:11:15 -06001845
John Zulauf86356ca2020-10-19 11:46:41 -06001846 const uint32_t subpass = 0;
John Zulauf355e49b2020-04-24 15:11:15 -06001847
John Zulauf86356ca2020-10-19 11:46:41 -06001848 // Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
1849 // hasn't happened yet)
1850 const std::vector<AccessContext> empty_context_vector;
1851 AccessContext temp_context(subpass, queue_flags_, rp_state.subpass_dependencies, empty_context_vector,
1852 const_cast<AccessContext *>(&cb_access_context_));
John Zulauf355e49b2020-04-24 15:11:15 -06001853
John Zulauf86356ca2020-10-19 11:46:41 -06001854 // Create a view list
1855 const auto fb_state = sync_state_->Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
1856 assert(fb_state);
1857 if (nullptr == fb_state) return skip;
1858 // NOTE: Must not use COMMAND_BUFFER_STATE variant of this as RecordCmdBeginRenderPass hasn't run and thus
1859 // the activeRenderPass.* fields haven't been set.
1860 const auto views = sync_state_->GetAttachmentViews(*pRenderPassBegin, *fb_state);
1861
1862 // Validate transitions
John Zulauffaea0ee2021-01-14 14:01:32 -07001863 skip |= temp_context.ValidateLayoutTransitions(*this, rp_state, pRenderPassBegin->renderArea, subpass, views, func_name);
John Zulauf86356ca2020-10-19 11:46:41 -06001864
1865 // Validate load operations if there were no layout transition hazards
1866 if (!skip) {
1867 temp_context.RecordLayoutTransitions(rp_state, subpass, views, kCurrentCommandTag);
John Zulauffaea0ee2021-01-14 14:01:32 -07001868 skip |= temp_context.ValidateLoadOperation(*this, rp_state, pRenderPassBegin->renderArea, subpass, views, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06001869 }
John Zulauf86356ca2020-10-19 11:46:41 -06001870
John Zulauf355e49b2020-04-24 15:11:15 -06001871 return skip;
1872}
1873
locke-lunarg61870c22020-06-09 14:51:50 -06001874bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
1875 const char *func_name) const {
1876 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001877 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06001878 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001879 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
1880 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06001881 return skip;
1882 }
1883
1884 using DescriptorClass = cvdescriptorset::DescriptorClass;
1885 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
1886 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
1887 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
1888 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
1889
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001890 for (const auto &stage_state : pipe->stage_state) {
1891 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
1892 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06001893 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001894 }
locke-lunarg61870c22020-06-09 14:51:50 -06001895 for (const auto &set_binding : stage_state.descriptor_uses) {
1896 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
1897 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
1898 set_binding.first.second);
1899 const auto descriptor_type = binding_it.GetType();
1900 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
1901 auto array_idx = 0;
1902
1903 if (binding_it.IsVariableDescriptorCount()) {
1904 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
1905 }
1906 SyncStageAccessIndex sync_index =
1907 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
1908
1909 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
1910 uint32_t index = i - index_range.start;
1911 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
1912 switch (descriptor->GetClass()) {
1913 case DescriptorClass::ImageSampler:
1914 case DescriptorClass::Image: {
1915 const IMAGE_VIEW_STATE *img_view_state = nullptr;
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001916 VkImageLayout image_layout;
locke-lunarg61870c22020-06-09 14:51:50 -06001917 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001918 const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
1919 img_view_state = image_sampler_descriptor->GetImageViewState();
1920 image_layout = image_sampler_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001921 } else {
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001922 const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
1923 img_view_state = image_descriptor->GetImageViewState();
1924 image_layout = image_descriptor->GetImageLayout();
locke-lunarg61870c22020-06-09 14:51:50 -06001925 }
1926 if (!img_view_state) continue;
1927 const IMAGE_STATE *img_state = img_view_state->image_state.get();
1928 VkExtent3D extent = {};
1929 VkOffset3D offset = {};
1930 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
1931 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
1932 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
1933 } else {
1934 extent = img_state->createInfo.extent;
1935 }
John Zulauf361fb532020-07-22 10:45:39 -06001936 HazardResult hazard;
1937 const auto &subresource_range = img_view_state->normalized_subresource_range;
1938 if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
1939 // Input attachments are subject to raster ordering rules
1940 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
John Zulauf8e3c3e92021-01-06 11:19:36 -07001941 SyncOrdering::kRaster, offset, extent);
John Zulauf361fb532020-07-22 10:45:39 -06001942 } else {
1943 hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range, offset, extent);
1944 }
John Zulauf33fc1d52020-07-17 11:01:10 -06001945 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
John Zulauf1dae9192020-06-16 15:46:44 -06001946 skip |= sync_state_->LogError(
1947 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001948 "%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
1949 ", index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06001950 func_name, string_SyncHazard(hazard.hazard),
1951 sync_state_->report_data->FormatHandle(img_view_state->image_view).c_str(),
1952 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001953 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001954 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1955 string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
John Zulauffaea0ee2021-01-14 14:01:32 -07001956 set_binding.first.second, index, FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001957 }
1958 break;
1959 }
1960 case DescriptorClass::TexelBuffer: {
1961 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
1962 if (!buf_view_state) continue;
1963 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06001964 const ResourceAccessRange range = MakeRange(*buf_view_state);
locke-lunarg61870c22020-06-09 14:51:50 -06001965 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf33fc1d52020-07-17 11:01:10 -06001966 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001967 skip |= sync_state_->LogError(
1968 buf_view_state->buffer_view, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001969 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1970 func_name, string_SyncHazard(hazard.hazard),
locke-lunarg88dbb542020-06-23 22:05:42 -06001971 sync_state_->report_data->FormatHandle(buf_view_state->buffer_view).c_str(),
1972 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001973 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001974 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1975 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001976 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001977 }
1978 break;
1979 }
1980 case DescriptorClass::GeneralBuffer: {
1981 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
1982 auto buf_state = buffer_descriptor->GetBufferState();
1983 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06001984 const ResourceAccessRange range =
1985 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
locke-lunarg61870c22020-06-09 14:51:50 -06001986 auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
John Zulauf3ac701a2020-09-07 14:34:41 -06001987 if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
locke-lunarg88dbb542020-06-23 22:05:42 -06001988 skip |= sync_state_->LogError(
1989 buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001990 "%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
1991 func_name, string_SyncHazard(hazard.hazard),
1992 sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
locke-lunarg88dbb542020-06-23 22:05:42 -06001993 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07001994 sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
locke-lunarg7cc0ead2020-07-17 14:29:16 -06001995 sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
1996 string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
John Zulauffaea0ee2021-01-14 14:01:32 -07001997 FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06001998 }
1999 break;
2000 }
2001 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2002 default:
2003 break;
2004 }
2005 }
2006 }
2007 }
2008 return skip;
2009}
2010
2011void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
2012 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002013 const PIPELINE_STATE *pipe = nullptr;
locke-lunarg61870c22020-06-09 14:51:50 -06002014 const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002015 GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
2016 if (!pipe || !per_sets) {
locke-lunarg61870c22020-06-09 14:51:50 -06002017 return;
2018 }
2019
2020 using DescriptorClass = cvdescriptorset::DescriptorClass;
2021 using BufferDescriptor = cvdescriptorset::BufferDescriptor;
2022 using ImageDescriptor = cvdescriptorset::ImageDescriptor;
2023 using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
2024 using TexelDescriptor = cvdescriptorset::TexelDescriptor;
2025
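    // Mirror of the validation pass above: record (rather than check) each bound resource access into the access state map.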
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002026 for (const auto &stage_state : pipe->stage_state) {
2027 if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
2028 pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
locke-lunarge9f1cdf2020-06-12 12:28:57 -06002029 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002030 }
locke-lunarg61870c22020-06-09 14:51:50 -06002031 for (const auto &set_binding : stage_state.descriptor_uses) {
2032 cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
2033 cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
2034 set_binding.first.second);
2035 const auto descriptor_type = binding_it.GetType();
2036 cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
2037 auto array_idx = 0;
2038
2039 if (binding_it.IsVariableDescriptorCount()) {
2040 index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
2041 }
2042 SyncStageAccessIndex sync_index =
2043 GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
2044
2045 for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
2046 const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
2047 switch (descriptor->GetClass()) {
2048 case DescriptorClass::ImageSampler:
2049 case DescriptorClass::Image: {
2050 const IMAGE_VIEW_STATE *img_view_state = nullptr;
2051 if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
2052 img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
2053 } else {
2054 img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
2055 }
2056 if (!img_view_state) continue;
2057 const IMAGE_STATE *img_state = img_view_state->image_state.get();
2058 VkExtent3D extent = {};
2059 VkOffset3D offset = {};
2060 if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
2061 extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
2062 offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
2063 } else {
2064 extent = img_state->createInfo.extent;
2065 }
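                        // Input attachments follow raster-order (framebuffer-local) rules; all other image descriptors use the
                        // non-attachment ordering, matching the hazard checks in the validation pass above.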
John Zulauf8e3c3e92021-01-06 11:19:36 -07002066 SyncOrdering ordering_rule = (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
2067 ? SyncOrdering::kRaster
2068 : SyncOrdering::kNonAttachment;
2069 current_context_->UpdateAccessState(*img_state, sync_index, ordering_rule,
2070 img_view_state->normalized_subresource_range, offset, extent, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002071 break;
2072 }
2073 case DescriptorClass::TexelBuffer: {
2074 auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
2075 if (!buf_view_state) continue;
2076 const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002077 const ResourceAccessRange range = MakeRange(*buf_view_state);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002078 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002079 break;
2080 }
2081 case DescriptorClass::GeneralBuffer: {
2082 const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
2083 auto buf_state = buffer_descriptor->GetBufferState();
2084 if (!buf_state) continue;
John Zulauf3e86bf02020-09-12 10:47:57 -06002085 const ResourceAccessRange range =
2086 MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
John Zulauf8e3c3e92021-01-06 11:19:36 -07002087 current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002088 break;
2089 }
2090 // TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
2091 default:
2092 break;
2093 }
2094 }
2095 }
2096 }
2097}
2098
2099bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
2100 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002101 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2102 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002103 return skip;
2104 }
2105
2106 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2107 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002108 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002109
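    // For each vertex binding the pipeline consumes, compute the byte range this draw reads
    // (from firstVertex/vertexCount and the binding stride) and hazard-check it as a vertex attribute read.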
2110 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002111 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002112 if (binding_description.binding < binding_buffers_size) {
2113 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002114 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002115
locke-lunarg1ae57d62020-11-18 10:49:19 -07002116 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002117 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2118 vertexCount, binding_description.stride);
locke-lunarg61870c22020-06-09 14:51:50 -06002119 auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_INPUT_VERTEX_ATTRIBUTE_READ, range);
2120 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002121 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002122 buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002123 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002124 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002125 }
2126 }
2127 }
2128 return skip;
2129}
2130
2131void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002132 const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
2133 if (!pipe) {
locke-lunarg61870c22020-06-09 14:51:50 -06002134 return;
2135 }
2136 const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
2137 const auto &binding_buffers_size = binding_buffers.size();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002138 const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
locke-lunarg61870c22020-06-09 14:51:50 -06002139
2140 for (size_t i = 0; i < binding_descriptions_size; ++i) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002141 const auto &binding_description = pipe->vertex_binding_descriptions_[i];
locke-lunarg61870c22020-06-09 14:51:50 -06002142 if (binding_description.binding < binding_buffers_size) {
2143 const auto &binding_buffer = binding_buffers[binding_description.binding];
locke-lunarg1ae57d62020-11-18 10:49:19 -07002144 if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
locke-lunarg61870c22020-06-09 14:51:50 -06002145
locke-lunarg1ae57d62020-11-18 10:49:19 -07002146 auto *buf_state = binding_buffer.buffer_state.get();
John Zulauf3e86bf02020-09-12 10:47:57 -06002147 const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
2148 vertexCount, binding_description.stride);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002149 current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_INPUT_VERTEX_ATTRIBUTE_READ, SyncOrdering::kNonAttachment,
2150 range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002151 }
2152 }
2153}
2154
2155bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
2156 bool skip = false;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002157 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002158 return skip;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002159 }
locke-lunarg61870c22020-06-09 14:51:50 -06002160
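    // Compute the index buffer range this draw reads (from firstIndex/indexCount and the index type size)
    // and hazard-check it as an index read.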
locke-lunarg1ae57d62020-11-18 10:49:19 -07002161 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002162 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002163 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2164 firstIndex, indexCount, index_size);
locke-lunarg61870c22020-06-09 14:51:50 -06002165 auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_VERTEX_INPUT_INDEX_READ, range);
2166 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002167 skip |= sync_state_->LogError(
John Zulauf59e25072020-07-17 10:55:21 -06002168 index_buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002169 func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002170 sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002171 }
2172
2173     // TODO: For now, we detect against the whole vertex buffer. The index buffer contents could change at any time up to queue submission.
2174     // We will detect a more accurate range in the future.
2175 skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
2176 return skip;
2177}
2178
2179void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag &tag) {
locke-lunarg1ae57d62020-11-18 10:49:19 -07002180 if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) return;
locke-lunarg61870c22020-06-09 14:51:50 -06002181
locke-lunarg1ae57d62020-11-18 10:49:19 -07002182 auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
locke-lunarg61870c22020-06-09 14:51:50 -06002183 const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
John Zulauf3e86bf02020-09-12 10:47:57 -06002184 const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
2185 firstIndex, indexCount, index_size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07002186 current_context_->UpdateAccessState(*index_buf_state, SYNC_VERTEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002187
2188     // TODO: For now, we detect against the whole vertex buffer. The index buffer contents could change at any time up to queue submission.
2189     // We will detect a more accurate range in the future.
2190 RecordDrawVertex(UINT32_MAX, 0, tag);
2191}
2192
2193bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
locke-lunarg7077d502020-06-18 21:37:26 -06002194 bool skip = false;
2195 if (!current_renderpass_context_) return skip;
John Zulauffaea0ee2021-01-14 14:01:32 -07002196 skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(*this, *cb_state_.get(),
locke-lunarg7077d502020-06-18 21:37:26 -06002197 cb_state_->activeRenderPassBeginInfo.renderArea, func_name);
2198 return skip;
locke-lunarg61870c22020-06-09 14:51:50 -06002199}
2200
2201void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002202 if (current_renderpass_context_) {
locke-lunarg7077d502020-06-18 21:37:26 -06002203 current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), cb_state_->activeRenderPassBeginInfo.renderArea,
2204 tag);
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002205 }
locke-lunarg61870c22020-06-09 14:51:50 -06002206}
2207
John Zulauf355e49b2020-04-24 15:11:15 -06002208bool CommandBufferAccessContext::ValidateNextSubpass(const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002209 bool skip = false;
locke-lunarg7077d502020-06-18 21:37:26 -06002210 if (!current_renderpass_context_) return skip;
John Zulauffaea0ee2021-01-14 14:01:32 -07002211 skip |= current_renderpass_context_->ValidateNextSubpass(*this, cb_state_->activeRenderPassBeginInfo.renderArea, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002212
2213 return skip;
2214}
2215
2216bool CommandBufferAccessContext::ValidateEndRenderpass(const char *func_name) const {
2217 // TODO: Things to add here.
John Zulauf7635de32020-05-29 17:14:15 -06002218 // Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002219 bool skip = false;
locke-lunarg7077d502020-06-18 21:37:26 -06002220 if (!current_renderpass_context_) return skip;
John Zulauffaea0ee2021-01-14 14:01:32 -07002221 skip |= current_renderpass_context_->ValidateEndRenderPass(*this, cb_state_->activeRenderPassBeginInfo.renderArea, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002222
2223 return skip;
2224}
2225
2226void CommandBufferAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
2227 assert(sync_state_);
2228 if (!cb_state_) return;
2229
2230     // Create an access context for the current renderpass.
John Zulauf1a224292020-06-30 14:52:13 -06002231 render_pass_contexts_.emplace_back();
John Zulauf16adfc92020-04-08 10:28:33 -06002232 current_renderpass_context_ = &render_pass_contexts_.back();
John Zulauf1a224292020-06-30 14:52:13 -06002233 current_renderpass_context_->RecordBeginRenderPass(*sync_state_, *cb_state_, &cb_access_context_, queue_flags_, tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002234 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulauf16adfc92020-04-08 10:28:33 -06002235}
2236
John Zulauffaea0ee2021-01-14 14:01:32 -07002237void CommandBufferAccessContext::RecordNextSubpass(const RENDER_PASS_STATE &rp_state, CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002238 assert(current_renderpass_context_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002239 auto prev_tag = NextCommandTag(command);
2240 auto next_tag = NextSubcommandTag(command);
2241 current_renderpass_context_->RecordNextSubpass(cb_state_->activeRenderPassBeginInfo.renderArea, prev_tag, next_tag);
John Zulauf16adfc92020-04-08 10:28:33 -06002242 current_context_ = &current_renderpass_context_->CurrentContext();
2243}
2244
John Zulauffaea0ee2021-01-14 14:01:32 -07002245void CommandBufferAccessContext::RecordEndRenderPass(const RENDER_PASS_STATE &render_pass, CMD_TYPE command) {
John Zulauf16adfc92020-04-08 10:28:33 -06002246 assert(current_renderpass_context_);
2247 if (!current_renderpass_context_) return;
2248
John Zulauffaea0ee2021-01-14 14:01:32 -07002249 current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, cb_state_->activeRenderPassBeginInfo.renderArea,
2250 NextCommandTag(command));
John Zulauf355e49b2020-04-24 15:11:15 -06002251 current_context_ = &cb_access_context_;
John Zulauf16adfc92020-04-08 10:28:33 -06002252 current_renderpass_context_ = nullptr;
2253}
2254
John Zulauf49beb112020-11-04 16:06:31 -07002255bool CommandBufferAccessContext::ValidateSetEvent(VkCommandBuffer commandBuffer, VkEvent event,
2256 VkPipelineStageFlags stageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002257 // I'll put this here just in case we need to pass this in for future extension support
2258 const auto cmd = CMD_SETEVENT;
2259 bool skip = false;
2260 const auto *sync_event = GetEventState(event);
2261 if (!sync_event) return false; // Core, Lifetimes, or Param check needs to catch invalid events.
2262
2263 const char *const reset_set =
2264 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
2265 "hazards.";
2266 const char *const wait =
2267 "%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";
2268
2269 const auto exec_scope = WithEarlierPipelineStages(ExpandPipelineStages(GetQueueFlags(), stageMask));
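    // Without an execution barrier covering the event's previous command, this set races that command;
    // the switch below selects the appropriate VUID and message for each prior command.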
2270 if (!sync_event->HasBarrier(stageMask, exec_scope)) {
2271 const char *vuid = nullptr;
2272 const char *message = nullptr;
2273 switch (sync_event->last_command) {
2274 case CMD_RESETEVENT:
2275 // Needs a barrier between reset and set
2276 vuid = "SYNC-vkCmdSetEvent-missingbarrier-reset";
2277 message = reset_set;
2278 break;
2279 case CMD_SETEVENT:
2280 // Needs a barrier between set and set
2281 vuid = "SYNC-vkCmdSetEvent-missingbarrier-set";
2282 message = reset_set;
2283 break;
2284 case CMD_WAITEVENTS:
2285 // Needs a barrier or is in second execution scope
2286 vuid = "SYNC-vkCmdSetEvent-missingbarrier-wait";
2287 message = wait;
2288 break;
2289 default:
2290                 // The only other valid value for last_command here is CMD_NONE (no prior event command).
2291 assert(sync_event->last_command == CMD_NONE);
2292 break;
2293 }
2294 if (vuid) {
2295 assert(nullptr != message);
2296 const char *const cmd_name = CommandTypeString(cmd);
2297 skip |= sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2298 cmd_name, CommandTypeString(sync_event->last_command));
2299 }
2300 }
2301
2302 return skip;
John Zulauf49beb112020-11-04 16:06:31 -07002303}
2304
John Zulauf4a6105a2020-11-17 15:11:05 -07002305void CommandBufferAccessContext::RecordSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask,
2306 const ResourceUsageTag &tag) {
2307 auto *sync_event = GetEventState(event);
2308 if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
2309
2310 // NOTE: We're going to simply record the sync scope here, as anything else would be implementation defined/undefined
2311 // and we're issuing errors re: missing barriers between event commands, which if the user fixes would fix
2312 // any issues caused by naive scope setting here.
2313
2314 // What happens with two SetEvent is that one cannot know what group of operations will be waited for.
2315 // Given:
2316 // Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
2317 // WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002318 auto scope = SyncExecScope::MakeSrc(GetQueueFlags(), stageMask);
John Zulauf4a6105a2020-11-17 15:11:05 -07002319
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002320 if (!sync_event->HasBarrier(stageMask, scope.exec_scope)) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002321 sync_event->unsynchronized_set = sync_event->last_command;
2322 sync_event->ResetFirstScope();
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002323 } else if (sync_event->scope.exec_scope == 0) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002324 // We only set the scope if there isn't one
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002325 sync_event->scope = scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07002326
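        // Snapshot every access currently in the event's source scope (or its barrier chain) into first_scope;
        // a later vkCmdWaitEvents applies its barriers only against these captured accesses.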
2327 auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
2328 auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002329 if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
John Zulauf4a6105a2020-11-17 15:11:05 -07002330 scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
2331 }
2332 };
2333 GetCurrentAccessContext()->ForAll(set_scope);
2334 sync_event->unsynchronized_set = CMD_NONE;
2335 sync_event->first_scope_tag = tag;
2336 }
2337 sync_event->last_command = CMD_SETEVENT;
2338 sync_event->barriers = 0U;
2339}
John Zulauf49beb112020-11-04 16:06:31 -07002340
2341bool CommandBufferAccessContext::ValidateResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
2342 VkPipelineStageFlags stageMask) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002343 // I'll put this here just in case we need to pass this in for future extension support
2344 const auto cmd = CMD_RESETEVENT;
2345
2346 bool skip = false;
2347 // TODO: EVENTS:
2348 // What is it we need to check... that we've had a reset since a set? Set/Set seems ill formed...
2349 const auto *sync_event = GetEventState(event);
2350 if (!sync_event) return false; // Core, Lifetimes, or Param check needs to catch invalid events.
2351
2352 const char *const set_wait =
2353 "%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
2354 "hazards.";
2355 const char *message = set_wait; // Only one message this call.
2356 const auto exec_scope = WithEarlierPipelineStages(ExpandPipelineStages(GetQueueFlags(), stageMask));
2357 if (!sync_event->HasBarrier(stageMask, exec_scope)) {
2358 const char *vuid = nullptr;
2359 switch (sync_event->last_command) {
2360 case CMD_SETEVENT:
2361 // Needs a barrier between set and reset
2362 vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
2363 break;
2364 case CMD_WAITEVENTS: {
2365                 // Needs to be in the barriers chain (either because of a barrier, or because of the wait's dstStageMask)
2366 vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
2367 break;
2368 }
2369 default:
2370                 // The only other valid last_command values here are CMD_NONE and CMD_RESETEVENT.
2371 assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT));
2372 break;
2373 }
2374 if (vuid) {
2375 const char *const cmd_name = CommandTypeString(cmd);
2376 skip |= sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2377 cmd_name, CommandTypeString(sync_event->last_command));
2378 }
2379 }
2380 return skip;
John Zulauf49beb112020-11-04 16:06:31 -07002381}
2382
John Zulauf4a6105a2020-11-17 15:11:05 -07002383void CommandBufferAccessContext::RecordResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
2384 const auto cmd = CMD_RESETEVENT;
2385 auto *sync_event = GetEventState(event);
2386 if (!sync_event) return;
John Zulauf49beb112020-11-04 16:06:31 -07002387
John Zulauf4a6105a2020-11-17 15:11:05 -07002388 // Clear out the first sync scope, any races vs. wait or set are reported, so we'll keep the bookkeeping simple assuming
2389 // the safe case
2390 for (const auto address_type : kAddressTypes) {
2391 sync_event->first_scope[static_cast<size_t>(address_type)].clear();
2392 }
2393
2394 // Update the event state
2395 sync_event->last_command = cmd;
2396 sync_event->unsynchronized_set = CMD_NONE;
2397 sync_event->ResetFirstScope();
2398 sync_event->barriers = 0U;
2399}
2400
2401bool CommandBufferAccessContext::ValidateWaitEvents(uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
2402 VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
2403 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
John Zulauf49beb112020-11-04 16:06:31 -07002404 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
2405 uint32_t imageMemoryBarrierCount,
2406 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
John Zulauf4a6105a2020-11-17 15:11:05 -07002407 const auto cmd = CMD_WAITEVENTS;
2408 const char *const ignored = "Wait operation is ignored for this event.";
2409 bool skip = false;
2410
2411 if (srcStageMask & VK_PIPELINE_STAGE_HOST_BIT) {
2412 const char *const cmd_name = CommandTypeString(cmd);
2413 const char *const vuid = "SYNC-vkCmdWaitEvents-hostevent-unsupported";
John Zulauffe757512020-12-18 12:17:47 -07002414 skip = sync_state_->LogInfo(cb_state_->commandBuffer, vuid,
2415                                     "%s, srcStageMask includes %s, unsupported by synchronization validation.", cmd_name,
2416 string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT), ignored);
John Zulauf4a6105a2020-11-17 15:11:05 -07002417 }
2418
2419 VkPipelineStageFlags event_stage_masks = 0U;
John Zulauffe757512020-12-18 12:17:47 -07002420 bool events_not_found = false;
John Zulauf4a6105a2020-11-17 15:11:05 -07002421 for (uint32_t event_index = 0; event_index < eventCount; event_index++) {
2422 const auto event = pEvents[event_index];
2423 const auto *sync_event = GetEventState(event);
John Zulauffe757512020-12-18 12:17:47 -07002424 if (!sync_event) {
2425 // NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
2426 events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives.
2427
2428 continue; // Core, Lifetimes, or Param check needs to catch invalid events.
2429 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002430
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002431 event_stage_masks |= sync_event->scope.mask_param;
John Zulauf4a6105a2020-11-17 15:11:05 -07002432 const auto ignore_reason = sync_event->IsIgnoredByWait(srcStageMask);
2433 if (ignore_reason) {
2434 switch (ignore_reason) {
2435 case SyncEventState::ResetWaitRace: {
2436 const char *const cmd_name = CommandTypeString(cmd);
2437 const char *const vuid = "SYNC-vkCmdWaitEvents-missingbarrier-reset";
2438 const char *const message =
2439 "%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
2440 skip |=
2441 sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2442 cmd_name, CommandTypeString(sync_event->last_command), ignored);
2443 break;
2444 }
2445 case SyncEventState::SetRace: {
2446                 // Issue error message that Wait is waiting on a signal subject to a race condition, and is thus ignored for this
2447 // event
2448 const char *const cmd_name = CommandTypeString(cmd);
2449 const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
2450 const char *const message =
2451                     "%s: %s Unsynchronized %s calls result in race conditions w.r.t. event signalling, %s %s";
2452 const char *const reason = "First synchronization scope is undefined.";
2453 skip |=
2454 sync_state_->LogError(event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
2455 CommandTypeString(sync_event->last_command), reason, ignored);
2456 break;
2457 }
2458 case SyncEventState::MissingStageBits: {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002459 const VkPipelineStageFlags missing_bits = sync_event->scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07002460                 // Issue error message that the event's stageMask includes bits not present in the wait's srcStageMask
2461 const char *const cmd_name = CommandTypeString(cmd);
2462 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
2463 const char *const message =
2464 "%s: %s stageMask 0x%" PRIx32 " includes bits not present in srcStageMask 0x%" PRIx32
2465 ". Bits missing from srcStageMask %s. %s";
2466 skip |= sync_state_->LogError(
2467 event, vuid, message, cmd_name, sync_state_->report_data->FormatHandle(event).c_str(),
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002468 sync_event->scope.mask_param, srcStageMask, string_VkPipelineStageFlags(missing_bits).c_str(), ignored);
John Zulauf4a6105a2020-11-17 15:11:05 -07002469 break;
2470 }
2471 default:
2472 assert(ignore_reason == SyncEventState::NotIgnored);
2473 }
2474 } else if (imageMemoryBarrierCount) {
2475 const auto *context = GetCurrentAccessContext();
2476 assert(context);
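            // Only image barriers that perform a layout transition are checked here; the transition is hazard-checked
            // against the accesses captured in this event's first scope.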
2477 for (uint32_t barrier_index = 0; barrier_index < imageMemoryBarrierCount; barrier_index++) {
2478 const auto &barrier = pImageMemoryBarriers[barrier_index];
2479 if (barrier.oldLayout == barrier.newLayout) continue;
2480 const auto *image_state = sync_state_->Get<IMAGE_STATE>(barrier.image);
2481 if (!image_state) continue;
2482 auto subresource_range = NormalizeSubresourceRange(image_state->createInfo, barrier.subresourceRange);
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002483 const auto src_access_scope = SyncStageAccess::AccessScope(sync_event->scope.valid_accesses, barrier.srcAccessMask);
John Zulauf4a6105a2020-11-17 15:11:05 -07002484 const auto hazard =
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002485 context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
2486 subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
John Zulauf4a6105a2020-11-17 15:11:05 -07002487 if (hazard.hazard) {
2488 const char *const cmd_name = CommandTypeString(cmd);
2489 skip |= sync_state_->LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
2490 "%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", cmd_name,
2491 string_SyncHazard(hazard.hazard), barrier_index,
2492 sync_state_->report_data->FormatHandle(barrier.image).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07002493 FormatUsage(hazard).c_str());
John Zulauf4a6105a2020-11-17 15:11:05 -07002494 break;
2495 }
2496 }
2497 }
2498 }
2499
2500 // Note that we can't check for HOST in pEvents as we don't track that set event type
2501 const auto extra_stage_bits = (srcStageMask & ~VK_PIPELINE_STAGE_HOST_BIT) & ~event_stage_masks;
2502 if (extra_stage_bits) {
2503         // Issue error message that srcStageMask contains stages not covered by any event's stageMask
2504 const char *const cmd_name = CommandTypeString(cmd);
2505 const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
2506 const char *const message =
John Zulauffe757512020-12-18 12:17:47 -07002507 "%s: srcStageMask 0x%" PRIx32 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
2508 if (events_not_found) {
2509 skip |= sync_state_->LogInfo(cb_state_->commandBuffer, vuid, message, cmd_name, srcStageMask,
2510 string_VkPipelineStageFlags(extra_stage_bits).c_str(),
2511 " vkCmdSetEvent may be in previously submitted command buffer.");
2512 } else {
2513 skip |= sync_state_->LogError(cb_state_->commandBuffer, vuid, message, cmd_name, srcStageMask,
2514 string_VkPipelineStageFlags(extra_stage_bits).c_str(), "");
2515 }
John Zulauf4a6105a2020-11-17 15:11:05 -07002516 }
2517 return skip;
John Zulauf49beb112020-11-04 16:06:31 -07002518}
2519
2520void CommandBufferAccessContext::RecordWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
2521 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
2522 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
2523 uint32_t bufferMemoryBarrierCount,
2524 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
2525 uint32_t imageMemoryBarrierCount,
John Zulauf4a6105a2020-11-17 15:11:05 -07002526 const VkImageMemoryBarrier *pImageMemoryBarriers, const ResourceUsageTag &tag) {
2527 auto *access_context = GetCurrentAccessContext();
2528 assert(access_context);
2529 if (!access_context) return;
2530
2531 // Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
2532     // all accesses. We could instead import only the first_scopes, or a union of them, if this becomes a performance/memory issue,
2533     // but with no idea of the performance of the union, nor of whether it even matters... we take the simplest approach here.
John Zulauf4a6105a2020-11-17 15:11:05 -07002534 access_context->ResolvePreviousAccesses();
2535
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002536 auto dst = SyncExecScope::MakeDst(GetQueueFlags(), dstStageMask);
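    // For each event whose wait is not ignored, apply the supplied barriers against the accesses captured in that
    // event's first scope; an ignored wait contributes no synchronization for that event.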
John Zulauf4a6105a2020-11-17 15:11:05 -07002537 for (uint32_t event_index = 0; event_index < eventCount; event_index++) {
2538 const auto event = pEvents[event_index];
2539 auto *sync_event = GetEventState(event);
2540 if (!sync_event) continue;
2541
2542 sync_event->last_command = CMD_WAITEVENTS;
2543
2544 if (!sync_event->IsIgnoredByWait(srcStageMask)) {
2545             // These apply barriers one at a time as they are restricted to the resource ranges specified per each barrier,
2546             // but do not update the dependency chain information (they only set the "pending" state), so that the order independence
2547             // of the barriers is maintained.
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002548 ApplyBufferBarriers(*sync_event, dst, bufferMemoryBarrierCount, pBufferMemoryBarriers);
2549 ApplyImageBarriers(*sync_event, dst, imageMemoryBarrierCount, pImageMemoryBarriers, tag);
2550 ApplyGlobalBarriers(*sync_event, dst, memoryBarrierCount, pMemoryBarriers, tag);
John Zulauf4a6105a2020-11-17 15:11:05 -07002551 } else {
2552 // We ignored this wait, so we don't have any effective synchronization barriers for it.
2553 sync_event->barriers = 0U;
2554 }
2555 }
2556
2557 // Apply the pending barriers
2558 ResolvePendingBarrierFunctor apply_pending_action(tag);
2559 access_context->ApplyGlobalBarriers(apply_pending_action);
2560}
2561
2562void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
2563     // Erase is okay with the key not being present
2564 event_state_.erase(event);
2565}
2566
2567SyncEventState *CommandBufferAccessContext::GetEventState(VkEvent event) {
2568 auto &event_up = event_state_[event];
2569 if (!event_up) {
2570         auto event_state = sync_state_->GetShared<EVENT_STATE>(event);
2571         event_up.reset(new SyncEventState(event_state));
2572 }
2573 return event_up.get();
2574}
2575
2576const SyncEventState *CommandBufferAccessContext::GetEventState(VkEvent event) const {
2577 auto event_it = event_state_.find(event);
2578 if (event_it == event_state_.cend()) {
2579 return nullptr;
2580 }
2581 return event_it->second.get();
2582}
John Zulauf49beb112020-11-04 16:06:31 -07002583
John Zulauffaea0ee2021-01-14 14:01:32 -07002584bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandBufferAccessContext &cb_context,
2585 const CMD_BUFFER_STATE &cmd, const VkRect2D &render_area,
2586 const char *func_name) const {
locke-lunarg61870c22020-06-09 14:51:50 -06002587 bool skip = false;
John Zulauffaea0ee2021-01-14 14:01:32 -07002588 const auto &sync_state = cb_context.GetSyncState();
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002589 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2590 if (!pipe ||
2591 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002592 return skip;
2593 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002594 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002595 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
2596 VkExtent3D extent = CastTo3D(render_area.extent);
2597 VkOffset3D offset = CastTo3D(render_area.offset);
locke-lunarg37047832020-06-12 13:44:45 -06002598
John Zulauf1a224292020-06-30 14:52:13 -06002599 const auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002600     // The subpass's input attachments were already validated in ValidateDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002601 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
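        // Each color attachment the fragment shader can write is hazard-checked as a color-attachment write over the render area.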
2602 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002603 if (location >= subpass.colorAttachmentCount ||
2604 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002605 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002606 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002607 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf1a224292020-06-30 14:52:13 -06002608 HazardResult hazard = current_context.DetectHazard(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002609 SyncOrdering::kColorAttachment, offset, extent);
locke-lunarg96dc9632020-06-10 17:22:18 -06002610 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002611 skip |= sync_state.LogError(img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002612 "%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002613 func_name, string_SyncHazard(hazard.hazard),
2614 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2615 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauffaea0ee2021-01-14 14:01:32 -07002616 location, cb_context.FormatUsage(hazard).c_str());
locke-lunarg61870c22020-06-09 14:51:50 -06002617 }
2618 }
2619 }
locke-lunarg37047832020-06-12 13:44:45 -06002620
2621     // PHASE1 TODO: Add layout based read vs. write selection.
2622 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002623 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002624 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002625 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002626 bool depth_write = false, stencil_write = false;
2627
2628         // PHASE1 TODO: These validations should be in core_checks.
2629 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002630 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2631 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002632 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2633 depth_write = true;
2634 }
2635         // PHASE1 TODO: This needs to check whether the stencil aspect is writable.
2636         // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2637         // If the depth test is disabled, it is considered to pass, and then depthFailOp doesn't run.
2638         // PHASE1 TODO: These validations should be in core_checks.
2639 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002640 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002641 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2642 stencil_write = true;
2643 }
2644
2645 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2646 if (depth_write) {
2647 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002648 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002649 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002650 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002651 skip |= sync_state.LogError(
2652 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002653 "%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002654 func_name, string_SyncHazard(hazard.hazard),
2655 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2656 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauffaea0ee2021-01-14 14:01:32 -07002657 cb_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002658 }
2659 }
2660 if (stencil_write) {
2661 HazardResult hazard =
John Zulauf1a224292020-06-30 14:52:13 -06002662 current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
John Zulauf8e3c3e92021-01-06 11:19:36 -07002663 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT);
locke-lunarg37047832020-06-12 13:44:45 -06002664 if (hazard.hazard) {
locke-lunarg88dbb542020-06-23 22:05:42 -06002665 skip |= sync_state.LogError(
2666 img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06002667 "%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
locke-lunarg88dbb542020-06-23 22:05:42 -06002668 func_name, string_SyncHazard(hazard.hazard),
2669 sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
2670 sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
John Zulauffaea0ee2021-01-14 14:01:32 -07002671 cb_context.FormatUsage(hazard).c_str());
locke-lunarg37047832020-06-12 13:44:45 -06002672 }
locke-lunarg61870c22020-06-09 14:51:50 -06002673 }
2674 }
2675 return skip;
2676}
2677
locke-lunarg96dc9632020-06-10 17:22:18 -06002678void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const VkRect2D &render_area,
2679 const ResourceUsageTag &tag) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002680 const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
2681 if (!pipe ||
2682 (pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002683 return;
2684 }
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002685 const auto &list = pipe->fragmentShader_writable_output_location_list;
locke-lunarg61870c22020-06-09 14:51:50 -06002686 const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
2687 VkExtent3D extent = CastTo3D(render_area.extent);
2688 VkOffset3D offset = CastTo3D(render_area.offset);
2689
John Zulauf1a224292020-06-30 14:52:13 -06002690 auto &current_context = CurrentContext();
locke-lunarg44f9bb12020-06-10 14:43:57 -06002691     // The subpass's input attachments were already recorded in RecordDispatchDrawDescriptorSet
locke-lunarg96dc9632020-06-10 17:22:18 -06002692 if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
2693 for (const auto location : list) {
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002694 if (location >= subpass.colorAttachmentCount ||
2695 subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
locke-lunarg96dc9632020-06-10 17:22:18 -06002696 continue;
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002697 }
locke-lunarg96dc9632020-06-10 17:22:18 -06002698 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
John Zulauf8e3c3e92021-01-06 11:19:36 -07002699 current_context.UpdateAccessState(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
2700 SyncOrdering::kColorAttachment, offset, extent, 0, tag);
locke-lunarg61870c22020-06-09 14:51:50 -06002701 }
2702 }
locke-lunarg37047832020-06-12 13:44:45 -06002703
2704     // PHASE1 TODO: Add layout based read vs. write selection.
2705 // PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002706 if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
locke-lunarg37047832020-06-12 13:44:45 -06002707 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
locke-lunarg61870c22020-06-09 14:51:50 -06002708 const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
locke-lunarg37047832020-06-12 13:44:45 -06002709 bool depth_write = false, stencil_write = false;
2710
2711         // PHASE1 TODO: These validations should be in core_checks.
2712 if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002713 pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
2714 pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002715 IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
2716 depth_write = true;
2717 }
2718         // PHASE1 TODO: This needs to check whether the stencil aspect is writable.
2719         // If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
2720         // If the depth test is disabled, it is considered to pass, and then depthFailOp doesn't run.
2721         // PHASE1 TODO: These validations should be in core_checks.
2722 if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
Nathaniel Cesarioce9b4812020-12-17 08:55:28 -07002723 pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
locke-lunarg37047832020-06-12 13:44:45 -06002724 IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
2725 stencil_write = true;
2726 }
2727
2728 // PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
2729 if (depth_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002730 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2731 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT,
2732 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002733 }
2734 if (stencil_write) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002735 current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
2736 SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT,
2737 tag);
locke-lunarg37047832020-06-12 13:44:45 -06002738 }
locke-lunarg61870c22020-06-09 14:51:50 -06002739 }
2740}
2741
John Zulauffaea0ee2021-01-14 14:01:32 -07002742bool RenderPassAccessContext::ValidateNextSubpass(const CommandBufferAccessContext &cb_context, const VkRect2D &render_area,
John Zulauf1507ee42020-05-18 11:33:09 -06002743 const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002744 // PHASE1 TODO: Add Validate Preserve attachments
John Zulauf355e49b2020-04-24 15:11:15 -06002745 bool skip = false;
John Zulauffaea0ee2021-01-14 14:01:32 -07002746 skip |= CurrentContext().ValidateResolveOperations(cb_context, *rp_state_, render_area, attachment_views_, func_name,
John Zulaufb027cdb2020-05-21 14:25:22 -06002747 current_subpass_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002748 skip |= CurrentContext().ValidateStoreOperation(cb_context, *rp_state_, render_area, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002749 func_name);
2750
John Zulauf355e49b2020-04-24 15:11:15 -06002751 const auto next_subpass = current_subpass_ + 1;
John Zulauf1507ee42020-05-18 11:33:09 -06002752 const auto &next_context = subpass_contexts_[next_subpass];
John Zulauffaea0ee2021-01-14 14:01:32 -07002753 skip |= next_context.ValidateLayoutTransitions(cb_context, *rp_state_, render_area, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002754 if (!skip) {
2755         // To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
2756 // on a copy of the (empty) next context.
2757 // Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
2758 AccessContext temp_context(next_context);
2759 temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
John Zulauffaea0ee2021-01-14 14:01:32 -07002760 skip |= temp_context.ValidateLoadOperation(cb_context, *rp_state_, render_area, next_subpass, attachment_views_, func_name);
John Zulaufb02c1eb2020-10-06 16:33:36 -06002761 }
John Zulauf7635de32020-05-29 17:14:15 -06002762 return skip;
2763}
John Zulauffaea0ee2021-01-14 14:01:32 -07002764bool RenderPassAccessContext::ValidateEndRenderPass(const CommandBufferAccessContext &cb_context, const VkRect2D &render_area,
John Zulauf7635de32020-05-29 17:14:15 -06002765 const char *func_name) const {
John Zulaufaff20662020-06-01 14:07:58 -06002766 // PHASE1 TODO: Validate Preserve
John Zulauf7635de32020-05-29 17:14:15 -06002767 bool skip = false;
John Zulauffaea0ee2021-01-14 14:01:32 -07002768 skip |= CurrentContext().ValidateResolveOperations(cb_context, *rp_state_, render_area, attachment_views_, func_name,
John Zulauf7635de32020-05-29 17:14:15 -06002769 current_subpass_);
John Zulauffaea0ee2021-01-14 14:01:32 -07002770 skip |= CurrentContext().ValidateStoreOperation(cb_context, *rp_state_, render_area, current_subpass_, attachment_views_,
John Zulaufaff20662020-06-01 14:07:58 -06002771 func_name);
John Zulauffaea0ee2021-01-14 14:01:32 -07002772 skip |= ValidateFinalSubpassLayoutTransitions(cb_context, render_area, func_name);
John Zulauf355e49b2020-04-24 15:11:15 -06002773 return skip;
2774}
2775
John Zulauf7635de32020-05-29 17:14:15 -06002776AccessContext *RenderPassAccessContext::CreateStoreResolveProxy(const VkRect2D &render_area) const {
2777 return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, render_area, attachment_views_);
2778}
2779
John Zulauffaea0ee2021-01-14 14:01:32 -07002780bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandBufferAccessContext &cb_context,
2781 const VkRect2D &render_area, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06002782 bool skip = false;
2783
John Zulauf7635de32020-05-29 17:14:15 -06002784     // As validation methods are const and precede the record/update phase, for any transitions from the current (last)
2785 // subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
2786 // Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
2787 // to apply and only copy then, if this proves a hot spot.
2788 std::unique_ptr<AccessContext> proxy_for_current;
2789
John Zulauf355e49b2020-04-24 15:11:15 -06002790 // Validate the "finalLayout" transitions to external
2791     // Get them from where we're hiding them in the extra entry.
2792 const auto &final_transitions = rp_state_->subpass_transitions.back();
2793 for (const auto &transition : final_transitions) {
2794 const auto &attach_view = attachment_views_[transition.attachment];
2795 const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
2796 assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
John Zulauf7635de32020-05-29 17:14:15 -06002797 auto *context = trackback.context;
2798
2799 if (transition.prev_pass == current_subpass_) {
2800 if (!proxy_for_current) {
2801 // We haven't recorded resolve operations for the current_subpass, so we need to copy current and update it *as if*
2802 proxy_for_current.reset(CreateStoreResolveProxy(render_area));
2803 }
2804 context = proxy_for_current.get();
2805 }
2806
John Zulaufa0a98292020-09-18 09:30:10 -06002807 // Use the merged barrier for the hazard check (safe since it only considers the src (first) scope).
2808 const auto merged_barrier = MergeBarriers(trackback.barriers);
2809 auto hazard = context->DetectImageBarrierHazard(*attach_view->image_state, merged_barrier.src_exec_scope,
2810 merged_barrier.src_access_scope, attach_view->normalized_subresource_range,
2811 AccessContext::DetectOptions::kDetectPrevious);
John Zulauf355e49b2020-04-24 15:11:15 -06002812 if (hazard.hazard) {
John Zulauffaea0ee2021-01-14 14:01:32 -07002813 skip |= cb_context.GetSyncState().LogError(
2814 rp_state_->renderPass, string_SyncHazardVUID(hazard.hazard),
2815 "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
2816 " final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
2817 func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
2818 string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
2819 cb_context.FormatUsage(hazard).c_str());
John Zulauf355e49b2020-04-24 15:11:15 -06002820 }
2821 }
2822 return skip;
2823}
2824
2825void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
2826 // Add layout transitions...
John Zulaufb02c1eb2020-10-06 16:33:36 -06002827 subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002828}
2829
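// Record the load (and stencil load) operation accesses for each attachment first used in the current subpass.
// Depth and stencil aspects are updated independently, since they can carry different load operations.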
John Zulauf1507ee42020-05-18 11:33:09 -06002830void RenderPassAccessContext::RecordLoadOperations(const VkRect2D &render_area, const ResourceUsageTag &tag) {
2831 const auto *attachment_ci = rp_state_->createInfo.pAttachments;
2832 auto &subpass_context = subpass_contexts_[current_subpass_];
2833 VkExtent3D extent = CastTo3D(render_area.extent);
2834 VkOffset3D offset = CastTo3D(render_area.offset);
2835
2836 for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
2837 if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
2838 if (attachment_views_[i] == nullptr) continue; // UNUSED
2839 const auto &view = *attachment_views_[i];
2840 const IMAGE_STATE *image = view.image_state.get();
2841 if (image == nullptr) continue;
2842
2843 const auto &ci = attachment_ci[i];
2844 const bool has_depth = FormatHasDepth(ci.format);
John Zulaufb027cdb2020-05-21 14:25:22 -06002845 const bool has_stencil = FormatHasStencil(ci.format);
John Zulauf1507ee42020-05-18 11:33:09 -06002846 const bool is_color = !(has_depth || has_stencil);
2847
2848 if (is_color) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07002849 subpass_context.UpdateAccessState(*image, ColorLoadUsage(ci.loadOp), SyncOrdering::kColorAttachment,
2850 view.normalized_subresource_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002851 } else {
2852 auto update_range = view.normalized_subresource_range;
2853 if (has_depth) {
2854 update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002855 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.loadOp),
2856 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002857 }
2858 if (has_stencil) {
2859 update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
John Zulauf8e3c3e92021-01-06 11:19:36 -07002860 subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.stencilLoadOp),
2861 SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002862 }
2863 }
2864 }
2865 }
2866}
2867
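// Begin renderpass tracking: access contexts for *all* subpasses are created up front (so later subpasses exist for
// async-hazard checks), then the initial layout transitions and the subpass 0 load operations are recorded.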
John Zulauf355e49b2020-04-24 15:11:15 -06002868void RenderPassAccessContext::RecordBeginRenderPass(const SyncValidator &state, const CMD_BUFFER_STATE &cb_state,
John Zulauf1a224292020-06-30 14:52:13 -06002869 const AccessContext *external_context, VkQueueFlags queue_flags,
2870 const ResourceUsageTag &tag) {
John Zulauf355e49b2020-04-24 15:11:15 -06002871 current_subpass_ = 0;
locke-lunargaecf2152020-05-12 17:15:41 -06002872 rp_state_ = cb_state.activeRenderPass.get();
John Zulauf355e49b2020-04-24 15:11:15 -06002873 subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
2874 // Add this for all subpasses here so that they exsist during next subpass validation
2875 for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
John Zulauf1a224292020-06-30 14:52:13 -06002876 subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
John Zulauf355e49b2020-04-24 15:11:15 -06002877 }
2878 attachment_views_ = state.GetCurrentAttachmentViews(cb_state);
2879
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07002880 subpass_contexts_[current_subpass_].SetStartTag(tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002881 RecordLayoutTransitions(tag);
John Zulauf1507ee42020-05-18 11:33:09 -06002882 RecordLoadOperations(cb_state.activeRenderPassBeginInfo.renderArea, tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002883}
John Zulauf1507ee42020-05-18 11:33:09 -06002884
John Zulauffaea0ee2021-01-14 14:01:32 -07002885void RenderPassAccessContext::RecordNextSubpass(const VkRect2D &render_area, const ResourceUsageTag &prev_subpass_tag,
2886 const ResourceUsageTag &next_subpass_tag) {
John Zulauf7635de32020-05-29 17:14:15 -06002887 // Resolves are against *prior* subpass context and thus *before* the subpass increment
John Zulauffaea0ee2021-01-14 14:01:32 -07002888 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area, attachment_views_, current_subpass_, prev_subpass_tag);
2889 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area, attachment_views_, current_subpass_, prev_subpass_tag);
John Zulauf7635de32020-05-29 17:14:15 -06002890
Jeremy Gebben6ea9d9e2020-12-11 09:41:01 -07002891 // Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
2892 // subpass, so their tag needs to be different from the layout and load operations below.
John Zulauf355e49b2020-04-24 15:11:15 -06002893 current_subpass_++;
2894 assert(current_subpass_ < subpass_contexts_.size());
John Zulauffaea0ee2021-01-14 14:01:32 -07002895 subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
2896 RecordLayoutTransitions(next_subpass_tag);
2897 RecordLoadOperations(render_area, next_subpass_tag);
John Zulauf355e49b2020-04-24 15:11:15 -06002898}
2899
John Zulauf1a224292020-06-30 14:52:13 -06002900void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const VkRect2D &render_area,
2901 const ResourceUsageTag &tag) {
John Zulaufaff20662020-06-01 14:07:58 -06002902 // Add the resolve and store accesses
John Zulauf7635de32020-05-29 17:14:15 -06002903 CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area, attachment_views_, current_subpass_, tag);
John Zulaufaff20662020-06-01 14:07:58 -06002904 CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area, attachment_views_, current_subpass_, tag);
John Zulauf7635de32020-05-29 17:14:15 -06002905
John Zulauf355e49b2020-04-24 15:11:15 -06002906 // Export the accesses from the renderpass...
John Zulauf1a224292020-06-30 14:52:13 -06002907 external_context->ResolveChildContexts(subpass_contexts_);
John Zulauf355e49b2020-04-24 15:11:15 -06002908
2909 // Add the "finalLayout" transitions to external
2910 // Get them from where we're hiding them in the extra entry.
John Zulauf89311b42020-09-29 16:28:47 -06002911 // Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers
2912 // TODO Aliasing: we may need to reconsider barrier accumulation... though I don't know that it would be valid for aliasing
2913 // that had multiple final layout transitions from multiple final subpasses.
John Zulauf355e49b2020-04-24 15:11:15 -06002914 const auto &final_transitions = rp_state_->subpass_transitions.back();
2915 for (const auto &transition : final_transitions) {
2916 const auto &attachment = attachment_views_[transition.attachment];
2917 const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
John Zulaufaa97d8b2020-07-14 10:58:13 -06002918 assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
John Zulauf1e331ec2020-12-04 18:29:38 -07002919 std::vector<PipelineBarrierOp> barrier_ops;
2920 barrier_ops.reserve(last_trackback.barriers.size());
2921 for (const auto &barrier : last_trackback.barriers) {
2922 barrier_ops.emplace_back(barrier, true);
2923 }
2924 ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, barrier_ops, tag);
2925 external_context->UpdateResourceAccess(*attachment->image_state, attachment->normalized_subresource_range, barrier_action);
John Zulauf355e49b2020-04-24 15:11:15 -06002926 }
2927}
2928
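// Builds the source execution scope of a barrier: the stage mask parameter is expanded (meta stages such as
// ALL_COMMANDS are replaced by the stages valid for the queue), and all logically *earlier* stages are folded in,
// e.g. a srcStageMask of FRAGMENT_SHADER also places the vertex and geometry stages in exec_scope.
// valid_accesses caches the access bits that can legally pair with those stages.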
Jeremy Gebben9893daf2021-01-04 10:40:50 -07002929SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags mask_param) {
2930 SyncExecScope result;
2931 result.mask_param = mask_param;
2932 result.expanded_mask = ExpandPipelineStages(queue_flags, mask_param);
2933 result.exec_scope = WithEarlierPipelineStages(result.expanded_mask);
2934 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2935 return result;
2936}
2937
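// The destination scope mirrors MakeSrc, but folds in all logically *later* stages instead of earlier ones.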
2938SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags mask_param) {
2939 SyncExecScope result;
2940 result.mask_param = mask_param;
2941 result.expanded_mask = ExpandPipelineStages(queue_flags, mask_param);
2942 result.exec_scope = WithLaterPipelineStages(result.expanded_mask);
2943 result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
2944 return result;
2945}
2946
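// Execution-only barrier: no access masks are provided, so both access scopes stay empty and only the execution
// scopes participate in hazard detection and dependency chaining.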
2947SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
2948 src_exec_scope = src.exec_scope;
2949 src_access_scope = 0;
2950 dst_exec_scope = dst.exec_scope;
2951 dst_access_scope = 0;
2952}
2953
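// Memory barrier: the access scopes are the supplied access masks restricted to the accesses that are actually
// valid for the corresponding execution scope.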
2954template <typename Barrier>
2955SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
2956 src_exec_scope = src.exec_scope;
2957 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
2958 dst_exec_scope = dst.exec_scope;
2959 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
2960}
2961
2962SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
2963 auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
2964 src_exec_scope = src.exec_scope;
2965 src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
2966
2967 auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
2968 dst_exec_scope = dst.exec_scope;
2969 dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002970}
2971
John Zulaufb02c1eb2020-10-06 16:33:36 -06002972// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
2973void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
2974 for (const auto &barrier : barriers) {
2975 ApplyBarrier(barrier, layout_transition);
2976 }
2977}
2978
John Zulauf89311b42020-09-29 16:28:47 -06002979// ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. The designed use is for
2980// inter-subpass barriers for lazy-evaluation of parent context memory ranges. Subpass layout transitions are *not* done
2981// lazily, s.t. no previous access reports should need layout transitions.
John Zulaufb02c1eb2020-10-06 16:33:36 -06002982void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag &tag) {
2983 assert(!pending_layout_transition); // This should never be call in the middle of another barrier application
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07002984 assert(pending_write_barriers.none());
John Zulaufb02c1eb2020-10-06 16:33:36 -06002985 assert(!pending_write_dep_chain);
John Zulaufa0a98292020-09-18 09:30:10 -06002986 for (const auto &barrier : barriers) {
John Zulauf89311b42020-09-29 16:28:47 -06002987 ApplyBarrier(barrier, false);
John Zulaufa0a98292020-09-18 09:30:10 -06002988 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06002989 ApplyPendingBarriers(tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -06002990}
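// Hazard detection for accesses with no ordering guarantees (copies, clears, etc.): a read is only checked against
// the most recent write (RAW); a write is checked against any prior reads (WAR) or, if there are none, against the
// prior write (WAW).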
John Zulauf9cb530d2019-09-30 14:14:10 -06002991HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
2992 HazardResult hazard;
2993 auto usage = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06002994 const auto usage_stage = PipelineStageBit(usage_index);
John Zulauf9cb530d2019-09-30 14:14:10 -06002995 if (IsRead(usage)) {
John Zulauf4285ee92020-09-23 10:20:52 -06002996 if (IsRAWHazard(usage_stage, usage)) {
John Zulauf59e25072020-07-17 10:55:21 -06002997 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06002998 }
2999 } else {
John Zulauf361fb532020-07-22 10:45:39 -06003000 // Write operation:
3001 // Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads).
3002 // If reads exist -- test only against them because either:
3003 // * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
3004 // * the read weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
3005 // * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
3006 // the current write happens after the reads, so just test the write against the reads
3007 //
3008 // Look for casus belli for WAR
John Zulaufab7756b2020-12-29 16:10:16 -07003009 if (last_reads.size()) {
3010 for (const auto &read_access : last_reads) {
John Zulauf361fb532020-07-22 10:45:39 -06003011 if (IsReadHazard(usage_stage, read_access)) {
3012 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3013 break;
3014 }
3015 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003016 } else if (last_write.any() && IsWriteHazard(usage)) {
John Zulauf361fb532020-07-22 10:45:39 -06003017 // Write-After-Write check -- if we have a previous write to test against
3018 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003019 }
3020 }
3021 return hazard;
3022}
3023
John Zulauf8e3c3e92021-01-06 11:19:36 -07003024HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering &ordering_rule) const {
3025 const auto &ordering = GetOrderingRules(ordering_rule);
John Zulauf69133422020-05-20 14:55:53 -06003026 // The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
3027 HazardResult hazard;
John Zulauf4285ee92020-09-23 10:20:52 -06003028 const auto usage_bit = FlagBit(usage_index);
John Zulauf361fb532020-07-22 10:45:39 -06003029 const auto usage_stage = PipelineStageBit(usage_index);
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003030 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
3031 const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
John Zulauf4285ee92020-09-23 10:20:52 -06003032 if (IsRead(usage_bit)) {
3033 // Exclude RAW if no write, or write not most "most recent" operation w.r.t. usage;
3034 bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
3035 if (is_raw_hazard) {
3036 // NOTE: we know last_write is non-zero
3037 // See if the ordering rules save us from the simple RAW check above
3038 // First check to see if the current usage is covered by the ordering rules
3039 const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
3040 const bool usage_is_ordered =
3041 (input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
3042 if (usage_is_ordered) {
3043 // Now see if the most recent write (or a subsequent read) is ordered
3044 const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
3045 is_raw_hazard = !most_recent_is_ordered;
John Zulauf361fb532020-07-22 10:45:39 -06003046 }
3047 }
John Zulauf4285ee92020-09-23 10:20:52 -06003048 if (is_raw_hazard) {
3049 hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
3050 }
John Zulauf361fb532020-07-22 10:45:39 -06003051 } else {
3052 // Only check for WAW if there are no reads since last_write
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003053 bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
John Zulaufab7756b2020-12-29 16:10:16 -07003054 if (last_reads.size()) {
John Zulauf361fb532020-07-22 10:45:39 -06003055 // Look for any WAR hazards outside the ordered set of stages
John Zulauf4285ee92020-09-23 10:20:52 -06003056 VkPipelineStageFlags ordered_stages = 0;
3057 if (usage_write_is_ordered) {
3058 // If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
3059 ordered_stages = GetOrderedStages(ordering);
3060 }
3061 // If we're tracking any reads that aren't ordered against the current write, we have to check them all.
3062 if ((ordered_stages & last_read_stages) != last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003063 for (const auto &read_access : last_reads) {
John Zulauf4285ee92020-09-23 10:20:52 -06003064 if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
3065 if (IsReadHazard(usage_stage, read_access)) {
3066 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3067 break;
3068 }
John Zulaufd14743a2020-07-03 09:42:39 -06003069 }
3070 }
John Zulauf4285ee92020-09-23 10:20:52 -06003071 } else if (!(last_write_is_ordered && usage_write_is_ordered)) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003072 if (last_write.any() && IsWriteHazard(usage_bit)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003073 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
John Zulauf361fb532020-07-22 10:45:39 -06003074 }
John Zulauf69133422020-05-20 14:55:53 -06003075 }
3076 }
3077 return hazard;
3078}
3079
John Zulauf2f952d22020-02-10 11:34:51 -07003080// Asynchronous Hazards occur between subpasses with no connection through the DAG
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003081HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag &start_tag) const {
John Zulauf2f952d22020-02-10 11:34:51 -07003082 HazardResult hazard;
3083 auto usage = FlagBit(usage_index);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003084 // Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
3085 // subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
3086 // the raster ordering rules.
John Zulauf2f952d22020-02-10 11:34:51 -07003087 if (IsRead(usage)) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003088 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06003089 hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
John Zulauf2f952d22020-02-10 11:34:51 -07003090 }
3091 } else {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003092 if (last_write.any() && (write_tag.index >= start_tag.index)) {
John Zulauf59e25072020-07-17 10:55:21 -06003093 hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
John Zulaufab7756b2020-12-29 16:10:16 -07003094 } else if (last_reads.size() > 0) {
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003095 // Any reads during the other subpass will conflict with this write, so we need to check them all.
John Zulaufab7756b2020-12-29 16:10:16 -07003096 for (const auto &read_access : last_reads) {
3097 if (read_access.tag.index >= start_tag.index) {
3098 hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
Jeremy Gebbenc4b78c52020-12-11 09:39:47 -07003099 break;
3100 }
3101 }
John Zulauf2f952d22020-02-10 11:34:51 -07003102 }
3103 }
3104 return hazard;
3105}
3106
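// Barrier hazard check used for image layout transitions: the transition is modeled as a write, so a prior read
// that is not in (or chained to) the barrier's src execution scope, or a prior write outside the src access scope,
// is reported as a hazard.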
John Zulauf36bcf6a2020-02-03 15:12:52 -07003107HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003108 const SyncStageAccessFlags &src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -07003109 // Only supporting image layout transitions for now
3110 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3111 HazardResult hazard;
John Zulauf361fb532020-07-22 10:45:39 -06003112 // Only test for WAW if there are no intervening read operations.
3113 // See DetectHazard(SyncStageAccessIndex) above for more details.
John Zulaufab7756b2020-12-29 16:10:16 -07003114 if (last_reads.size()) {
John Zulauf355e49b2020-04-24 15:11:15 -06003115 // Look at the reads if any
John Zulaufab7756b2020-12-29 16:10:16 -07003116 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003117 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
John Zulauf59e25072020-07-17 10:55:21 -06003118 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -07003119 break;
3120 }
3121 }
John Zulauf4a6105a2020-11-17 15:11:05 -07003122 } else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3123 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3124 }
3125
3126 return hazard;
3127}
3128
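// Event-scoped variant used when waiting on events: only accesses recorded *before* the SetEvent (the event_tag)
// are covered by the event's first scope; anything recorded after it is reported as a hazard vs. the layout
// transition regardless of the barrier's scopes.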
3129HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
3130 const SyncStageAccessFlags &src_access_scope,
3131 const ResourceUsageTag &event_tag) const {
3132 // Only supporting image layout transitions for now
3133 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
3134 HazardResult hazard;
3135 // Only test for WAW if there are no intervening read operations.
3136 // See DetectHazard(SyncStageAccessIndex) above for more details.
3137
John Zulaufab7756b2020-12-29 16:10:16 -07003138 if (last_reads.size()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003139 // Look at the reads if any... if reads exist, they are either the reason the access is in the event
3140 // first scope, or they are a hazard.
John Zulaufab7756b2020-12-29 16:10:16 -07003141 for (const auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003142 if (read_access.tag.IsBefore(event_tag)) {
3143 // The read is in the event's first synchronization scope, so we use a barrier hazard check
3144 // If the read stage is not in the src sync scope
3145 // *AND* not execution chained with an existing sync barrier (that's the or)
3146 // then the barrier access is unsafe (R/W after R)
3147 if (read_access.IsReadBarrierHazard(src_exec_scope)) {
3148 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3149 break;
3150 }
3151 } else {
3152 // The read is not in the event's first sync scope, and so is a hazard vs. the layout transition
3153 hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
3154 }
3155 }
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003156 } else if (last_write.any()) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003157 // If there are no reads, the write is either the reason the access is in the event's first scope... or it is a hazard
3158 if (write_tag.IsBefore(event_tag)) {
3159 // The write is in the first sync scope of the event (since there aren't any reads to be the reason)
3160 // So do a normal barrier hazard check
3161 if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
3162 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3163 }
3164 } else {
3165 // The write isn't in scope, and is thus a hazard to the layout transition for the wait
John Zulauf361fb532020-07-22 10:45:39 -06003166 hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
3167 }
John Zulaufd14743a2020-07-03 09:42:39 -06003168 }
John Zulauf361fb532020-07-22 10:45:39 -06003169
John Zulauf0cb5be22020-01-23 12:18:22 -07003170 return hazard;
3171}
3172
John Zulauf5f13a792020-03-10 07:31:21 -06003173// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
3174// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
3175// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
3176void ResourceAccessState::Resolve(const ResourceAccessState &other) {
3177 if (write_tag.IsBefore(other.write_tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003178 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
3179 // operation
John Zulauf5f13a792020-03-10 07:31:21 -06003180 *this = other;
3181 } else if (!other.write_tag.IsBefore(write_tag)) {
3182 // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
3183 // dependency chaining logic or any stage expansion)
3184 write_barriers |= other.write_barriers;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003185 pending_write_barriers |= other.pending_write_barriers;
3186 pending_layout_transition |= other.pending_layout_transition;
3187 pending_write_dep_chain |= other.pending_write_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003188
John Zulaufd14743a2020-07-03 09:42:39 -06003189 // Merge the read states
John Zulaufab7756b2020-12-29 16:10:16 -07003190 const auto pre_merge_count = last_reads.size();
John Zulauf4285ee92020-09-23 10:20:52 -06003191 const auto pre_merge_stages = last_read_stages;
John Zulaufab7756b2020-12-29 16:10:16 -07003192 for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003193 auto &other_read = other.last_reads[other_read_index];
John Zulauf4285ee92020-09-23 10:20:52 -06003194 if (pre_merge_stages & other_read.stage) {
John Zulauf5f13a792020-03-10 07:31:21 -06003195 // Merge in the barriers for read stages that exist in *both* this and other
John Zulauf4285ee92020-09-23 10:20:52 -06003196 // TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
3197 // but we should wait on profiling data for that.
3198 for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
John Zulauf5f13a792020-03-10 07:31:21 -06003199 auto &my_read = last_reads[my_read_index];
3200 if (other_read.stage == my_read.stage) {
3201 if (my_read.tag.IsBefore(other_read.tag)) {
John Zulauf4285ee92020-09-23 10:20:52 -06003202 // Other is more recent, copy in the state
John Zulauf37ceaed2020-07-03 16:18:15 -06003203 my_read.access = other_read.access;
John Zulauf4285ee92020-09-23 10:20:52 -06003204 my_read.tag = other_read.tag;
John Zulaufb02c1eb2020-10-06 16:33:36 -06003205 my_read.pending_dep_chain = other_read.pending_dep_chain;
3206 // TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
3207 // May require tracking more than one access per stage.
3208 my_read.barriers = other_read.barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003209 if (my_read.stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
3210 // Since I'm overwriting the fragment stage read, also update the input attachment info
3211 // as this is the only stage that affects it.
John Zulauff51fbb62020-10-02 14:43:24 -06003212 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003213 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003214 } else if (other_read.tag.IsBefore(my_read.tag)) {
3215 // The read tags match so merge the barriers
3216 my_read.barriers |= other_read.barriers;
3217 my_read.pending_dep_chain |= other_read.pending_dep_chain;
John Zulauf5f13a792020-03-10 07:31:21 -06003218 }
John Zulaufb02c1eb2020-10-06 16:33:36 -06003219
John Zulauf5f13a792020-03-10 07:31:21 -06003220 break;
3221 }
3222 }
3223 } else {
3224 // The other read stage doesn't exist in this, so add it.
John Zulaufab7756b2020-12-29 16:10:16 -07003225 last_reads.emplace_back(other_read);
John Zulauf5f13a792020-03-10 07:31:21 -06003226 last_read_stages |= other_read.stage;
John Zulauf4285ee92020-09-23 10:20:52 -06003227 if (other_read.stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
John Zulauff51fbb62020-10-02 14:43:24 -06003228 input_attachment_read = other.input_attachment_read;
John Zulauf4285ee92020-09-23 10:20:52 -06003229 }
John Zulauf5f13a792020-03-10 07:31:21 -06003230 }
3231 }
John Zulauf361fb532020-07-22 10:45:39 -06003232 read_execution_barriers |= other.read_execution_barriers;
John Zulauf4285ee92020-09-23 10:20:52 -06003233 } // the else clause would be that the other write is before this write... in which case we supersede the other state and
3234 // ignore it.
John Zulauffaea0ee2021-01-14 14:01:32 -07003235
3236 // Merge first access information by making a copy of this first_access and reconstructing with a shuffle
3237 // of the copy and other into this using the update first logic.
3238 // NOTE: All sorts of additional cleverness could be put into short circuits. (for example, back is a write and is before the front
3239 // of the other first_accesses... )
3240 if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
3241 FirstAccesses firsts(std::move(first_accesses_));
3242 first_accesses_.clear();
3243 first_read_stages_ = 0U;
3244 auto a = firsts.begin();
3245 auto a_end = firsts.end();
3246 for (auto &b : other.first_accesses_) {
3247 // TODO: Determine whether "IsBefore" or "IsGloballyBefore" is needed...
3248 while (a != a_end && a->tag.IsBefore(b.tag)) {
3249 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3250 ++a;
3251 }
3252 UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
3253 }
3254 for (; a != a_end; ++a) {
3255 UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
3256 }
3257 }
John Zulauf5f13a792020-03-10 07:31:21 -06003258}
3259
John Zulauf8e3c3e92021-01-06 11:19:36 -07003260void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003261 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...
3262 const auto usage_bit = FlagBit(usage_index);
John Zulauf4285ee92020-09-23 10:20:52 -06003263 if (IsRead(usage_index)) {
John Zulauf9cb530d2019-09-30 14:14:10 -06003264 // Multiple outstanding reads may be of interest and do dependency chains independently
3265 // However, for purposes of barrier tracking, only one read per pipeline stage matters
3266 const auto usage_stage = PipelineStageBit(usage_index);
3267 if (usage_stage & last_read_stages) {
John Zulaufab7756b2020-12-29 16:10:16 -07003268 for (auto &read_access : last_reads) {
3269 if (read_access.stage == usage_stage) {
3270 read_access.Set(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003271 break;
3272 }
3273 }
3274 } else {
John Zulaufab7756b2020-12-29 16:10:16 -07003275 last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003276 last_read_stages |= usage_stage;
3277 }
John Zulauf4285ee92020-09-23 10:20:52 -06003278
3279 // Fragment shader reads come in two flavors, and we need to track whether the one we're tracking is the special one.
3280 if (usage_stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
John Zulauff51fbb62020-10-02 14:43:24 -06003281 // TODO Revisit re: multiple reads for a given stage
3282 input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
John Zulauf4285ee92020-09-23 10:20:52 -06003283 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003284 } else {
3285 // Assume write
3286 // TODO determine what to do with READ-WRITE operations if any
John Zulauf89311b42020-09-29 16:28:47 -06003287 SetWrite(usage_bit, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003288 }
John Zulauffaea0ee2021-01-14 14:01:32 -07003289 UpdateFirst(tag, usage_index, ordering_rule);
John Zulauf9cb530d2019-09-30 14:14:10 -06003290}
John Zulauf5f13a792020-03-10 07:31:21 -06003291
John Zulauf89311b42020-09-29 16:28:47 -06003292// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
3293// if the last_reads/last_write were unsafe, we've reported them, in either case the prior access is irrelevant.
3294// We can overwrite them as *this* write is now after them.
3295//
3296// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003297void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag &tag) {
John Zulaufab7756b2020-12-29 16:10:16 -07003298 last_reads.clear();
John Zulauf89311b42020-09-29 16:28:47 -06003299 last_read_stages = 0;
3300 read_execution_barriers = 0;
John Zulauff51fbb62020-10-02 14:43:24 -06003301 input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
John Zulauf89311b42020-09-29 16:28:47 -06003302
3303 write_barriers = 0;
3304 write_dependency_chain = 0;
3305 write_tag = tag;
3306 last_write = usage_bit;
John Zulauf9cb530d2019-09-30 14:14:10 -06003307}
3308
John Zulauf89311b42020-09-29 16:28:47 -06003309// Apply the memory barrier without updating the existing barriers. The execution barrier
3310// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
3311// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
3312// replace the current write barriers or add to them, so accumulate to pending as well.
3313void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
3314 // For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
3315 // applying the memory barriers
John Zulauf86356ca2020-10-19 11:46:41 -06003316 // NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
3317 // transistion, under the theory of "most recent access". If the read/write *isn't* safe
3318 // vs. this layout transition DetectBarrierHazard should report it. We treat the layout
3319 // transistion *as* a write and in scope with the barrier (it's before visibility).
John Zulauf4a6105a2020-11-17 15:11:05 -07003320 if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope, barrier.src_access_scope)) {
John Zulauf89311b42020-09-29 16:28:47 -06003321 pending_write_barriers |= barrier.dst_access_scope;
3322 pending_write_dep_chain |= barrier.dst_exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003323 }
John Zulauf89311b42020-09-29 16:28:47 -06003324 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3325 pending_layout_transition |= layout_transition;
John Zulaufa0a98292020-09-18 09:30:10 -06003326
John Zulauf89311b42020-09-29 16:28:47 -06003327 if (!pending_layout_transition) {
3328 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3329 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003330 for (auto &read_access : last_reads) {
John Zulauf89311b42020-09-29 16:28:47 -06003331 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
John Zulaufab7756b2020-12-29 16:10:16 -07003332 if (barrier.src_exec_scope & (read_access.stage | read_access.barriers)) {
3333 read_access.pending_dep_chain |= barrier.dst_exec_scope;
John Zulaufa0a98292020-09-18 09:30:10 -06003334 }
3335 }
John Zulaufa0a98292020-09-18 09:30:10 -06003336 }
John Zulaufa0a98292020-09-18 09:30:10 -06003337}
3338
John Zulauf4a6105a2020-11-17 15:11:05 -07003339// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
3340// changes the "chaining" state, but to keep barriers independent. See discussion above.
3341void ResourceAccessState::ApplyBarrier(const ResourceUsageTag &scope_tag, const SyncBarrier &barrier, bool layout_transition) {
3342 // The scope logic for events is, if we're here, the resource usage was flagged as "in the first execution scope" at
3343 // the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag,
3344 // in order to know if it's in the execution scope)
3345 // Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
3346 // guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
3347 // errors w.r.t. "most recent" accesses.
3348 if (layout_transition || ((write_tag.IsBefore(scope_tag)) && (barrier.src_access_scope & last_write).any())) {
3349 pending_write_barriers |= barrier.dst_access_scope;
3350 pending_write_dep_chain |= barrier.dst_exec_scope;
3351 }
3352 // Track the layout transition as pending, as we can't modify last_write until all barriers are processed
3353 pending_layout_transition |= layout_transition;
3354
3355 if (!pending_layout_transition) {
3356 // Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
3357 // don't need to be tracked as we're just going to zero them.
John Zulaufab7756b2020-12-29 16:10:16 -07003358 for (auto &read_access : last_reads) {
John Zulauf4a6105a2020-11-17 15:11:05 -07003359 // If this read is the same one we included in the set event and in scope, then apply the execution barrier...
3360 // NOTE: That's not really correct... this read stage might *not* have been included in the setevent, and the barriers
3361 // representing the chain might have changed since then (that would be an odd usage), so as a first approximation
3362 // we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
3363 // positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
3364 // capture (the specific write and read stages that *were* in scope at the moment of SetEvents).
3365 // TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
John Zulaufab7756b2020-12-29 16:10:16 -07003366 if (read_access.tag.IsBefore(scope_tag) && (barrier.src_exec_scope & (read_access.stage | read_access.barriers))) {
3367 read_access.pending_dep_chain |= barrier.dst_exec_scope;
John Zulauf4a6105a2020-11-17 15:11:05 -07003368 }
3369 }
3370 }
3371}
John Zulauf89311b42020-09-29 16:28:47 -06003372void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag &tag) {
3373 if (pending_layout_transition) {
John Zulauf89311b42020-09-29 16:28:47 -06003374 // SetWrite clobbers the read count, and thus we don't have to clear the read_state out.
3375 SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
John Zulauffaea0ee2021-01-14 14:01:32 -07003376 UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
John Zulauf89311b42020-09-29 16:28:47 -06003377 pending_layout_transition = false;
John Zulauf9cb530d2019-09-30 14:14:10 -06003378 }
John Zulauf89311b42020-09-29 16:28:47 -06003379
3380 // Apply the accumulated execution barriers (and thus update chaining information)
3381 // For a layout transition, the read set is cleared by SetWrite, so this loop will be skipped.
John Zulaufab7756b2020-12-29 16:10:16 -07003382 for (auto &read_access : last_reads) {
3383 read_access.barriers |= read_access.pending_dep_chain;
3384 read_execution_barriers |= read_access.barriers;
3385 read_access.pending_dep_chain = 0;
John Zulauf89311b42020-09-29 16:28:47 -06003386 }
3387
3388 // We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
3389 write_dependency_chain |= pending_write_dep_chain;
3390 write_barriers |= pending_write_barriers;
3391 pending_write_dep_chain = 0;
3392 pending_write_barriers = 0;
John Zulauf9cb530d2019-09-30 14:14:10 -06003393}
3394
John Zulauf59e25072020-07-17 10:55:21 -06003395// This should be just Bits or Index, but we don't have an invalid state for Index
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003396VkPipelineStageFlags ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
John Zulauf59e25072020-07-17 10:55:21 -06003397 VkPipelineStageFlags barriers = 0U;
John Zulauf4285ee92020-09-23 10:20:52 -06003398
John Zulaufab7756b2020-12-29 16:10:16 -07003399 for (const auto &read_access : last_reads) {
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003400 if ((read_access.access & usage_bit).any()) {
John Zulauf4285ee92020-09-23 10:20:52 -06003401 barriers = read_access.barriers;
3402 break;
John Zulauf59e25072020-07-17 10:55:21 -06003403 }
3404 }
John Zulauf4285ee92020-09-23 10:20:52 -06003405
John Zulauf59e25072020-07-17 10:55:21 -06003406 return barriers;
3407}
3408
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003409inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlagBits usage_stage, const SyncStageAccessFlags &usage) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003410 assert(IsRead(usage));
3411 // Only RAW vs. last_write if it doesn't happen-after any other read because either:
3412 // * the previous reads are not hazards, and thus last_write must be visible and available to
3413 // any reads that happen after.
3414 // * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
3415 // the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003416 return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
John Zulauf4285ee92020-09-23 10:20:52 -06003417}
3418
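// Returns the set of last-read stages that are covered by the given ordering rule, and thus need not be re-checked
// for WAR hazards by the ordered hazard detection above.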
John Zulauf8e3c3e92021-01-06 11:19:36 -07003419VkPipelineStageFlags ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
John Zulauf4285ee92020-09-23 10:20:52 -06003420 // Whether the stages are in the ordering scope only matters if the current write is ordered
3421 VkPipelineStageFlags ordered_stages = last_read_stages & ordering.exec_scope;
3422 // Special input attachment handling as always (not encoded in exec_scope)
Jeremy Gebbend0de1f82020-11-09 08:21:07 -07003423 const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
John Zulauff51fbb62020-10-02 14:43:24 -06003424 if (input_attachment_ordering && input_attachment_read) {
John Zulauf4285ee92020-09-23 10:20:52 -06003425 // If we have an input attachment in last_reads and input attachments are ordered, we add that stage
3426 ordered_stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
3427 }
3428
3429 return ordered_stages;
3430}
3431
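// Maintain the "first access" list used for error reporting: record each newly seen read stage, and stop adding
// entries once the first write has been recorded.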
John Zulauffaea0ee2021-01-14 14:01:32 -07003432void ResourceAccessState::UpdateFirst(const ResourceUsageTag &tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
3433 // Only record first accesses until we record a write.
3434 if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003435 const VkPipelineStageFlags usage_stage =
3436 IsRead(usage_index) ? static_cast<VkPipelineStageFlags>(PipelineStageBit(usage_index)) : 0U;
John Zulauffaea0ee2021-01-14 14:01:32 -07003437 if (0 == (usage_stage & first_read_stages_)) {
3438 // If this is a read we haven't seen or a write, record.
3439 first_read_stages_ |= usage_stage;
3440 first_accesses_.emplace_back(tag, usage_index, ordering_rule);
3441 }
3442 }
3443}
3444
John Zulaufd1f85d42020-04-15 12:23:15 -06003445void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003446 auto *access_context = GetAccessContextNoInsert(command_buffer);
3447 if (access_context) {
3448 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06003449 }
3450}
3451
John Zulaufd1f85d42020-04-15 12:23:15 -06003452void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
3453 auto access_found = cb_access_state.find(command_buffer);
3454 if (access_found != cb_access_state.end()) {
3455 access_found->second->Reset();
3456 cb_access_state.erase(access_found);
3457 }
3458}
3459
John Zulauf9cb530d2019-09-30 14:14:10 -06003460bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3461 uint32_t regionCount, const VkBufferCopy *pRegions) const {
3462 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003463 const auto *cb_context = GetAccessContext(commandBuffer);
3464 assert(cb_context);
3465 if (!cb_context) return skip;
3466 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06003467
John Zulauf3d84f1b2020-03-09 13:33:25 -06003468 // If we have no previous accesses, we have no hazards
John Zulauf3d84f1b2020-03-09 13:33:25 -06003469 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003470 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003471
3472 for (uint32_t region = 0; region < regionCount; region++) {
3473 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003474 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003475 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf16adfc92020-04-08 10:28:33 -06003476 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003477 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003478 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003479 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003480 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003481 cb_context->FormatUsage(hazard).c_str());
John Zulauf9cb530d2019-09-30 14:14:10 -06003482 }
John Zulauf9cb530d2019-09-30 14:14:10 -06003483 }
John Zulauf16adfc92020-04-08 10:28:33 -06003484 if (dst_buffer && !skip) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003485 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf355e49b2020-04-24 15:11:15 -06003486 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003487 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003488 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003489 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003490 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003491 cb_context->FormatUsage(hazard).c_str());
John Zulauf3d84f1b2020-03-09 13:33:25 -06003492 }
3493 }
3494 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06003495 }
3496 return skip;
3497}
3498
3499void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
3500 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003501 auto *cb_context = GetAccessContext(commandBuffer);
3502 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003503 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003504 auto *context = cb_context->GetCurrentAccessContext();
3505
John Zulauf9cb530d2019-09-30 14:14:10 -06003506 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003507 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06003508
3509 for (uint32_t region = 0; region < regionCount; region++) {
3510 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06003511 if (src_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003512 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003513 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003514 }
John Zulauf16adfc92020-04-08 10:28:33 -06003515 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06003516 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003517 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003518 }
3519 }
3520}
3521
John Zulauf4a6105a2020-11-17 15:11:05 -07003522void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3523 // Clear out events from the command buffer contexts
3524 for (auto &cb_context : cb_access_state) {
3525 cb_context.second->RecordDestroyEvent(event);
3526 }
3527}
3528
Jeff Leger178b1e52020-10-05 12:22:23 -04003529bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
3530 const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
3531 bool skip = false;
3532 const auto *cb_context = GetAccessContext(commandBuffer);
3533 assert(cb_context);
3534 if (!cb_context) return skip;
3535 const auto *context = cb_context->GetCurrentAccessContext();
3536
3537 // If we have no previous accesses, we have no hazards
3538 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3539 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3540
3541 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3542 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3543 if (src_buffer) {
3544 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
3545 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
3546 if (hazard.hazard) {
3547 // TODO -- add tag information to log msg when useful.
3548 skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
3549 "vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
3550 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003551 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003552 }
3553 }
3554 if (dst_buffer && !skip) {
3555 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
3556 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
3557 if (hazard.hazard) {
3558 skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
3559 "vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
3560 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003561 region, cb_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003562 }
3563 }
3564 if (skip) break;
3565 }
3566 return skip;
3567}
3568
3569void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
3570 auto *cb_context = GetAccessContext(commandBuffer);
3571 assert(cb_context);
3572 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
3573 auto *context = cb_context->GetCurrentAccessContext();
3574
3575 const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
3576 const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
3577
3578 for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
3579 const auto &copy_region = pCopyBufferInfos->pRegions[region];
3580 if (src_buffer) {
3581 const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003582 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003583 }
3584 if (dst_buffer) {
3585 const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003586 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003587 }
3588 }
3589}
3590
John Zulauf5c5e88d2019-12-26 11:22:02 -07003591bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3592 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3593 const VkImageCopy *pRegions) const {
3594 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003595 const auto *cb_access_context = GetAccessContext(commandBuffer);
3596 assert(cb_access_context);
3597 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003598
John Zulauf3d84f1b2020-03-09 13:33:25 -06003599 const auto *context = cb_access_context->GetCurrentAccessContext();
3600 assert(context);
3601 if (!context) return skip;
3602
3603 const auto *src_image = Get<IMAGE_STATE>(srcImage);
3604 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003605 for (uint32_t region = 0; region < regionCount; region++) {
3606 const auto &copy_region = pRegions[region];
3607 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06003608 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06003609 copy_region.srcOffset, copy_region.extent);
3610 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003611 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003612 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003613 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003614 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003615 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003616 }
3617
3618 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003619 VkExtent3D dst_copy_extent =
3620 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06003621 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07003622 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003623 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06003624 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06003625 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06003626 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07003627 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf5c5e88d2019-12-26 11:22:02 -07003628 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07003629 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07003630 }
3631 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003632
John Zulauf5c5e88d2019-12-26 11:22:02 -07003633 return skip;
3634}
3635
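// Record phase for vkCmdCopyImage: no hazard checks here; the per-region source reads and destination writes
// are stored in the access context under a fresh CMD_COPYIMAGE tag so that subsequent commands can be
// validated against them.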
3636void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
3637 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
3638 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003639 auto *cb_access_context = GetAccessContext(commandBuffer);
3640 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06003641 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003642 auto *context = cb_access_context->GetCurrentAccessContext();
3643 assert(context);
3644
John Zulauf5c5e88d2019-12-26 11:22:02 -07003645 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003646 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003647
3648 for (uint32_t region = 0; region < regionCount; region++) {
3649 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06003650 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07003651 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
3652 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07003653 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06003654 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07003655 VkExtent3D dst_copy_extent =
3656 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003657 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
3658 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06003659 }
3660 }
3661}
3662
Jeff Leger178b1e52020-10-05 12:22:23 -04003663bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
3664 const VkCopyImageInfo2KHR *pCopyImageInfo) const {
3665 bool skip = false;
3666 const auto *cb_access_context = GetAccessContext(commandBuffer);
3667 assert(cb_access_context);
3668 if (!cb_access_context) return skip;
3669
3670 const auto *context = cb_access_context->GetCurrentAccessContext();
3671 assert(context);
3672 if (!context) return skip;
3673
3674 const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3675 const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3676 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3677 const auto &copy_region = pCopyImageInfo->pRegions[region];
3678 if (src_image) {
3679 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
3680 copy_region.srcOffset, copy_region.extent);
3681 if (hazard.hazard) {
3682 skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
3683 "vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
3684 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003685 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003686 }
3687 }
3688
3689 if (dst_image) {
3690 VkExtent3D dst_copy_extent =
3691 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
3692 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
3693 copy_region.dstOffset, dst_copy_extent);
3694 if (hazard.hazard) {
3695 skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
3696 "vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
3697 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07003698 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04003699 }
3700 if (skip) break;
3701 }
3702 }
3703
3704 return skip;
3705}
3706
3707void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
3708 auto *cb_access_context = GetAccessContext(commandBuffer);
3709 assert(cb_access_context);
3710 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
3711 auto *context = cb_access_context->GetCurrentAccessContext();
3712 assert(context);
3713
3714 auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
3715 auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
3716
3717 for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
3718 const auto &copy_region = pCopyImageInfo->pRegions[region];
3719 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07003720 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
3721 copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003722 }
3723 if (dst_image) {
3724 VkExtent3D dst_copy_extent =
3725 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf8e3c3e92021-01-06 11:19:36 -07003726 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
3727 copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04003728 }
3729 }
3730}
3731
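// vkCmdPipelineBarrier is handled by the SyncOpPipelineBarrier helper: Validate() checks the barrier against
// the current access context, and Record() (in the PreCallRecord hook below) applies the barrier at a new
// CMD_PIPELINEBARRIER tag so that later accesses are evaluated against the updated synchronization state.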
John Zulauf9cb530d2019-09-30 14:14:10 -06003732bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3733 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3734 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3735 uint32_t bufferMemoryBarrierCount,
3736 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3737 uint32_t imageMemoryBarrierCount,
3738 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
3739 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003740 const auto *cb_access_context = GetAccessContext(commandBuffer);
3741 assert(cb_access_context);
3742 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07003743
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003744 SyncOpPipelineBarrier pipeline_barrier(*this, cb_access_context->GetQueueFlags(), srcStageMask, dstStageMask, dependencyFlags,
3745 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
3746 imageMemoryBarrierCount, pImageMemoryBarriers);
3747 skip = pipeline_barrier.Validate(*cb_access_context);
John Zulauf9cb530d2019-09-30 14:14:10 -06003748 return skip;
3749}
3750
3751void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
3752 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
3753 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
3754 uint32_t bufferMemoryBarrierCount,
3755 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3756 uint32_t imageMemoryBarrierCount,
3757 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003758 auto *cb_access_context = GetAccessContext(commandBuffer);
3759 assert(cb_access_context);
3760 if (!cb_access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06003761
John Zulaufe7f6a5e2021-01-16 14:31:18 -07003762 SyncOpPipelineBarrier pipeline_barrier(*this, cb_access_context->GetQueueFlags(), srcStageMask, dstStageMask, dependencyFlags,
3763 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
3764 imageMemoryBarrierCount, pImageMemoryBarriers);
3765 pipeline_barrier.Record(cb_access_context, cb_access_context->NextCommandTag(CMD_PIPELINEBARRIER));
John Zulauf9cb530d2019-09-30 14:14:10 -06003766}
3767
3768void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3769 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
3770 // The state tracker sets up the device state
3771 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
3772
John Zulauf5f13a792020-03-10 07:31:21 -06003773 // Add the callback hooks for the functions that are either broadly or deeply used and for which the ValidationStateTracker
3774 // refactor would be messier without them.
John Zulauf9cb530d2019-09-30 14:14:10 -06003775 // TODO: Find a good way to do this hooklessly.
3776 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3777 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
3778 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
3779
John Zulaufd1f85d42020-04-15 12:23:15 -06003780 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3781 sync_device_state->ResetCommandBufferCallback(command_buffer);
3782 });
3783 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
3784 sync_device_state->FreeCommandBufferCallback(command_buffer);
3785 });
John Zulauf9cb530d2019-09-30 14:14:10 -06003786}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003787
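// Shared helper for the vkCmdBeginRenderPass/2/2KHR validation entry points below; the real work is delegated
// to the command buffer access context's ValidateBeginRenderPass().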
John Zulauf355e49b2020-04-24 15:11:15 -06003788bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003789 const VkSubpassBeginInfo *pSubpassBeginInfo, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003790 bool skip = false;
3791 const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
3792 auto cb_context = GetAccessContext(commandBuffer);
3793
3794 if (rp_state && cb_context) {
3795 skip |= cb_context->ValidateBeginRenderPass(*rp_state, pRenderPassBegin, pSubpassBeginInfo, func_name);
3796 }
3797
3798 return skip;
3799}
3800
3801bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3802 VkSubpassContents contents) const {
3803 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003804 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003805 subpass_begin_info.contents = contents;
3806 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, "vkCmdBeginRenderPass");
3807 return skip;
3808}
3809
3810bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003811 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003812 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
3813 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, "vkCmdBeginRenderPass2");
3814 return skip;
3815}
3816
3817bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3818 const VkRenderPassBeginInfo *pRenderPassBegin,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003819 const VkSubpassBeginInfo *pSubpassBeginInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003820 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
3821 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, "vkCmdBeginRenderPass2KHR");
3822 return skip;
3823}
3824
John Zulauf3d84f1b2020-03-09 13:33:25 -06003825void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
3826 VkResult result) {
3827 // The state tracker sets up the command buffer state
3828 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
3829
3830 // Create/initialize the structure that tracks accesses at the command buffer scope.
3831 auto cb_access_context = GetAccessContext(commandBuffer);
3832 assert(cb_access_context);
3833 cb_access_context->Reset();
3834}
3835
3836void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf355e49b2020-04-24 15:11:15 -06003837 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE command) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003838 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06003839 if (cb_context) {
3840 cb_context->RecordBeginRenderPass(cb_context->NextCommandTag(command));
John Zulauf3d84f1b2020-03-09 13:33:25 -06003841 }
3842}
3843
3844void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3845 VkSubpassContents contents) {
3846 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003847 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003848 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003849 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003850}
3851
3852void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
3853 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3854 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003855 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003856}
3857
3858void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
3859 const VkRenderPassBeginInfo *pRenderPassBegin,
3860 const VkSubpassBeginInfo *pSubpassBeginInfo) {
3861 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003862 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
3863}
3864
Mike Schuchardt2df08912020-12-15 16:28:09 -08003865bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3866 const VkSubpassEndInfo *pSubpassEndInfo, const char *func_name) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003867 bool skip = false;
3868
3869 auto cb_context = GetAccessContext(commandBuffer);
3870 assert(cb_context);
3871 auto cb_state = cb_context->GetCommandBufferState();
3872 if (!cb_state) return skip;
3873
3874 auto rp_state = cb_state->activeRenderPass;
3875 if (!rp_state) return skip;
3876
3877 skip |= cb_context->ValidateNextSubpass(func_name);
3878
3879 return skip;
3880}
3881
3882bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
3883 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003884 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf355e49b2020-04-24 15:11:15 -06003885 subpass_begin_info.contents = contents;
3886 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, "vkCmdNextSubpass");
3887 return skip;
3888}
3889
Mike Schuchardt2df08912020-12-15 16:28:09 -08003890bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3891 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003892 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
3893 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, "vkCmdNextSubpass2KHR");
3894 return skip;
3895}
3896
3897bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3898 const VkSubpassEndInfo *pSubpassEndInfo) const {
3899 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
3900 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, "vkCmdNextSubpass2");
3901 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06003902}
3903
3904void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf355e49b2020-04-24 15:11:15 -06003905 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE command) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06003906 auto cb_context = GetAccessContext(commandBuffer);
3907 assert(cb_context);
3908 auto cb_state = cb_context->GetCommandBufferState();
3909 if (!cb_state) return;
3910
3911 auto rp_state = cb_state->activeRenderPass;
3912 if (!rp_state) return;
3913
John Zulauffaea0ee2021-01-14 14:01:32 -07003914 cb_context->RecordNextSubpass(*rp_state, command);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003915}
3916
3917void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
3918 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
Mark Lobodzinski6fe9e702020-12-30 15:36:39 -07003919 auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
John Zulauf3d84f1b2020-03-09 13:33:25 -06003920 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06003921 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003922}
3923
3924void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3925 const VkSubpassEndInfo *pSubpassEndInfo) {
3926 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003927 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003928}
3929
3930void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
3931 const VkSubpassEndInfo *pSubpassEndInfo) {
3932 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06003933 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003934}
3935
Mike Schuchardt2df08912020-12-15 16:28:09 -08003936bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
John Zulauf355e49b2020-04-24 15:11:15 -06003937 const char *func_name) const {
3938 bool skip = false;
3939
3940 auto cb_context = GetAccessContext(commandBuffer);
3941 assert(cb_context);
3942 auto cb_state = cb_context->GetCommandBufferState();
3943 if (!cb_state) return skip;
3944
3945 auto rp_state = cb_state->activeRenderPass;
3946 if (!rp_state) return skip;
3947
3948 skip |= cb_context->ValidateEndRenderpass(func_name);
3949 return skip;
3950}
3951
3952bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
3953 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
3954 skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, "vkCmdEndRenderPass");
3955 return skip;
3956}
3957
Mike Schuchardt2df08912020-12-15 16:28:09 -08003958bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003959 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
3960 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, "vkCmdEndRenderPass2");
3961 return skip;
3962}
3963
3964bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
Mike Schuchardt2df08912020-12-15 16:28:09 -08003965 const VkSubpassEndInfo *pSubpassEndInfo) const {
John Zulauf355e49b2020-04-24 15:11:15 -06003966 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
3967 skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, "vkCmdEndRenderPass2KHR");
3968 return skip;
3969}
3970
3971void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
3972 CMD_TYPE command) {
John Zulaufe5da6e52020-03-18 15:32:18 -06003973 // Resolve all the subpass contexts to the command buffer context
3974 auto cb_context = GetAccessContext(commandBuffer);
3975 assert(cb_context);
3976 auto cb_state = cb_context->GetCommandBufferState();
3977 if (!cb_state) return;
3978
locke-lunargaecf2152020-05-12 17:15:41 -06003979 const auto *rp_state = cb_state->activeRenderPass.get();
John Zulaufe5da6e52020-03-18 15:32:18 -06003980 if (!rp_state) return;
3981
John Zulauffaea0ee2021-01-14 14:01:32 -07003982 cb_context->RecordEndRenderPass(*rp_state, command);
John Zulaufe5da6e52020-03-18 15:32:18 -06003983}
John Zulauf3d84f1b2020-03-09 13:33:25 -06003984
John Zulauf33fc1d52020-07-17 11:01:10 -06003985// Simple heuristic rule to detect WAW operations representing algorithmically safe or incremental
3986// updates to a resource that do not conflict at the byte level.
3987// TODO: Revisit this rule to see if it needs to be tighter or looser
3988// TODO: Add programmatic control over suppression heuristics
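// Illustrative (hypothetical) case this suppresses: two consecutive dispatches writing the same bound storage
// buffer with exactly the same usage, so the prior access and the new access are the same single write bit.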
3989bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
3990 return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
3991}
3992
John Zulauf3d84f1b2020-03-09 13:33:25 -06003993void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
John Zulauf355e49b2020-04-24 15:11:15 -06003994 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf5a1a5382020-06-22 17:23:25 -06003995 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06003996}
3997
3998void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06003999 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06004000 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004001}
4002
4003void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
John Zulauf355e49b2020-04-24 15:11:15 -06004004 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf5a1a5382020-06-22 17:23:25 -06004005 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf3d84f1b2020-03-09 13:33:25 -06004006}
locke-lunarga19c71d2020-03-02 18:17:04 -07004007
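// Buffer-to-image copies share one template for the VkBufferImageCopy (core) and VkBufferImageCopy2KHR
// (VK_KHR_copy_commands2) region types, selected via CopyCommandVersion. The buffer side of each region is
// validated as a transfer read of the range starting at bufferOffset whose size is derived from the image
// extent by GetBufferSizeFromCopyImage(); the image side is validated as a transfer write of the destination
// subresource.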
Jeff Leger178b1e52020-10-05 12:22:23 -04004008template <typename BufferImageCopyRegionType>
4009bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4010 VkImageLayout dstImageLayout, uint32_t regionCount,
4011 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004012 bool skip = false;
4013 const auto *cb_access_context = GetAccessContext(commandBuffer);
4014 assert(cb_access_context);
4015 if (!cb_access_context) return skip;
4016
Jeff Leger178b1e52020-10-05 12:22:23 -04004017 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4018 const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
4019
locke-lunarga19c71d2020-03-02 18:17:04 -07004020 const auto *context = cb_access_context->GetCurrentAccessContext();
4021 assert(context);
4022 if (!context) return skip;
4023
4024 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07004025 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4026
4027 for (uint32_t region = 0; region < regionCount; region++) {
4028 const auto &copy_region = pRegions[region];
John Zulauf477700e2021-01-06 11:41:49 -07004029 HazardResult hazard;
locke-lunarga19c71d2020-03-02 18:17:04 -07004030 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004031 if (src_buffer) {
4032 ResourceAccessRange src_range =
4033 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
4034 hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
4035 if (hazard.hazard) {
4036 // PHASE1 TODO -- add tag information to log msg when useful.
4037 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
4038 "%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4039 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004040 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004041 }
4042 }
4043
4044 hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
4045 copy_region.imageOffset, copy_region.imageExtent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004046 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004047 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004048 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004049 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004050 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004051 }
4052 if (skip) break;
4053 }
4054 if (skip) break;
4055 }
4056 return skip;
4057}
4058
Jeff Leger178b1e52020-10-05 12:22:23 -04004059bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4060 VkImageLayout dstImageLayout, uint32_t regionCount,
4061 const VkBufferImageCopy *pRegions) const {
4062 return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
4063 COPY_COMMAND_VERSION_1);
4064}
4065
4066bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4067 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
4068 return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4069 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4070 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
4071}
4072
4073template <typename BufferImageCopyRegionType>
4074void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4075 VkImageLayout dstImageLayout, uint32_t regionCount,
4076 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004077 auto *cb_access_context = GetAccessContext(commandBuffer);
4078 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004079
4080 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4081 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
4082
4083 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004084 auto *context = cb_access_context->GetCurrentAccessContext();
4085 assert(context);
4086
4087 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06004088 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004089
4090 for (uint32_t region = 0; region < regionCount; region++) {
4091 const auto &copy_region = pRegions[region];
locke-lunarga19c71d2020-03-02 18:17:04 -07004092 if (dst_image) {
John Zulauf477700e2021-01-06 11:41:49 -07004093 if (src_buffer) {
4094 ResourceAccessRange src_range =
4095 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
4096 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
4097 }
John Zulauf8e3c3e92021-01-06 11:19:36 -07004098 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
4099 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004100 }
4101 }
4102}
4103
Jeff Leger178b1e52020-10-05 12:22:23 -04004104void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
4105 VkImageLayout dstImageLayout, uint32_t regionCount,
4106 const VkBufferImageCopy *pRegions) {
4107 StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
4108 RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
4109}
4110
4111void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
4112 const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
4113 StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
4114 RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
4115 pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
4116 pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
4117}
4118
4119template <typename BufferImageCopyRegionType>
4120bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4121 VkBuffer dstBuffer, uint32_t regionCount,
4122 const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004123 bool skip = false;
4124 const auto *cb_access_context = GetAccessContext(commandBuffer);
4125 assert(cb_access_context);
4126 if (!cb_access_context) return skip;
4127
Jeff Leger178b1e52020-10-05 12:22:23 -04004128 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4129 const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
4130
locke-lunarga19c71d2020-03-02 18:17:04 -07004131 const auto *context = cb_access_context->GetCurrentAccessContext();
4132 assert(context);
4133 if (!context) return skip;
4134
4135 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4136 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4137 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
4138 for (uint32_t region = 0; region < regionCount; region++) {
4139 const auto &copy_region = pRegions[region];
4140 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06004141 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07004142 copy_region.imageOffset, copy_region.imageExtent);
4143 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004144 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004145 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
John Zulauf1dae9192020-06-16 15:46:44 -06004146 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004147 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004148 }
John Zulauf477700e2021-01-06 11:41:49 -07004149 if (dst_mem) {
4150 ResourceAccessRange dst_range =
4151 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
4152 hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
4153 if (hazard.hazard) {
4154 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4155 "%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
4156 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004157 cb_access_context->FormatUsage(hazard).c_str());
John Zulauf477700e2021-01-06 11:41:49 -07004158 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004159 }
4160 }
4161 if (skip) break;
4162 }
4163 return skip;
4164}
4165
Jeff Leger178b1e52020-10-05 12:22:23 -04004166bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
4167 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
4168 const VkBufferImageCopy *pRegions) const {
4169 return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
4170 COPY_COMMAND_VERSION_1);
4171}
4172
4173bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4174 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
4175 return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4176 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4177 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
4178}
4179
4180template <typename BufferImageCopyRegionType>
4181void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4182 VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
4183 CopyCommandVersion version) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004184 auto *cb_access_context = GetAccessContext(commandBuffer);
4185 assert(cb_access_context);
Jeff Leger178b1e52020-10-05 12:22:23 -04004186
4187 const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
4188 const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
4189
4190 const auto tag = cb_access_context->NextCommandTag(cmd_type);
locke-lunarga19c71d2020-03-02 18:17:04 -07004191 auto *context = cb_access_context->GetCurrentAccessContext();
4192 assert(context);
4193
4194 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004195 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4196 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06004197 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07004198
4199 for (uint32_t region = 0; region < regionCount; region++) {
4200 const auto &copy_region = pRegions[region];
4201 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07004202 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
4203 copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
John Zulauf477700e2021-01-06 11:41:49 -07004204 if (dst_buffer) {
4205 ResourceAccessRange dst_range =
4206 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
4207 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
4208 }
locke-lunarga19c71d2020-03-02 18:17:04 -07004209 }
4210 }
4211}
4212
Jeff Leger178b1e52020-10-05 12:22:23 -04004213void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4214 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
4215 StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
4216 RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
4217}
4218
4219void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
4220 const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
4221 StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
4222 RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
4223 pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
4224 pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
4225}
4226
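// Blit regions may specify their corners in either order (a reversed pair of offsets expresses a flip), so the
// hazard check below normalizes each region to a min-corner offset plus an absolute-value extent before
// detecting transfer read/write hazards on the source and destination subresources.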
4227template <typename RegionType>
4228bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4229 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4230 const RegionType *pRegions, VkFilter filter, const char *apiName) const {
locke-lunarga19c71d2020-03-02 18:17:04 -07004231 bool skip = false;
4232 const auto *cb_access_context = GetAccessContext(commandBuffer);
4233 assert(cb_access_context);
4234 if (!cb_access_context) return skip;
4235
4236 const auto *context = cb_access_context->GetCurrentAccessContext();
4237 assert(context);
4238 if (!context) return skip;
4239
4240 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4241 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4242
4243 for (uint32_t region = 0; region < regionCount; region++) {
4244 const auto &blit_region = pRegions[region];
4245 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004246 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4247 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4248 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4249 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4250 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4251 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
4252 auto hazard =
4253 context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004254 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004255 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004256 "%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004257 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004258 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004259 }
4260 }
4261
4262 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004263 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4264 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4265 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4266 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4267 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4268 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
4269 auto hazard =
4270 context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
locke-lunarga19c71d2020-03-02 18:17:04 -07004271 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06004272 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
Jeff Leger178b1e52020-10-05 12:22:23 -04004273 "%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
John Zulauf1dae9192020-06-16 15:46:44 -06004274 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07004275 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarga19c71d2020-03-02 18:17:04 -07004276 }
4277 if (skip) break;
4278 }
4279 }
4280
4281 return skip;
4282}
4283
Jeff Leger178b1e52020-10-05 12:22:23 -04004284bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4285 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4286 const VkImageBlit *pRegions, VkFilter filter) const {
4287 return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
4288 "vkCmdBlitImage");
4289}
4290
4291bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
4292 const VkBlitImageInfo2KHR *pBlitImageInfo) const {
4293 return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4294 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4295 pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
4296}
4297
4298template <typename RegionType>
4299void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4300 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4301 const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
locke-lunarga19c71d2020-03-02 18:17:04 -07004302 auto *cb_access_context = GetAccessContext(commandBuffer);
4303 assert(cb_access_context);
4304 auto *context = cb_access_context->GetCurrentAccessContext();
4305 assert(context);
4306
4307 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004308 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07004309
4310 for (uint32_t region = 0; region < regionCount; region++) {
4311 const auto &blit_region = pRegions[region];
4312 if (src_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004313 VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
4314 std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
4315 std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
4316 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
4317 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
4318 static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
John Zulauf8e3c3e92021-01-06 11:19:36 -07004319 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
4320 blit_region.srcSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004321 }
4322 if (dst_image) {
locke-lunarg8f93acc2020-06-18 21:26:46 -06004323 VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
4324 std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
4325 std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
4326 VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
4327 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
4328 static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
John Zulauf8e3c3e92021-01-06 11:19:36 -07004329 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
4330 blit_region.dstSubresource, offset, extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07004331 }
4332 }
4333}
locke-lunarg36ba2592020-04-03 09:42:04 -06004334
Jeff Leger178b1e52020-10-05 12:22:23 -04004335void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4336 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4337 const VkImageBlit *pRegions, VkFilter filter) {
4338 auto *cb_access_context = GetAccessContext(commandBuffer);
4339 assert(cb_access_context);
4340 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
4341 StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
4342 pRegions, filter);
4343 RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
4344}
4345
4346void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
4347 StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
4348 auto *cb_access_context = GetAccessContext(commandBuffer);
4349 assert(cb_access_context);
4350 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
4351 RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
4352 pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
4353 pBlitImageInfo->filter, tag);
4354}
4355
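// Indirect argument buffers: when the draws are tightly packed (drawCount == 1 or stride == struct size) the
// whole span is checked as a single indirect-command read; otherwise each draw's struct is checked as its own
// range. Worked example (hypothetical values): drawCount = 3, stride = 32, struct_size = 16 yields the three
// ranges [offset, offset + 16), [offset + 32, offset + 48), and [offset + 64, offset + 80).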
John Zulauffaea0ee2021-01-14 14:01:32 -07004356bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4357 VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
4358 const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
4359 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004360 bool skip = false;
4361 if (drawCount == 0) return skip;
4362
4363 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4364 VkDeviceSize size = struct_size;
4365 if (drawCount == 1 || stride == size) {
4366 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004367 const ResourceAccessRange range = MakeRange(offset, size);
locke-lunargff255f92020-05-13 18:53:52 -06004368 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4369 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004370 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004371 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004372 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004373 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004374 }
4375 } else {
4376 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004377 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
locke-lunargff255f92020-05-13 18:53:52 -06004378 auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4379 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004380 skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004381 "%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
4382 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004383 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004384 break;
4385 }
4386 }
4387 }
4388 return skip;
4389}
4390
locke-lunarg61870c22020-06-09 14:51:50 -06004391void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag &tag, const VkDeviceSize struct_size,
4392 const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
4393 uint32_t stride) {
locke-lunargff255f92020-05-13 18:53:52 -06004394 const auto *buf_state = Get<BUFFER_STATE>(buffer);
4395 VkDeviceSize size = struct_size;
4396 if (drawCount == 1 || stride == size) {
4397 if (drawCount > 1) size *= drawCount;
John Zulauf3e86bf02020-09-12 10:47:57 -06004398 const ResourceAccessRange range = MakeRange(offset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004399 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004400 } else {
4401 for (uint32_t i = 0; i < drawCount; ++i) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004402 const ResourceAccessRange range = MakeRange(offset + i * stride, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004403 context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
4404 tag);
locke-lunargff255f92020-05-13 18:53:52 -06004405 }
4406 }
4407}
4408
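// Count buffer validation: only the single 32-bit draw count at 'offset' is read, so a four-byte range is
// checked for indirect-command read hazards.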
John Zulauffaea0ee2021-01-14 14:01:32 -07004409bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
4410 VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4411 const char *function) const {
locke-lunargff255f92020-05-13 18:53:52 -06004412 bool skip = false;
4413
4414 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004415 const ResourceAccessRange range = MakeRange(offset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06004416 auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
4417 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004418 skip |= LogError(count_buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004419 "%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
John Zulauf1dae9192020-06-16 15:46:44 -06004420 report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07004421 cb_context.FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06004422 }
4423 return skip;
4424}
4425
locke-lunarg61870c22020-06-09 14:51:50 -06004426void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag &tag, VkBuffer buffer, VkDeviceSize offset) {
locke-lunargff255f92020-05-13 18:53:52 -06004427 const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
John Zulauf3e86bf02020-09-12 10:47:57 -06004428 const ResourceAccessRange range = MakeRange(offset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004429 context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004430}
4431
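// The dispatch/draw validation below follows a common pattern: descriptor-bound resources are checked via the
// command buffer context's ValidateDispatchDrawDescriptorSet(), draws additionally check vertex/index buffers
// and subpass attachments, and the *Indirect variants also check their indirect (and count) argument buffers.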
locke-lunarg36ba2592020-04-03 09:42:04 -06004432bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
locke-lunargff255f92020-05-13 18:53:52 -06004433 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004434 const auto *cb_access_context = GetAccessContext(commandBuffer);
4435 assert(cb_access_context);
4436 if (!cb_access_context) return skip;
4437
locke-lunarg61870c22020-06-09 14:51:50 -06004438 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
locke-lunargff255f92020-05-13 18:53:52 -06004439 return skip;
locke-lunarg36ba2592020-04-03 09:42:04 -06004440}
4441
4442void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004443 StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
locke-lunargff255f92020-05-13 18:53:52 -06004444 auto *cb_access_context = GetAccessContext(commandBuffer);
4445 assert(cb_access_context);
4446 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
locke-lunargff255f92020-05-13 18:53:52 -06004447
locke-lunarg61870c22020-06-09 14:51:50 -06004448 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
locke-lunarg36ba2592020-04-03 09:42:04 -06004449}
locke-lunarge1a67022020-04-29 00:15:36 -06004450
4451bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
locke-lunargff255f92020-05-13 18:53:52 -06004452 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004453 const auto *cb_access_context = GetAccessContext(commandBuffer);
4454 assert(cb_access_context);
4455 if (!cb_access_context) return skip;
4456
4457 const auto *context = cb_access_context->GetCurrentAccessContext();
4458 assert(context);
4459 if (!context) return skip;
4460
locke-lunarg61870c22020-06-09 14:51:50 -06004461 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004462 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
4463 1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004464 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004465}
4466
4467void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004468 StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
locke-lunargff255f92020-05-13 18:53:52 -06004469 auto *cb_access_context = GetAccessContext(commandBuffer);
4470 assert(cb_access_context);
4471 const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
4472 auto *context = cb_access_context->GetCurrentAccessContext();
4473 assert(context);
4474
locke-lunarg61870c22020-06-09 14:51:50 -06004475 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
4476 RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
locke-lunarge1a67022020-04-29 00:15:36 -06004477}
4478
4479bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4480 uint32_t firstVertex, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004481 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004482 const auto *cb_access_context = GetAccessContext(commandBuffer);
4483 assert(cb_access_context);
4484 if (!cb_access_context) return skip;
4485
locke-lunarg61870c22020-06-09 14:51:50 -06004486 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
4487 skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
4488 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004489 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004490}
4491
4492void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
4493 uint32_t firstVertex, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004494 StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004495 auto *cb_access_context = GetAccessContext(commandBuffer);
4496 assert(cb_access_context);
4497 const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
locke-lunargff255f92020-05-13 18:53:52 -06004498
locke-lunarg61870c22020-06-09 14:51:50 -06004499 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4500 cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
4501 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004502}
4503
4504bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4505 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
locke-lunarga4d39ea2020-05-22 14:17:29 -06004506 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004507 const auto *cb_access_context = GetAccessContext(commandBuffer);
4508 assert(cb_access_context);
4509 if (!cb_access_context) return skip;
4510
locke-lunarg61870c22020-06-09 14:51:50 -06004511 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
4512 skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
4513 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
locke-lunarga4d39ea2020-05-22 14:17:29 -06004514 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004515}
4516
4517void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
4518 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004519 StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
locke-lunargff255f92020-05-13 18:53:52 -06004520 auto *cb_access_context = GetAccessContext(commandBuffer);
4521 assert(cb_access_context);
4522 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
locke-lunargff255f92020-05-13 18:53:52 -06004523
locke-lunarg61870c22020-06-09 14:51:50 -06004524 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4525 cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
4526 cb_access_context->RecordDrawSubpassAttachment(tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004527}
4528
4529bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4530 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004531 bool skip = false;
4532 if (drawCount == 0) return skip;
4533
locke-lunargff255f92020-05-13 18:53:52 -06004534 const auto *cb_access_context = GetAccessContext(commandBuffer);
4535 assert(cb_access_context);
4536 if (!cb_access_context) return skip;
4537
4538 const auto *context = cb_access_context->GetCurrentAccessContext();
4539 assert(context);
4540 if (!context) return skip;
4541
locke-lunarg61870c22020-06-09 14:51:50 -06004542 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
4543 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004544 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4545 drawCount, stride, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004546
4547 // TODO: For now, we validate the whole vertex buffer, which may cause false positives.
4548 // The VkDrawIndirectCommand buffer contents can be changed up until queue submission.
4549 // We will validate the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004550 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004551 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004552}
4553
4554void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4555 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004556 StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004557 if (drawCount == 0) return;
locke-lunargff255f92020-05-13 18:53:52 -06004558 auto *cb_access_context = GetAccessContext(commandBuffer);
4559 assert(cb_access_context);
4560 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
4561 auto *context = cb_access_context->GetCurrentAccessContext();
4562 assert(context);
4563
locke-lunarg61870c22020-06-09 14:51:50 -06004564 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4565 cb_access_context->RecordDrawSubpassAttachment(tag);
4566 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004567
4568 // TODO: For now, we record the whole vertex buffer, which may cause false positives.
4569 // The VkDrawIndirectCommand buffer contents can be changed up until queue submission.
4570 // We will record the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004571 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004572}
4573
4574bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4575 uint32_t drawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004576 bool skip = false;
4577 if (drawCount == 0) return skip;
locke-lunargff255f92020-05-13 18:53:52 -06004578 const auto *cb_access_context = GetAccessContext(commandBuffer);
4579 assert(cb_access_context);
4580 if (!cb_access_context) return skip;
4581
4582 const auto *context = cb_access_context->GetCurrentAccessContext();
4583 assert(context);
4584 if (!context) return skip;
4585
locke-lunarg61870c22020-06-09 14:51:50 -06004586 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
4587 skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
John Zulauffaea0ee2021-01-14 14:01:32 -07004588 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4589 offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004590
4591 // TODO: For now, we validate the whole index and vertex buffers, which may cause false positives.
4592 // The VkDrawIndexedIndirectCommand buffer contents can be changed up until queue submission.
4593 // We will validate the index and vertex buffers at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004594 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
locke-lunargff255f92020-05-13 18:53:52 -06004595 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004596}
4597
4598void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4599 uint32_t drawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004600 StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004601 auto *cb_access_context = GetAccessContext(commandBuffer);
4602 assert(cb_access_context);
4603 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
4604 auto *context = cb_access_context->GetCurrentAccessContext();
4605 assert(context);
4606
locke-lunarg61870c22020-06-09 14:51:50 -06004607 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4608 cb_access_context->RecordDrawSubpassAttachment(tag);
4609 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004610
4611 // TODO: For now, we record the whole index and vertex buffers, which may cause false positives.
4612 // The VkDrawIndexedIndirectCommand buffer contents can be changed up until queue submission.
4613 // We will record the index and vertex buffers at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004614 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunargff255f92020-05-13 18:53:52 -06004615}
4616
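// Common validation for vkCmdDrawIndirectCount and its KHR/AMD aliases: checks descriptor and subpass attachment
// usage, the indirect parameter buffer (up to maxDrawCount records of the given stride), and the count buffer.
// The actual draw count is unknown at record time, so vertex input is validated conservatively (see TODO below).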
4617bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4618 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4619 uint32_t stride, const char *function) const {
4620 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004621 const auto *cb_access_context = GetAccessContext(commandBuffer);
4622 assert(cb_access_context);
4623 if (!cb_access_context) return skip;
4624
4625 const auto *context = cb_access_context->GetCurrentAccessContext();
4626 assert(context);
4627 if (!context) return skip;
4628
locke-lunarg61870c22020-06-09 14:51:50 -06004629 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4630 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004631 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
4632 maxDrawCount, stride, function);
4633 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004634
4635 // TODO: For now, we validate the whole vertex buffer, which may cause false positives.
4636 // The VkDrawIndirectCommand buffer contents can be changed up until queue submission.
4637 // We will validate the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004638 skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004639 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004640}
4641
4642bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4643 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4644 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004645 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4646 "vkCmdDrawIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004647}
4648
4649void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4650 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4651 uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004652 StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4653 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004654 auto *cb_access_context = GetAccessContext(commandBuffer);
4655 assert(cb_access_context);
4656 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECTCOUNT);
4657 auto *context = cb_access_context->GetCurrentAccessContext();
4658 assert(context);
4659
locke-lunarg61870c22020-06-09 14:51:50 -06004660 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4661 cb_access_context->RecordDrawSubpassAttachment(tag);
4662 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
4663 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004664
4665 // TODO: For now, we record the whole vertex buffer, which may cause false positives.
4666 // The VkDrawIndirectCommand buffer contents can be changed up until queue submission.
4667 // We will record the vertex buffer at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004668 cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004669}
4670
4671bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4672 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4673 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004674 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4675 "vkCmdDrawIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004676}
4677
4678void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4679 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4680 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004681 StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4682 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004683 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004684}
4685
4686bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4687 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4688 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004689 return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4690 "vkCmdDrawIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004691}
4692
4693void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4694 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4695 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004696 StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
4697 stride);
locke-lunargff255f92020-05-13 18:53:52 -06004698 PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4699}
4700
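// Common validation for vkCmdDrawIndexedIndirectCount and its KHR/AMD aliases; mirrors ValidateCmdDrawIndirectCount
// but uses VkDrawIndexedIndirectCommand records and conservatively validates the index buffer as well.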
4701bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4702 VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4703 uint32_t stride, const char *function) const {
4704 bool skip = false;
locke-lunargff255f92020-05-13 18:53:52 -06004705 const auto *cb_access_context = GetAccessContext(commandBuffer);
4706 assert(cb_access_context);
4707 if (!cb_access_context) return skip;
4708
4709 const auto *context = cb_access_context->GetCurrentAccessContext();
4710 assert(context);
4711 if (!context) return skip;
4712
locke-lunarg61870c22020-06-09 14:51:50 -06004713 skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
4714 skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
John Zulauffaea0ee2021-01-14 14:01:32 -07004715 skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
4716 offset, maxDrawCount, stride, function);
4717 skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
locke-lunargff255f92020-05-13 18:53:52 -06004718
4719 // TODO: For now, we validate the whole index and vertex buffers, which may cause false positives.
4720 // The VkDrawIndexedIndirectCommand buffer contents can be changed up until queue submission.
4721 // We will validate the index and vertex buffers at queue submission in the future.
locke-lunarg61870c22020-06-09 14:51:50 -06004722 skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
locke-lunargff255f92020-05-13 18:53:52 -06004723 return skip;
locke-lunarge1a67022020-04-29 00:15:36 -06004724}
4725
4726bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4727 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4728 uint32_t maxDrawCount, uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004729 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4730 "vkCmdDrawIndexedIndirectCount");
locke-lunarge1a67022020-04-29 00:15:36 -06004731}
4732
4733void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4734 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4735 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004736 StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4737 maxDrawCount, stride);
locke-lunargff255f92020-05-13 18:53:52 -06004738 auto *cb_access_context = GetAccessContext(commandBuffer);
4739 assert(cb_access_context);
4740 const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECTCOUNT);
4741 auto *context = cb_access_context->GetCurrentAccessContext();
4742 assert(context);
4743
locke-lunarg61870c22020-06-09 14:51:50 -06004744 cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
4745 cb_access_context->RecordDrawSubpassAttachment(tag);
4746 RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
4747 RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
locke-lunargff255f92020-05-13 18:53:52 -06004748
4749 // TODO: For now, we record the whole index and vertex buffers, which may cause false positives.
4750 // The VkDrawIndexedIndirectCommand buffer contents can be changed up until queue submission.
locke-lunarg61870c22020-06-09 14:51:50 -06004751 // We will record the index and vertex buffers at queue submission in the future.
4752 cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004753}
4754
4755bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
4756 VkDeviceSize offset, VkBuffer countBuffer,
4757 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4758 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004759 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4760 "vkCmdDrawIndexedIndirectCountKHR");
locke-lunarge1a67022020-04-29 00:15:36 -06004761}
4762
4763void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4764 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4765 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004766 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4767 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004768 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4769}
4770
4771bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
4772 VkDeviceSize offset, VkBuffer countBuffer,
4773 VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
4774 uint32_t stride) const {
locke-lunargff255f92020-05-13 18:53:52 -06004775 return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
4776 "vkCmdDrawIndexedIndirectCountAMD");
locke-lunarge1a67022020-04-29 00:15:36 -06004777}
4778
4779void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
4780 VkBuffer countBuffer, VkDeviceSize countBufferOffset,
4781 uint32_t maxDrawCount, uint32_t stride) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004782 StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
4783 maxDrawCount, stride);
locke-lunarge1a67022020-04-29 00:15:36 -06004784 PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
4785}
4786
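// Image clears are treated as transfer writes: each requested subresource range is checked for hazards against
// prior accesses to the image.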
4787bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4788 const VkClearColorValue *pColor, uint32_t rangeCount,
4789 const VkImageSubresourceRange *pRanges) const {
4790 bool skip = false;
4791 const auto *cb_access_context = GetAccessContext(commandBuffer);
4792 assert(cb_access_context);
4793 if (!cb_access_context) return skip;
4794
4795 const auto *context = cb_access_context->GetCurrentAccessContext();
4796 assert(context);
4797 if (!context) return skip;
4798
4799 const auto *image_state = Get<IMAGE_STATE>(image);
4800
4801 for (uint32_t index = 0; index < rangeCount; index++) {
4802 const auto &range = pRanges[index];
4803 if (image_state) {
4804 auto hazard =
4805 context->DetectHazard(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
4806 if (hazard.hazard) {
4807 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004808 "vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004809 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004810 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004811 }
4812 }
4813 }
4814 return skip;
4815}
4816
4817void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4818 const VkClearColorValue *pColor, uint32_t rangeCount,
4819 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004820 StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004821 auto *cb_access_context = GetAccessContext(commandBuffer);
4822 assert(cb_access_context);
4823 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
4824 auto *context = cb_access_context->GetCurrentAccessContext();
4825 assert(context);
4826
4827 const auto *image_state = Get<IMAGE_STATE>(image);
4828
4829 for (uint32_t index = 0; index < rangeCount; index++) {
4830 const auto &range = pRanges[index];
4831 if (image_state) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07004832 context->UpdateAccessState(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
4833 image_state->createInfo.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004834 }
4835 }
4836}
4837
4838bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
4839 VkImageLayout imageLayout,
4840 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4841 const VkImageSubresourceRange *pRanges) const {
4842 bool skip = false;
4843 const auto *cb_access_context = GetAccessContext(commandBuffer);
4844 assert(cb_access_context);
4845 if (!cb_access_context) return skip;
4846
4847 const auto *context = cb_access_context->GetCurrentAccessContext();
4848 assert(context);
4849 if (!context) return skip;
4850
4851 const auto *image_state = Get<IMAGE_STATE>(image);
4852
4853 for (uint32_t index = 0; index < rangeCount; index++) {
4854 const auto &range = pRanges[index];
4855 if (image_state) {
4856 auto hazard =
4857 context->DetectHazard(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
4858 if (hazard.hazard) {
4859 skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004860 "vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06004861 string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
John Zulauffaea0ee2021-01-14 14:01:32 -07004862 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004863 }
4864 }
4865 }
4866 return skip;
4867}
4868
4869void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
4870 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
4871 const VkImageSubresourceRange *pRanges) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004872 StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
locke-lunarge1a67022020-04-29 00:15:36 -06004873 auto *cb_access_context = GetAccessContext(commandBuffer);
4874 assert(cb_access_context);
4875 const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
4876 auto *context = cb_access_context->GetCurrentAccessContext();
4877 assert(context);
4878
4879 const auto *image_state = Get<IMAGE_STATE>(image);
4880
4881 for (uint32_t index = 0; index < rangeCount; index++) {
4882 const auto &range = pRanges[index];
4883 if (image_state) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07004884 context->UpdateAccessState(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
4885 image_state->createInfo.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004886 }
4887 }
4888}
4889
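// The destination buffer is checked as a transfer write covering stride * queryCount bytes starting at dstOffset;
// accesses to the query pool itself are not yet tracked (see TODO below).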
4890bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
4891 uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
4892 VkDeviceSize dstOffset, VkDeviceSize stride,
4893 VkQueryResultFlags flags) const {
4894 bool skip = false;
4895 const auto *cb_access_context = GetAccessContext(commandBuffer);
4896 assert(cb_access_context);
4897 if (!cb_access_context) return skip;
4898
4899 const auto *context = cb_access_context->GetCurrentAccessContext();
4900 assert(context);
4901 if (!context) return skip;
4902
4903 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4904
4905 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004906 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
locke-lunarge1a67022020-04-29 00:15:36 -06004907 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
4908 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06004909 skip |=
4910 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
4911 "vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004912 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004913 }
4914 }
locke-lunargff255f92020-05-13 18:53:52 -06004915
4916 // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004917 return skip;
4918}
4919
4920void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
4921 uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4922 VkDeviceSize stride, VkQueryResultFlags flags) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004923 StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
4924 stride, flags);
locke-lunarge1a67022020-04-29 00:15:36 -06004925 auto *cb_access_context = GetAccessContext(commandBuffer);
4926 assert(cb_access_context);
locke-lunargff255f92020-05-13 18:53:52 -06004927 const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
locke-lunarge1a67022020-04-29 00:15:36 -06004928 auto *context = cb_access_context->GetCurrentAccessContext();
4929 assert(context);
4930
4931 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4932
4933 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004934 const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004935 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004936 }
locke-lunargff255f92020-05-13 18:53:52 -06004937
4938 // TODO: Track VkQueryPool
locke-lunarge1a67022020-04-29 00:15:36 -06004939}
4940
4941bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4942 VkDeviceSize size, uint32_t data) const {
4943 bool skip = false;
4944 const auto *cb_access_context = GetAccessContext(commandBuffer);
4945 assert(cb_access_context);
4946 if (!cb_access_context) return skip;
4947
4948 const auto *context = cb_access_context->GetCurrentAccessContext();
4949 assert(context);
4950 if (!context) return skip;
4951
4952 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4953
4954 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004955 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
locke-lunarge1a67022020-04-29 00:15:36 -06004956 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
4957 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06004958 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06004959 "vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07004960 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06004961 }
4962 }
4963 return skip;
4964}
4965
4966void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
4967 VkDeviceSize size, uint32_t data) {
locke-lunarg8ec19162020-06-16 18:48:34 -06004968 StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
locke-lunarge1a67022020-04-29 00:15:36 -06004969 auto *cb_access_context = GetAccessContext(commandBuffer);
4970 assert(cb_access_context);
4971 const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
4972 auto *context = cb_access_context->GetCurrentAccessContext();
4973 assert(context);
4974
4975 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
4976
4977 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06004978 const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
John Zulauf8e3c3e92021-01-06 11:19:36 -07004979 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06004980 }
4981}
4982
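// Resolves are checked per region: the source subresource as a transfer read and the destination subresource as a
// transfer write.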
4983bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
4984 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
4985 const VkImageResolve *pRegions) const {
4986 bool skip = false;
4987 const auto *cb_access_context = GetAccessContext(commandBuffer);
4988 assert(cb_access_context);
4989 if (!cb_access_context) return skip;
4990
4991 const auto *context = cb_access_context->GetCurrentAccessContext();
4992 assert(context);
4993 if (!context) return skip;
4994
4995 const auto *src_image = Get<IMAGE_STATE>(srcImage);
4996 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
4997
4998 for (uint32_t region = 0; region < regionCount; region++) {
4999 const auto &resolve_region = pRegions[region];
5000 if (src_image) {
5001 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, resolve_region.srcSubresource,
5002 resolve_region.srcOffset, resolve_region.extent);
5003 if (hazard.hazard) {
5004 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005005 "vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005006 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005007 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005008 }
5009 }
5010
5011 if (dst_image) {
5012 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, resolve_region.dstSubresource,
5013 resolve_region.dstOffset, resolve_region.extent);
5014 if (hazard.hazard) {
5015 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005016 "vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
John Zulauf1dae9192020-06-16 15:46:44 -06005017 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
John Zulauffaea0ee2021-01-14 14:01:32 -07005018 cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005019 }
5020 if (skip) break;
5021 }
5022 }
5023
5024 return skip;
5025}
5026
5027void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5028 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5029 const VkImageResolve *pRegions) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005030 StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5031 pRegions);
locke-lunarge1a67022020-04-29 00:15:36 -06005032 auto *cb_access_context = GetAccessContext(commandBuffer);
5033 assert(cb_access_context);
5034 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
5035 auto *context = cb_access_context->GetCurrentAccessContext();
5036 assert(context);
5037
5038 auto *src_image = Get<IMAGE_STATE>(srcImage);
5039 auto *dst_image = Get<IMAGE_STATE>(dstImage);
5040
5041 for (uint32_t region = 0; region < regionCount; region++) {
5042 const auto &resolve_region = pRegions[region];
5043 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005044 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
5045 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005046 }
5047 if (dst_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005048 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
5049 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005050 }
5051 }
5052}
5053
Jeff Leger178b1e52020-10-05 12:22:23 -04005054bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5055 const VkResolveImageInfo2KHR *pResolveImageInfo) const {
5056 bool skip = false;
5057 const auto *cb_access_context = GetAccessContext(commandBuffer);
5058 assert(cb_access_context);
5059 if (!cb_access_context) return skip;
5060
5061 const auto *context = cb_access_context->GetCurrentAccessContext();
5062 assert(context);
5063 if (!context) return skip;
5064
5065 const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5066 const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
5067
5068 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5069 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5070 if (src_image) {
5071 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, resolve_region.srcSubresource,
5072 resolve_region.srcOffset, resolve_region.extent);
5073 if (hazard.hazard) {
5074 skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
5075 "vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
5076 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005077 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005078 }
5079 }
5080
5081 if (dst_image) {
5082 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, resolve_region.dstSubresource,
5083 resolve_region.dstOffset, resolve_region.extent);
5084 if (hazard.hazard) {
5085 skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
5086 "vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
5087 string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
John Zulauffaea0ee2021-01-14 14:01:32 -07005088 region, cb_access_context->FormatUsage(hazard).c_str());
Jeff Leger178b1e52020-10-05 12:22:23 -04005089 }
5090 if (skip) break;
5091 }
5092 }
5093
5094 return skip;
5095}
5096
5097void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
5098 const VkResolveImageInfo2KHR *pResolveImageInfo) {
5099 StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
5100 auto *cb_access_context = GetAccessContext(commandBuffer);
5101 assert(cb_access_context);
5102 const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
5103 auto *context = cb_access_context->GetCurrentAccessContext();
5104 assert(context);
5105
5106 auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
5107 auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
5108
5109 for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
5110 const auto &resolve_region = pResolveImageInfo->pRegions[region];
5111 if (src_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005112 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
5113 resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005114 }
5115 if (dst_image) {
John Zulauf8e3c3e92021-01-06 11:19:36 -07005116 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
5117 resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
Jeff Leger178b1e52020-10-05 12:22:23 -04005118 }
5119 }
5120}
5121
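// vkCmdUpdateBuffer writes exactly dataSize bytes (VK_WHOLE_SIZE is not permitted here), so the hazard range is
// [dstOffset, dstOffset + dataSize).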
locke-lunarge1a67022020-04-29 00:15:36 -06005122bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5123 VkDeviceSize dataSize, const void *pData) const {
5124 bool skip = false;
5125 const auto *cb_access_context = GetAccessContext(commandBuffer);
5126 assert(cb_access_context);
5127 if (!cb_access_context) return skip;
5128
5129 const auto *context = cb_access_context->GetCurrentAccessContext();
5130 assert(context);
5131 if (!context) return skip;
5132
5133 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5134
5135 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005136 // VK_WHOLE_SIZE not allowed
5137 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
locke-lunarge1a67022020-04-29 00:15:36 -06005138 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
5139 if (hazard.hazard) {
John Zulauf1dae9192020-06-16 15:46:44 -06005140 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
John Zulauf59e25072020-07-17 10:55:21 -06005141 "vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005142 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunarge1a67022020-04-29 00:15:36 -06005143 }
5144 }
5145 return skip;
5146}
5147
5148void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5149 VkDeviceSize dataSize, const void *pData) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005150 StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
locke-lunarge1a67022020-04-29 00:15:36 -06005151 auto *cb_access_context = GetAccessContext(commandBuffer);
5152 assert(cb_access_context);
5153 const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
5154 auto *context = cb_access_context->GetCurrentAccessContext();
5155 assert(context);
5156
5157 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5158
5159 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005160 // VK_WHOLE_SIZE not allowed
5161 const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
John Zulauf8e3c3e92021-01-06 11:19:36 -07005162 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunarge1a67022020-04-29 00:15:36 -06005163 }
5164}
locke-lunargff255f92020-05-13 18:53:52 -06005165
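// The buffer marker is a 4-byte transfer write at dstOffset.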
5166bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5167 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
5168 bool skip = false;
5169 const auto *cb_access_context = GetAccessContext(commandBuffer);
5170 assert(cb_access_context);
5171 if (!cb_access_context) return skip;
5172
5173 const auto *context = cb_access_context->GetCurrentAccessContext();
5174 assert(context);
5175 if (!context) return skip;
5176
5177 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5178
5179 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005180 const ResourceAccessRange range = MakeRange(dstOffset, 4);
locke-lunargff255f92020-05-13 18:53:52 -06005181 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
5182 if (hazard.hazard) {
John Zulauf59e25072020-07-17 10:55:21 -06005183 skip |=
5184 LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
5185 "vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
John Zulauffaea0ee2021-01-14 14:01:32 -07005186 report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
locke-lunargff255f92020-05-13 18:53:52 -06005187 }
5188 }
5189 return skip;
5190}
5191
5192void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
5193 VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
locke-lunarg8ec19162020-06-16 18:48:34 -06005194 StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
locke-lunargff255f92020-05-13 18:53:52 -06005195 auto *cb_access_context = GetAccessContext(commandBuffer);
5196 assert(cb_access_context);
5197 const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
5198 auto *context = cb_access_context->GetCurrentAccessContext();
5199 assert(context);
5200
5201 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
5202
5203 if (dst_buffer) {
John Zulauf3e86bf02020-09-12 10:47:57 -06005204 const ResourceAccessRange range = MakeRange(dstOffset, 4);
John Zulauf8e3c3e92021-01-06 11:19:36 -07005205 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
locke-lunargff255f92020-05-13 18:53:52 -06005206 }
5207}
John Zulauf49beb112020-11-04 16:06:31 -07005208
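// Event commands delegate hazard checks and state updates to the command buffer access context. Recording happens
// in the PostCallRecord hooks, after the state tracker has processed the command.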
5209bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
5210 bool skip = false;
5211 const auto *cb_context = GetAccessContext(commandBuffer);
5212 assert(cb_context);
5213 if (!cb_context) return skip;
5214
5215 return cb_context->ValidateSetEvent(commandBuffer, event, stageMask);
5216}
5217
5218void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5219 StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
5220 auto *cb_context = GetAccessContext(commandBuffer);
5221 assert(cb_context);
5222 if (!cb_context) return;
John Zulauf4a6105a2020-11-17 15:11:05 -07005223 const auto tag = cb_context->NextCommandTag(CMD_SETEVENT);
5224 cb_context->RecordSetEvent(commandBuffer, event, stageMask, tag);
John Zulauf49beb112020-11-04 16:06:31 -07005225}
5226
5227bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
5228 VkPipelineStageFlags stageMask) const {
5229 bool skip = false;
5230 const auto *cb_context = GetAccessContext(commandBuffer);
5231 assert(cb_context);
5232 if (!cb_context) return skip;
5233
5234 return cb_context->ValidateResetEvent(commandBuffer, event, stageMask);
5235}
5236
5237void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
5238 StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
5239 auto *cb_context = GetAccessContext(commandBuffer);
5240 assert(cb_context);
5241 if (!cb_context) return;
5242
5243 cb_context->RecordResetEvent(commandBuffer, event, stageMask);
5244}
5245
5246bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5247 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5248 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5249 uint32_t bufferMemoryBarrierCount,
5250 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5251 uint32_t imageMemoryBarrierCount,
5252 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
5253 bool skip = false;
5254 const auto *cb_context = GetAccessContext(commandBuffer);
5255 assert(cb_context);
5256 if (!cb_context) return skip;
5257
John Zulauf4a6105a2020-11-17 15:11:05 -07005258 return cb_context->ValidateWaitEvents(eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
5259 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
John Zulauf49beb112020-11-04 16:06:31 -07005260 pImageMemoryBarriers);
5261}
5262
5263void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
5264 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5265 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
5266 uint32_t bufferMemoryBarrierCount,
5267 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
5268 uint32_t imageMemoryBarrierCount,
5269 const VkImageMemoryBarrier *pImageMemoryBarriers) {
5270 StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5271 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
5272 imageMemoryBarrierCount, pImageMemoryBarriers);
5273
5274 auto *cb_context = GetAccessContext(commandBuffer);
5275 assert(cb_context);
5276 if (!cb_context) return;
5277
John Zulauf4a6105a2020-11-17 15:11:05 -07005278 const auto tag = cb_context->NextCommandTag(CMD_WAITEVENTS);
John Zulauf49beb112020-11-04 16:06:31 -07005279 cb_context->RecordWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
5280 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
John Zulauf4a6105a2020-11-17 15:11:05 -07005281 pImageMemoryBarriers, tag);
5282}
5283
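// Clear the per-address-type first-use scopes and reset the execution scope, returning the event to an
// untracked state.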
5284void SyncEventState::ResetFirstScope() {
5285 for (const auto address_type : kAddressTypes) {
5286 first_scope[static_cast<size_t>(address_type)].clear();
5287 }
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005288 scope = SyncExecScope();
John Zulauf4a6105a2020-11-17 15:11:05 -07005289}
5290
5291 // Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
5292SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(VkPipelineStageFlags srcStageMask) const {
5293 IgnoreReason reason = NotIgnored;
5294
5295 if (last_command == CMD_RESETEVENT && !HasBarrier(0U, 0U)) {
5296 reason = ResetWaitRace;
5297 } else if (unsynchronized_set) {
5298 reason = SetRace;
5299 } else {
Jeremy Gebben9893daf2021-01-04 10:40:50 -07005300 const VkPipelineStageFlags missing_bits = scope.mask_param & ~srcStageMask;
John Zulauf4a6105a2020-11-17 15:11:05 -07005301 if (missing_bits) reason = MissingStageBits;
5302 }
5303
5304 return reason;
5305}
5306
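// An event is considered covered by a barrier when there was no prior event command, the queried stage mask
// includes ALL_COMMANDS, or barriers applied since the event intersect the queried execution scope (or ALL_COMMANDS).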
5307bool SyncEventState::HasBarrier(VkPipelineStageFlags stageMask, VkPipelineStageFlags exec_scope_arg) const {
5308 bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
5309 (barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
5310 return has_barrier;
John Zulauf49beb112020-11-04 16:06:31 -07005311}
John Zulaufe7f6a5e2021-01-16 14:31:18 -07005312
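// SyncOpPipelineBarrier captures a vkCmdPipelineBarrier call in a replayable form: the API barrier arrays are
// converted to SyncBarrier structures and the buffer/image handles are resolved to shared state pointers up front.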
5313SyncOpPipelineBarrier::SyncOpPipelineBarrier(const SyncValidator &sync_state, VkQueueFlags queue_flags,
5314 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
5315 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
5316 const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
5317 const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
5318 const VkImageMemoryBarrier *pImageMemoryBarriers)
5319 : dependency_flags_(dependencyFlags),
5320 src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, srcStageMask)),
5321 dst_exec_scope_(SyncExecScope::MakeDst(queue_flags, dstStageMask)) {
5322 // Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
5323 MakeMemoryBarriers(src_exec_scope_, dst_exec_scope_, dependencyFlags, memoryBarrierCount, pMemoryBarriers);
5324 MakeBufferMemoryBarriers(sync_state, src_exec_scope_, dst_exec_scope_, dependencyFlags, bufferMemoryBarrierCount,
5325 pBufferMemoryBarriers);
5326 MakeImageMemoryBarriers(sync_state, src_exec_scope_, dst_exec_scope_, dependencyFlags, imageMemoryBarrierCount,
5327 pImageMemoryBarriers);
5328}
5329
5330bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
5331 bool skip = false;
5332 const auto *context = cb_context.GetCurrentAccessContext();
5333 assert(context);
5334 if (!context) return skip;
5335 // Validate Image Layout transitions
5336 for (const auto &image_barrier : image_memory_barriers_) {
5337 if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
5338 const auto *image_state = image_barrier.image.get();
5339 if (!image_state) continue;
5340 const auto hazard = context->DetectImageBarrierHazard(image_barrier);
5341 if (hazard.hazard) {
5342 // PHASE1 TODO -- add tag information to log msg when useful.
5343 const auto &sync_state = cb_context.GetSyncState();
5344 const auto image_handle = image_state->image;
5345 skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
5346 "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.",
5347 string_SyncHazard(hazard.hazard), image_barrier.index,
5348 sync_state.report_data->FormatHandle(image_handle).c_str(),
5349 cb_context.FormatUsage(hazard).c_str());
5350 }
5351 }
5352
5353 return skip;
5354}
5355
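// Apply the captured barriers to the current access context: buffer barriers, then image barriers (including any
// layout transitions), then the global memory barriers (resolved into the context); finally the execution scopes
// are applied to the command buffer's events.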
5356void SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context, const ResourceUsageTag &tag) const {
5357 auto context = cb_context->GetCurrentAccessContext();
5358 assert(context);
5359 // Apply buffer barriers to the pending state
5360 for (const auto &buffer_barrier : buffer_memory_barriers_) {
5361 const auto *buffer = buffer_barrier.buffer.get();
5362 if (!buffer) continue;
5363 const ApplyBarrierFunctor<PipelineBarrierOp> update_action({buffer_barrier.barrier, false /* layout_transition */});
5364 context->UpdateResourceAccess(*buffer, buffer_barrier.range, update_action);
5365 }
5366
5367 // Apply image barriers (and any layout transitions) to the pending state
5368 for (const auto &image_barrier : image_memory_barriers_) {
5369 const auto *image = image_barrier.image.get();
5370 if (!image) continue;
5371 bool layout_transition = image_barrier.old_layout != image_barrier.new_layout;
5372 const ApplyBarrierFunctor<PipelineBarrierOp> barrier_action({image_barrier.barrier, layout_transition});
5373 context->UpdateResourceAccess(*image, image_barrier.subresource_range, barrier_action);
5374 }
5375
5376 // Apply global memory barriers to the pending state and resolve
5377 std::vector<PipelineBarrierOp> barrier_ops;
5378 barrier_ops.reserve(memory_barriers_.size());
5379 for (const auto &memory_barrier : memory_barriers_) {
5380 barrier_ops.emplace_back(memory_barrier, false /* layout transition */);
5381 }
5382 ApplyBarrierOpsFunctor<PipelineBarrierOp> barriers_functor(true /* resolve */, barrier_ops, tag);
5383 context->ApplyGlobalBarriers(barriers_functor);
5384
5385 cb_context->ApplyGlobalBarriersToEvents(src_exec_scope_, dst_exec_scope_);
5386}
5387
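// Note: when no global memory barriers are supplied, a pure execution barrier is still recorded so the pipeline
// barrier's execution dependency is not lost.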
5388void SyncOpPipelineBarrier::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst,
5389 VkDependencyFlags dependency_flags, uint32_t memory_barrier_count,
5390 const VkMemoryBarrier *memory_barriers) {
5391 memory_barriers_.reserve(std::max<uint32_t>(1, memory_barrier_count));
5392 for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
5393 const auto &barrier = memory_barriers[barrier_index];
5394 SyncBarrier sync_barrier(barrier, src, dst);
5395 memory_barriers_.emplace_back(sync_barrier);
5396 }
5397 if (0 == memory_barrier_count) {
5398 // If there are no global memory barriers, force an exec barrier
5399 memory_barriers_.emplace_back(SyncBarrier(src, dst));
5400 }
5401}
5402
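// Buffer barriers are stored as SyncBarrier plus a byte range; barriers whose buffer handle cannot be resolved are
// kept as empty placeholder entries.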
5403void SyncOpPipelineBarrier::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5404 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5405 uint32_t barrier_count, const VkBufferMemoryBarrier *barriers) {
5406 buffer_memory_barriers_.reserve(barrier_count);
5407 for (uint32_t index = 0; index < barrier_count; index++) {
5408 const auto &barrier = barriers[index];
5409 auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
5410 if (buffer) {
5411 const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
5412 const auto range = MakeRange(barrier.offset, barrier_size);
5413 const SyncBarrier sync_barrier(barrier, src, dst);
5414 buffer_memory_barriers_.emplace_back(buffer, sync_barrier, range);
5415 } else {
5416 buffer_memory_barriers_.emplace_back();
5417 }
5418 }
5419}
5420
5421void SyncOpPipelineBarrier::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src,
5422 const SyncExecScope &dst, VkDependencyFlags dependencyFlags,
5423 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
5424 image_memory_barriers_.reserve(barrier_count);
5425 for (uint32_t index = 0; index < barrier_count; index++) {
5426 const auto &barrier = barriers[index];
5427 const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
5428 if (image) {
5429 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
5430 const SyncBarrier sync_barrier(barrier, src, dst);
5431 image_memory_barriers_.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout,
5432 subresource_range);
5433 } else {
5434 image_memory_barriers_.emplace_back();
5435 image_memory_barriers_.back().index = index; // Just in case we're interested in the ones we skipped.
5436 }
5437 }
5438}