/* Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Valve Corporation
 * Copyright (c) 2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    if (size == VK_WHOLE_SIZE) {
        return (whole_size - offset);
    }
    return size;
}
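
// For example, for a binding with whole_size = 1024 and offset = 256, GetRealWholeSize(256, VK_WHOLE_SIZE, 1024)
// yields 768 -- i.e. "the rest of the resource" -- while any explicit size is passed through unchanged.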

template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
    return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}

static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }

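// Illustrative usage: any struct with .offset and .size members (e.g. a VkBufferMemoryBarrier) maps to a half-open
// range [offset, offset + size), so MakeRange(VkDeviceSize(64), VkDeviceSize(192)) produces the range [64, 256).
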
// Expand the pipeline stages without regard to whether they are valid w.r.t. queue or extension
VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
    VkPipelineStageFlags expanded = stage_mask;
    if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
            if (all_commands.first & queue_flags) {
                expanded |= all_commands.second;
            }
        }
    }
    if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
    }
    return expanded;
}
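
// For example (illustrative; the exact expansion comes from syncAllCommandStagesByQueueFlags), on a graphics queue
// VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is replaced by the individual graphics stages such as
// VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | ... |
// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, so per-stage hazard tracking never has to reason about the
// meta-stage bits directly.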

VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
                                           std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
    VkPipelineStageFlags unscanned = stage_mask;
    VkPipelineStageFlags related = 0;
    for (const auto entry : map) {
        const auto stage = entry.first;
        if (stage & unscanned) {
            related = related | entry.second;
            unscanned = unscanned & ~stage;
            if (!unscanned) break;
        }
    }
    return related;
}

VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
}

VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
}
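
// These helpers model the spec's implicit inclusion of "logically earlier/later" stages in an execution dependency.
// For example (illustrative; the authoritative ordering lives in syncLogicallyEarlierStages/syncLogicallyLaterStages),
// WithEarlierPipelineStages(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) also covers the vertex pipeline stages and
// VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, matching how a srcStageMask's first scope includes everything that must have
// completed before the named stage.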

static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());

// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
const std::array<AccessContext::AddressType, AccessContext::kAddressTypeCount> AccessContext::kAddressTypes = {
    AccessContext::AddressType::kLinearAddress, AccessContext::AddressType::kIdealizedAddress};

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    prev_by_subpass_.resize(subpass, nullptr);  // Can't be more prevs than the subpass we're on
    for (const auto &prev_dep : subpass_dep.prev) {
        assert(prev_dep.dependency);
        const auto dep = *prev_dep.dependency;
        prev_.emplace_back(const_cast<AccessContext *>(&contexts[dep.srcSubpass]), queue_flags, dep);
        prev_by_subpass_[dep.srcSubpass] = &prev_.back();
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(const_cast<AccessContext *>(&contexts[async_subpass]));
    }
    if (subpass_dep.barrier_from_external) {
        src_external_ = TrackBack(external_context, queue_flags, *subpass_dep.barrier_from_external);
    } else {
        src_external_ = TrackBack();
    }
    if (subpass_dep.barrier_to_external) {
        dst_external_ = TrackBack(this, queue_flags, *subpass_dep.barrier_to_external);
    } else {
        dst_external_ = TrackBack();
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AddressType type, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResourceAccessState default_state;  // When present, PreviousAccess will "infill"
    ResolvePreviousAccess(type, range, &descent_map, &default_state);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

// A recursive range walker for hazard detection, first for the current context and then (via DetectPreviousHazard) to walk
// the DAG of prior contexts (for example subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AddressType type, const Detector &detector, const ResourceAccessRange &range,
                                         DetectOptions options) const {
    HazardResult hazard;

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
        // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
        // so we'll check these first
        for (const auto &async_context : async_) {
            hazard = async_context->DetectAsyncHazard(type, detector, range);
            if (hazard.hazard) return hazard;
        }
    }

    if (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) {
        const auto &accesses = GetAccessStateMap(type);
        const auto from = accesses.lower_bound(range);
        if (from != accesses.end() && from->first.intersects(range)) {
            const auto to = accesses.upper_bound(range);
            ResourceAccessRange gap = {range.begin, range.begin};
            for (auto pos = from; pos != to; ++pos) {
                hazard = detector.Detect(pos);
                if (hazard.hazard) return hazard;

                // make sure we don't go past range
                auto upper_bound = std::min(range.end, pos->first.end);
                gap.end = upper_bound;

                // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
                if (!gap.empty()) {
                    // Must recur on all gaps
                    hazard = DetectPreviousHazard(type, detector, gap);
                    if (hazard.hazard) return hazard;
                }
                gap.begin = upper_bound;
            }
            gap.end = range.end;
            if (gap.non_empty()) {
                hazard = DetectPreviousHazard(type, detector, gap);
                if (hazard.hazard) return hazard;
            }
        } else {
            hazard = DetectPreviousHazard(type, detector, range);
        }
    }

    return hazard;
}
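
// Typical calls (illustrative): DetectHazard(type, detector, range, DetectOptions::kDetectAll) for ordinary
// command-time checks, kDetectPrevious when validating only against a specific prior subpass context, and
// kDetectAsync when only the unsynchronized (async) subpasses need to be considered -- see
// DetectSubpassTransitionHazard below for an example of the split usage.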

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AddressType type, const Detector &detector, const ResourceAccessRange &range) const {
    auto &accesses = GetAccessStateMap(type);
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);

    HazardResult hazard;
    for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
        hazard = detector.DetectAsync(pos);
    }

    return hazard;
}

// Resolves the [first, last) source entries into the destination entry, applying the barrier (if any) to each access
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
                              ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
                              const SyncBarrier *barrier) {
    auto at = entry;
    for (auto pos = first; pos != last; ++pos) {
        // Every member of the input iterator range must fit within the remaining portion of entry
        assert(at->first.includes(pos->first));
        assert(at != dest->end());
        // Trim up at to the same size as the entry to resolve
        at = sparse_container::split(at, *dest, pos->first);
        auto access = pos->second;
        if (barrier) {
            access.ApplyBarrier(*barrier);
        }
        at->second.Resolve(access);
        ++at;  // Go to the remaining unused section of entry
    }
}

void AccessContext::ResolveAccessRange(AddressType type, const ResourceAccessRange &range, const SyncBarrier *barrier,
                                       ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
                                       bool recur_to_infill) const {
    ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
    while (current->range.non_empty() && range.includes(current->range.begin)) {
        if (current->pos_B->valid) {
            const auto &src_pos = current->pos_B->lower_bound;
            auto access = src_pos->second;
            if (barrier) {
                access.ApplyBarrier(*barrier);
            }
            if (current->pos_A->valid) {
                current.trim_A();
                current->pos_A->lower_bound->second.Resolve(access);
            } else {
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, access));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the insert segment
            }
        } else {
            // we have to descend to fill this gap
            if (recur_to_infill) {
                if (current->pos_A->valid) {
                    // Dest is valid, so we need to accumulate along the DAG and then resolve... in an N-to-1 resolve operation
                    ResourceAccessRangeMap gap_map;
                    ResolvePreviousAccess(type, current->range, &gap_map, infill_state);
                    ResolveMapToEntry(resolve_map, current->pos_A->lower_bound, gap_map.begin(), gap_map.end(), barrier);
                } else {
                    // There isn't anything in dest in current->range, so we can accumulate directly into it.
                    ResolvePreviousAccess(type, current->range, resolve_map, infill_state);
                    if (barrier) {
                        // Need to apply the barrier to the accesses we accumulated, noting that we haven't updated current
                        for (auto pos = resolve_map->lower_bound(current->range); pos != current->pos_A->lower_bound; ++pos) {
                            pos->second.ApplyBarrier(*barrier);
                        }
                    }
                }
                // Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
                // iteration of the outer while.

                // Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
                // not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
                // we stepped on the dest map
                const auto seek_to = current->range.end - 1;  // The subtraction is safe as range can't be empty (loop condition)
                current.invalidate_A();                       // Changes current->range
                current.seek(seek_to);
            } else if (!current->pos_A->valid && infill_state) {
                // If we didn't find anything in the current range, and we aren't recurring... we infill if required
                auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                current.invalidate_A(inserted);  // Update the parallel iterator to point at the correct segment after insert
            }
        }
        ++current;
    }
}

void AccessContext::ResolvePreviousAccess(AddressType type, const ResourceAccessRange &range, ResourceAccessRangeMap *descent_map,
                                          const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            prev_dep.context->ResolveAccessRange(type, range, &prev_dep.barrier, descent_map, infill_state);
        }

        if (src_external_.context) {
            src_external_.context->ResolveAccessRange(type, range, &src_external_.barrier, descent_map, infill_state);
        }
    }
}

AccessContext::AddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
    return (image.fragment_encoder->IsLinearImage()) ? AddressType::kLinearAddress : AddressType::kIdealizedAddress;
}

VkDeviceSize AccessContext::ResourceBaseAddress(const BINDABLE &bindable) {
    return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
}

static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.binding.mem_state; }

void AccessContext::ResolvePreviousAccess(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                          AddressType address_type, ResourceAccessRangeMap *descent_map,
                                          const ResourceAccessState *infill_state) const {
    if (!SimpleBinding(image_state)) return;

    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image_state.createInfo.extent);
    const auto base_address = ResourceBaseAddress(image_state);
    for (; range_gen->non_empty(); ++range_gen) {
        ResolvePreviousAccess(address_type, (*range_gen + base_address), descent_map, infill_state);
    }
}

static bool ValidateLayoutTransitions(const SyncValidator &sync_state, const RENDER_PASS_STATE &rp_state,
                                      const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, const char *func_name,
                                      uint32_t subpass, const AccessContext &context) {
    bool skip = false;
    const auto &transitions = rp_state.subpass_transitions[subpass];
    for (const auto &transition : transitions) {
        auto hazard = context.DetectSubpassTransitionHazard(transition, attachment_views);
        if (hazard.hazard) {
            skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
                                        "%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32 " image layout transition.",
                                        func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment);
        }
    }
    return skip;
}

class HazardDetector {
    SyncStageAccessIndex usage_index_;

  public:
    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectAsyncHazard(usage_index_);
    }
    HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
};

HazardResult AccessContext::DetectHazard(AddressType type, SyncStageAccessIndex usage_index,
                                         const ResourceAccessRange &range) const {
    HazardDetector detector(usage_index);
    return DetectHazard(type, detector, range, DetectOptions::kDetectAll);
}

HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
                                         const ResourceAccessRange &range) const {
    if (!SimpleBinding(buffer)) return HazardResult();
    return DetectHazard(AddressType::kLinearAddress, usage_index, range + ResourceBaseAddress(buffer));
}

HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
                                         const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
                                         const VkExtent3D &extent) const {
    if (!SimpleBinding(image)) return HazardResult();
    // TODO: replace the encoder/generator with offset3D/extent3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent);
    const auto address_type = ImageAddressType(image);
    const auto base_address = ResourceBaseAddress(image);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = DetectHazard(address_type, current_usage, (*range_gen + base_address));
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

class BarrierHazardDetector {
  public:
    BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                          SyncStageAccessFlags src_access_scope)
        : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}

    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
    }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
        // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
        return pos->second.DetectAsyncHazard(usage_index_);
    }

  private:
    SyncStageAccessIndex usage_index_;
    VkPipelineStageFlags src_exec_scope_;
    SyncStageAccessFlags src_access_scope_;
};

HazardResult AccessContext::DetectBarrierHazard(AddressType type, SyncStageAccessIndex current_usage,
                                                VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
                                                const ResourceAccessRange &range, DetectOptions options) const {
    BarrierHazardDetector detector(current_usage, src_exec_scope, src_access_scope);
    return DetectHazard(type, detector, range, options);
}

HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
                                                     SyncStageAccessFlags src_access_scope,
                                                     const VkImageSubresourceRange &subresource_range,
                                                     DetectOptions options) const {
    if (!SimpleBinding(image)) return HazardResult();
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image.createInfo.extent);
    const auto address_type = ImageAddressType(image);
    const auto base_address = ResourceBaseAddress(image);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = DetectBarrierHazard(address_type, SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope,
                                                  src_access_scope, (*range_gen + base_address), options);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
                                                     SyncStageAccessFlags src_stage_accesses,
                                                     const VkImageMemoryBarrier &barrier) const {
    auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
    const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
    return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
}

template <typename Flags, typename Map>
SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
    SyncStageAccessFlags scope = 0;
    for (const auto &bit_scope : map) {
        if (flag_mask < bit_scope.first) break;

        if (flag_mask & bit_scope.first) {
            scope |= bit_scope.second;
        }
    }
    return scope;
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
    return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
    return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
}

// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
    // accesses (after factoring out common terms, the union of the stage/access intersections is the intersection of
    // the union of all stage/access types for all the stages and the same union for the access mask...)
    return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
}
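
// Worked example (illustrative; the exact enumerant names come from the generated sync tables): for
// stages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT and accesses = VK_ACCESS_SHADER_READ_BIT, AccessScopeByStage
// enumerates every access the fragment stage can make, AccessScopeByAccess enumerates every stage that can perform
// a shader read, and their intersection is just the "fragment shader / shader read" stage-access bit -- exactly the
// scope a barrier with that stage/access pair provides.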

template <typename Action>
void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
    // TODO -- region/mem-range accurate update
    auto pos = accesses->lower_bound(range);
    if (pos == accesses->end() || !pos->first.intersects(range)) {
        // The range is empty, fill it with a default value.
        pos = action.Infill(accesses, pos, range);
    } else if (range.begin < pos->first.begin) {
        // Leading empty space, infill
        pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
    } else if (pos->first.begin < range.begin) {
        // Trim the beginning if needed
        pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
        ++pos;
    }

    const auto the_end = accesses->end();
    while ((pos != the_end) && pos->first.intersects(range)) {
        if (pos->first.end > range.end) {
            pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
        }

        pos = action(accesses, pos);
        if (pos == the_end) break;

        auto next = pos;
        ++next;
        if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
            // Need to infill if next is disjoint
            VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
            ResourceAccessRange new_range(pos->first.end, limit);
            next = action.Infill(accesses, next, new_range);
        }
        pos = next;
    }
}
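
// Illustrative walk-through: applying an action over [4, 12) to a map holding entries [0, 8) and [8, 16) first
// splits [0, 8) at 4 and [8, 16) at 12, then applies the action to [4, 8) and [8, 12) only; any uncovered stretch
// inside the requested range is handed to action.Infill() so the map stays contiguous over the updated range.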

struct UpdateMemoryAccessStateFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
        // this is only called on gaps, and never returns a gap.
        ResourceAccessState default_state;
        context.ResolvePreviousAccess(type, range, accesses, &default_state);
        return accesses->lower_bound(range);
    }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.Update(usage, tag);
        return pos;
    }

    UpdateMemoryAccessStateFunctor(AccessContext::AddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
                                   const ResourceUsageTag &tag_)
        : type(type_), context(context_), usage(usage_), tag(tag_) {}
    const AccessContext::AddressType type;
    const AccessContext &context;
    const SyncStageAccessIndex usage;
    const ResourceUsageTag &tag;
};

struct ApplyMemoryAccessBarrierFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.ApplyMemoryAccessBarrier(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
        return pos;
    }

    ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_exec_scope_, SyncStageAccessFlags src_access_scope_,
                                    VkPipelineStageFlags dst_exec_scope_, SyncStageAccessFlags dst_access_scope_)
        : src_exec_scope(src_exec_scope_),
          src_access_scope(src_access_scope_),
          dst_exec_scope(dst_exec_scope_),
          dst_access_scope(dst_access_scope_) {}

    VkPipelineStageFlags src_exec_scope;
    SyncStageAccessFlags src_access_scope;
    VkPipelineStageFlags dst_exec_scope;
    SyncStageAccessFlags dst_access_scope;
};

struct ApplyGlobalBarrierFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.ApplyExecutionBarrier(src_exec_scope, dst_exec_scope);

        for (const auto &functor : barrier_functor) {
            functor(accesses, pos);
        }
        return pos;
    }

    ApplyGlobalBarrierFunctor(VkPipelineStageFlags src_exec_scope, VkPipelineStageFlags dst_exec_scope,
                              SyncStageAccessFlags src_stage_accesses, SyncStageAccessFlags dst_stage_accesses,
                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
        : src_exec_scope(src_exec_scope), dst_exec_scope(dst_exec_scope) {
        // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
        barrier_functor.reserve(memoryBarrierCount);
        for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
            const auto &barrier = pMemoryBarriers[barrier_index];
            barrier_functor.emplace_back(src_exec_scope, SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask),
                                         dst_exec_scope, SyncStageAccess::AccessScope(dst_stage_accesses, barrier.dstAccessMask));
        }
    }

    const VkPipelineStageFlags src_exec_scope;
    const VkPipelineStageFlags dst_exec_scope;
    std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
};

void AccessContext::UpdateAccessState(AddressType type, SyncStageAccessIndex current_usage, const ResourceAccessRange &range,
                                      const ResourceUsageTag &tag) {
    UpdateMemoryAccessStateFunctor action(type, *this, current_usage, tag);
    UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
}

void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage,
                                      const ResourceAccessRange &range, const ResourceUsageTag &tag) {
    if (!SimpleBinding(buffer)) return;
    const auto base_address = ResourceBaseAddress(buffer);
    UpdateAccessState(AddressType::kLinearAddress, current_usage, range + base_address, tag);
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
                                      const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag &tag) {
    if (!SimpleBinding(image)) return;
    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent);
    const auto address_type = ImageAddressType(image);
    const auto base_address = ResourceBaseAddress(image);
    UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, tag);
    for (; range_gen->non_empty(); ++range_gen) {
        UpdateMemoryAccessState(&GetAccessStateMap(address_type), (*range_gen + base_address), action);
    }
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
                                      const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag &tag) {
    // TODO: replace the encoder/generator with offset3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    UpdateAccessState(image, current_usage, subresource_range, offset, extent, tag);
}

template <typename Action>
void AccessContext::UpdateMemoryAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
    if (!SimpleBinding(buffer)) return;
    const auto base_address = ResourceBaseAddress(buffer);
    UpdateMemoryAccessState(&GetAccessStateMap(AddressType::kLinearAddress), (range + base_address), action);
}

template <typename Action>
void AccessContext::UpdateMemoryAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
                                       const Action action) {
    if (!SimpleBinding(image)) return;
    const auto address_type = ImageAddressType(image);
    auto *accesses = &GetAccessStateMap(address_type);

    subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
                                                       image.createInfo.extent);

    const auto base_address = ResourceBaseAddress(image);
    for (; range_gen->non_empty(); ++range_gen) {
        UpdateMemoryAccessState(accesses, (*range_gen + base_address), action);
    }
}

template <typename Action>
void AccessContext::ApplyGlobalBarriers(const Action &barrier_action) {
    // Note: Barriers do *not* cross context boundaries, applying to accesses within.... (at least for renderpass subpasses)
    for (const auto address_type : kAddressTypes) {
        UpdateMemoryAccessState(&GetAccessStateMap(address_type), full_range, barrier_action);
    }
}

void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
    for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
        auto &context = contexts[subpass_index];
        for (const auto address_type : kAddressTypes) {
            context.ResolveAccessRange(address_type, full_range, &context.GetDstExternalTrackBack().barrier,
                                       &GetAccessStateMap(address_type), nullptr, false);
        }
    }
}

void AccessContext::ApplyImageBarrier(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
                                      SyncStageAccessFlags src_access_scope, VkPipelineStageFlags dst_exec_scope,
                                      SyncStageAccessFlags dst_access_scope, const VkImageSubresourceRange &subresource_range) {
    const ApplyMemoryAccessBarrierFunctor barrier_action(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
    UpdateMemoryAccess(image, subresource_range, barrier_action);
}

// TODO: Plumb offset/extent throughout the image call stacks, with default injector overloads to preserve backwards
// compatibility as needed
void AccessContext::ApplyImageBarrier(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
                                      SyncStageAccessFlags src_access_scope, VkPipelineStageFlags dst_exec_scope,
                                      SyncStageAccessFlags dst_access_scope, const VkImageSubresourceRange &subresource_range,
                                      bool layout_transition, const ResourceUsageTag &tag) {
    if (layout_transition) {
        UpdateAccessState(image, SYNC_IMAGE_LAYOUT_TRANSITION, subresource_range, VkOffset3D{0, 0, 0}, image.createInfo.extent,
                          tag);
        ApplyImageBarrier(image, src_exec_scope, SYNC_IMAGE_LAYOUT_TRANSITION_BIT, dst_exec_scope, dst_access_scope,
                          subresource_range);
    }
    ApplyImageBarrier(image, src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope, subresource_range);
}

void AccessContext::ApplyImageBarrier(const IMAGE_STATE &image, const SyncBarrier &barrier,
                                      const VkImageSubresourceRange &subresource_range, bool layout_transition,
                                      const ResourceUsageTag &tag) {
    ApplyImageBarrier(image, barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope,
                      subresource_range, layout_transition, tag);
}

// Suitable only for *subpass* access contexts
HazardResult AccessContext::DetectSubpassTransitionHazard(const RENDER_PASS_STATE::AttachmentTransition &transition,
                                                          const std::vector<const IMAGE_VIEW_STATE *> &attachments) const {
    const auto *attach_view = attachments[transition.attachment];
    if (!attach_view) return HazardResult();
    const auto image_state = attach_view->image_state.get();
    if (!image_state) return HazardResult();

    const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
    // We should never ask for a transition from a context we don't have
    assert(track_back);
    assert(track_back->context);

    // Do the detection against the specific prior context independent of other contexts. (Synchronous only)
    auto hazard = track_back->context->DetectImageBarrierHazard(*image_state, track_back->barrier.src_exec_scope,
                                                                track_back->barrier.src_access_scope,
                                                                attach_view->normalized_subresource_range, kDetectPrevious);
    if (!hazard.hazard) {
        // The Async hazard check is against the current context's async set.
        hazard = DetectImageBarrierHazard(*image_state, track_back->barrier.src_exec_scope, track_back->barrier.src_access_scope,
                                          attach_view->normalized_subresource_range, kDetectAsync);
    }
    return hazard;
}

// Class CommandBufferAccessContext: Keep track of resource access state information for a specific command buffer
bool CommandBufferAccessContext::ValidateBeginRenderPass(const RENDER_PASS_STATE &rp_state,
                                                         const VkRenderPassBeginInfo *pRenderPassBegin,
                                                         const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                                                         const char *func_name) const {
    // Check if any of the layout transitions are hazardous.... but we don't have the renderpass context to work with, so we
    // build a temporary context from the subpass 0 dependencies to validate against.
    bool skip = false;
    uint32_t subpass = 0;
    const auto &transitions = rp_state.subpass_transitions[subpass];
    if (transitions.size()) {
        const std::vector<AccessContext> empty_context_vector;
        // Create context we can use to validate against...
        AccessContext temp_context(subpass, queue_flags_, rp_state.subpass_dependencies, empty_context_vector,
                                   const_cast<AccessContext *>(&cb_access_context_));

        assert(pRenderPassBegin);
        if (nullptr == pRenderPassBegin) return skip;

        const auto fb_state = sync_state_->Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
        assert(fb_state);
        if (nullptr == fb_state) return skip;

        // Create a limited array of views (which we'll need to toss)
        std::vector<const IMAGE_VIEW_STATE *> views;
        const auto count_attachment = GetFramebufferAttachments(*pRenderPassBegin, *fb_state);
        const auto attachment_count = count_attachment.first;
        const auto *attachments = count_attachment.second;
        views.resize(attachment_count, nullptr);
        for (const auto &transition : transitions) {
            assert(transition.attachment < attachment_count);
            views[transition.attachment] = sync_state_->Get<IMAGE_VIEW_STATE>(attachments[transition.attachment]);
        }

        skip |= ValidateLayoutTransitions(*sync_state_, rp_state, views, func_name, 0, temp_context);
    }
    return skip;
}

bool CommandBufferAccessContext::ValidateNextSubpass(const char *func_name) const {
    // TODO: Things to add here.
    // Validate Preserve/Resolve attachments
    bool skip = false;
    skip |= current_renderpass_context_->ValidateNextSubpassLayoutTransitions(*sync_state_, func_name);

    return skip;
}

bool CommandBufferAccessContext::ValidateEndRenderpass(const char *func_name) const {
    // TODO: Things to add here.
    // Validate Preserve/Resolve attachments
    bool skip = false;
    skip |= current_renderpass_context_->ValidateFinalSubpassLayoutTransitions(*sync_state_, func_name);

    return skip;
}

void CommandBufferAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
    assert(sync_state_);
    if (!cb_state_) return;

    // Create an access context for the current renderpass.
    render_pass_contexts_.emplace_back(&cb_access_context_);
    current_renderpass_context_ = &render_pass_contexts_.back();
    current_renderpass_context_->RecordBeginRenderPass(*sync_state_, *cb_state_, queue_flags_, tag);
    current_context_ = &current_renderpass_context_->CurrentContext();
}

void CommandBufferAccessContext::RecordNextSubpass(const RENDER_PASS_STATE &rp_state, const ResourceUsageTag &tag) {
    assert(current_renderpass_context_);
    current_renderpass_context_->RecordNextSubpass(tag);
    current_context_ = &current_renderpass_context_->CurrentContext();
}

void CommandBufferAccessContext::RecordEndRenderPass(const RENDER_PASS_STATE &render_pass, const ResourceUsageTag &tag) {
    // TODO: Add layout load/store/resolve access (here or in RenderPassContext)
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return;

    current_renderpass_context_->RecordEndRenderPass(tag);
    current_context_ = &cb_access_context_;
    current_renderpass_context_ = nullptr;
}

bool RenderPassAccessContext::ValidateNextSubpassLayoutTransitions(const SyncValidator &sync_state, const char *func_name) const {
    bool skip = false;
    const auto next_subpass = current_subpass_ + 1;
    skip |= ValidateLayoutTransitions(sync_state, *rp_state_, attachment_views_, func_name, next_subpass,
                                      subpass_contexts_[next_subpass]);
    return skip;
}

bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const SyncValidator &sync_state, const char *func_name) const {
    bool skip = false;

    // Validate the "finalLayout" transitions to external
    // Get them from where we're hiding them in the extra entry.
    const auto &final_transitions = rp_state_->subpass_transitions.back();
    for (const auto &transition : final_transitions) {
        const auto &attach_view = attachment_views_[transition.attachment];
        const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
        assert(trackback.context);  // Transitions are given implicit transitions if the StateTracker is working correctly
        auto hazard = trackback.context->DetectImageBarrierHazard(
            *attach_view->image_state, trackback.barrier.src_exec_scope, trackback.barrier.src_access_scope,
            attach_view->normalized_subresource_range, AccessContext::DetectOptions::kDetectPrevious);
        if (hazard.hazard) {
            skip |= sync_state.LogError(rp_state_->renderPass, string_SyncHazardVUID(hazard.hazard),
                                        "%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
                                        " final image layout transition.",
                                        func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment);
        }
    }
    return skip;
}

void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
    // Add layout transitions...
    const auto &transitions = rp_state_->subpass_transitions[current_subpass_];
    auto &subpass_context = subpass_contexts_[current_subpass_];
    for (const auto &transition : transitions) {
        const auto attachment_view = attachment_views_[transition.attachment];
        if (!attachment_view) continue;
        const auto image = attachment_view->image_state.get();
        if (!image) continue;

        const auto *barrier = subpass_context.GetTrackBackFromSubpass(transition.prev_pass);
        subpass_context.ApplyImageBarrier(*image, barrier->barrier, attachment_view->normalized_subresource_range, true, tag);
    }
}

void RenderPassAccessContext::RecordBeginRenderPass(const SyncValidator &state, const CMD_BUFFER_STATE &cb_state,
                                                    VkQueueFlags queue_flags, const ResourceUsageTag &tag) {
    current_subpass_ = 0;
    rp_state_ = cb_state.activeRenderPass;
    subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
    // Add this for all subpasses here so that they exist during next subpass validation
    for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
        subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context_);
    }
    attachment_views_ = state.GetCurrentAttachmentViews(cb_state);

    RecordLayoutTransitions(tag);
}

void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag &tag) {
    current_subpass_++;
    assert(current_subpass_ < subpass_contexts_.size());
    RecordLayoutTransitions(tag);
}

void RenderPassAccessContext::RecordEndRenderPass(const ResourceUsageTag &tag) {
    // Export the accesses from the renderpass...
    external_context_->ResolveChildContexts(subpass_contexts_);

    // Add the "finalLayout" transitions to external
    // Get them from where we're hiding them in the extra entry.
    const auto &final_transitions = rp_state_->subpass_transitions.back();
    for (const auto &transition : final_transitions) {
        const auto &attachment = attachment_views_[transition.attachment];
        const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
        assert(external_context_ == last_trackback.context);
        external_context_->ApplyImageBarrier(*attachment->image_state, last_trackback.barrier,
                                             attachment->normalized_subresource_range, true, tag);
    }
}

SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &barrier) {
    const auto src_stage_mask = ExpandPipelineStages(queue_flags, barrier.srcStageMask);
    src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    src_access_scope = SyncStageAccess::AccessScope(src_stage_mask, barrier.srcAccessMask);
    const auto dst_stage_mask = ExpandPipelineStages(queue_flags, barrier.dstStageMask);
    dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
    dst_access_scope = SyncStageAccess::AccessScope(dst_stage_mask, barrier.dstAccessMask);
}

void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier) {
    ApplyExecutionBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
    ApplyMemoryAccessBarrier(barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope);
}

HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (IsWriteHazard(usage)) {
            hazard.Set(READ_AFTER_WRITE, write_tag);
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE usage states if any
        // Write-After-Write check -- if we have a previous write to test against
        if (last_write && IsWriteHazard(usage)) {
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        } else {
            // Only look for casus belli for WAR
            const auto usage_stage = PipelineStageBit(usage_index);
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                if (IsReadHazard(usage_stage, last_reads[read_index])) {
                    hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
                    break;
                }
            }
        }
    }
    return hazard;
}
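
// Example (illustrative): a dispatch reading a storage buffer that a preceding vkCmdCopyBuffer wrote, with no
// intervening barrier, lands in the IsRead() branch and reports READ_AFTER_WRITE against the copy's write_tag;
// two back-to-back copy writes to the same range report WRITE_AFTER_WRITE instead.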

// Asynchronous Hazards occur between subpasses with no connection through the DAG
HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (last_write != 0) {
            hazard.Set(READ_RACING_WRITE, write_tag);
        }
    } else {
        if (last_write != 0) {
            hazard.Set(WRITE_RACING_WRITE, write_tag);
        } else if (last_read_count > 0) {
            hazard.Set(WRITE_RACING_READ, last_reads[0].tag);
        }
    }
    return hazard;
}
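
// Example (illustrative): two subpasses with no VkSubpassDependency between them that touch the same attachment
// range race -- any write recorded in the "other" subpass makes even a read here a READ_RACING_WRITE, because
// without a DAG edge there is no ordering that could make the access safe.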

HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                                                      SyncStageAccessFlags src_access_scope) const {
    // Only supporting image layout transitions for now
    assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
    HazardResult hazard;
    if (last_write) {
        // If the previous write is *not* in the 1st access scope
        // *AND* the current barrier is not in the dependency chain
        // *AND* there is no prior memory barrier for the previous write in the dependency chain
        // then the barrier access is unsafe (R/W after W)
        if (((last_write & src_access_scope) == 0) && ((src_exec_scope & write_dependency_chain) == 0) && (write_barriers == 0)) {
            // TODO: Do we need a different hazard name for this?
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        }
    }
    if (!hazard.hazard) {
        // Look at the reads if any
        for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
            const auto &read_access = last_reads[read_index];
            // If the read stage is not in the src sync scope
            // *AND* not execution chained with an existing sync barrier (that's the or)
            // then the barrier access is unsafe (R/W after R)
            if ((src_exec_scope & (read_access.stage | read_access.barriers)) == 0) {
                hazard.Set(WRITE_AFTER_READ, read_access.tag);
                break;
            }
        }
    }
    return hazard;
}

John Zulauf5f13a792020-03-10 07:31:21 -06001010// The logic behind Resolve is the same as Update: we assume that earlier hazards have been reported, and that no
1011// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
1012// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
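// Resolve merges another access state (e.g. one accumulated in a subpass context) into this one: the later write
// simply wins, and equal writes union their write barriers and merge the per-stage read state.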
1013void ResourceAccessState::Resolve(const ResourceAccessState &other) {
1014 if (write_tag.IsBefore(other.write_tag)) {
1015        // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent operation
1016 *this = other;
1017 } else if (!other.write_tag.IsBefore(write_tag)) {
1018        // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
1019 // dependency chaining logic or any stage expansion)
1020 write_barriers |= other.write_barriers;
1021
1022        // Merge the read states
1023 for (uint32_t other_read_index = 0; other_read_index < other.last_read_count; other_read_index++) {
1024 auto &other_read = other.last_reads[other_read_index];
1025 if (last_read_stages & other_read.stage) {
1026 // Merge in the barriers for read stages that exist in *both* this and other
1027 // TODO: This is N^2 with stages... perhaps the ReadStates should be by stage index.
1028 for (uint32_t my_read_index = 0; my_read_index < last_read_count; my_read_index++) {
1029 auto &my_read = last_reads[my_read_index];
1030 if (other_read.stage == my_read.stage) {
1031 if (my_read.tag.IsBefore(other_read.tag)) {
1032 my_read.tag = other_read.tag;
1033 }
1034 my_read.barriers |= other_read.barriers;
1035 break;
1036 }
1037 }
1038 } else {
1039 // The other read stage doesn't exist in this, so add it.
1040 last_reads[last_read_count] = other_read;
1041 last_read_count++;
1042 last_read_stages |= other_read.stage;
1043 }
1044 }
1045    }  // the else clause would be that other write is before this write... in which case we supersede the other state and ignore
1046 // it.
1047}
1048
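// Update records a new usage in this access state: reads are tracked per pipeline stage (one slot per stage, with
// its barriers reset), while a write clobbers all prior read state and both barrier sets.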
John Zulauf9cb530d2019-09-30 14:14:10 -06001049void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
1050    // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
1051 const auto usage_bit = FlagBit(usage_index);
1052 if (IsRead(usage_index)) {
1053        // Multiple outstanding reads may be of interest and do dependency chains independently
1054 // However, for purposes of barrier tracking, only one read per pipeline stage matters
1055 const auto usage_stage = PipelineStageBit(usage_index);
1056 if (usage_stage & last_read_stages) {
1057 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
1058 ReadState &access = last_reads[read_index];
1059 if (access.stage == usage_stage) {
1060 access.barriers = 0;
1061 access.tag = tag;
1062 break;
1063 }
1064 }
1065 } else {
1066 // We don't have this stage in the list yet...
1067 assert(last_read_count < last_reads.size());
1068 ReadState &access = last_reads[last_read_count++];
1069 access.stage = usage_stage;
1070 access.barriers = 0;
1071 access.tag = tag;
1072 last_read_stages |= usage_stage;
1073 }
1074 } else {
1075 // Assume write
1076 // TODO determine what to do with READ-WRITE operations if any
1077 // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
1078        // If the last_reads/last_write were unsafe, we've reported them;
1079        // in either case the prior accesses are irrelevant, and we can overwrite them as *this* write is now after them
1080 last_read_count = 0;
1081 last_read_stages = 0;
1082
1083 write_barriers = 0;
1084 write_dependency_chain = 0;
1085 write_tag = tag;
1086 last_write = usage_bit;
1087 }
1088}
John Zulauf5f13a792020-03-10 07:31:21 -06001089
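// An execution-only barrier extends the dependency chain: any read whose stage (or previously chained barriers)
// intersects srcStageMask gains dstStageMask as its second scope, and the write dependency chain is extended the
// same way.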
John Zulauf9cb530d2019-09-30 14:14:10 -06001090void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
1091 // Execution Barriers only protect read operations
1092 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
1093 ReadState &access = last_reads[read_index];
1094 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
1095 if (srcStageMask & (access.stage | access.barriers)) {
1096 access.barriers |= dstStageMask;
1097 }
1098 }
1099 if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
1100}
1101
John Zulauf36bcf6a2020-02-03 15:12:52 -07001102void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
1103 VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_access_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -06001104 // Assuming we've applied the execution side of this barrier, we update just the write
1105 // The || implements the "dependency chain" logic for this barrier
John Zulauf36bcf6a2020-02-03 15:12:52 -07001106 if ((src_access_scope & last_write) || (write_dependency_chain & src_exec_scope)) {
1107 write_barriers |= dst_access_scope;
1108 write_dependency_chain |= dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -06001109 }
1110}
1111
John Zulaufd1f85d42020-04-15 12:23:15 -06001112void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001113 auto *access_context = GetAccessContextNoInsert(command_buffer);
1114 if (access_context) {
1115 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -06001116 }
1117}
1118
John Zulaufd1f85d42020-04-15 12:23:15 -06001119void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
1120 auto access_found = cb_access_state.find(command_buffer);
1121 if (access_found != cb_access_state.end()) {
1122 access_found->second->Reset();
1123 cb_access_state.erase(access_found);
1124 }
1125}
1126
John Zulauf540266b2020-04-06 18:54:53 -06001127void SyncValidator::ApplyGlobalBarriers(AccessContext *context, VkPipelineStageFlags srcStageMask,
John Zulauf36bcf6a2020-02-03 15:12:52 -07001128 VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_access_scope,
1129 SyncStageAccessFlags dst_access_scope, uint32_t memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -06001130 const VkMemoryBarrier *pMemoryBarriers) {
1131 // TODO: Implement this better (maybe some delayed/on-demand integration).
John Zulauf36bcf6a2020-02-03 15:12:52 -07001132 ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_access_scope, dst_access_scope, memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -06001133 pMemoryBarriers);
John Zulauf540266b2020-04-06 18:54:53 -06001134 context->ApplyGlobalBarriers(barriers_functor);
John Zulauf9cb530d2019-09-30 14:14:10 -06001135}
1136
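// Buffer barriers are applied per barrier range; VK_WHOLE_SIZE is resolved against the buffer's createInfo.size
// before the access range is built.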
John Zulauf540266b2020-04-06 18:54:53 -06001137void SyncValidator::ApplyBufferBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -07001138 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
1139 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
John Zulauf9cb530d2019-09-30 14:14:10 -06001140 const VkBufferMemoryBarrier *barriers) {
1141 // TODO Implement this at subresource/memory_range accuracy
1142 for (uint32_t index = 0; index < barrier_count; index++) {
locke-lunarg3c038002020-04-30 23:08:08 -06001143 auto barrier = barriers[index];
John Zulauf9cb530d2019-09-30 14:14:10 -06001144 const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
1145 if (!buffer) continue;
locke-lunarg3c038002020-04-30 23:08:08 -06001146 barrier.size = GetRealWholeSize(barrier.offset, barrier.size, buffer->createInfo.size);
John Zulauf16adfc92020-04-08 10:28:33 -06001147 ResourceAccessRange range = MakeRange(barrier);
John Zulauf540266b2020-04-06 18:54:53 -06001148 const auto src_access_scope = AccessScope(src_stage_accesses, barrier.srcAccessMask);
1149 const auto dst_access_scope = AccessScope(dst_stage_accesses, barrier.dstAccessMask);
1150 const ApplyMemoryAccessBarrierFunctor update_action(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
1151 context->UpdateMemoryAccess(*buffer, range, update_action);
John Zulauf9cb530d2019-09-30 14:14:10 -06001152 }
1153}
1154
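// Image barriers are applied over the normalized subresource range; a barrier with oldLayout != newLayout is
// treated as a layout transition (an additional write) and tagged accordingly.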
John Zulauf540266b2020-04-06 18:54:53 -06001155void SyncValidator::ApplyImageBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
1156 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
1157 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
John Zulauf355e49b2020-04-24 15:11:15 -06001158 const VkImageMemoryBarrier *barriers, const ResourceUsageTag &tag) {
John Zulauf5c5e88d2019-12-26 11:22:02 -07001159 for (uint32_t index = 0; index < barrier_count; index++) {
1160 const auto &barrier = barriers[index];
1161 const auto *image = Get<IMAGE_STATE>(barrier.image);
1162 if (!image) continue;
John Zulauf540266b2020-04-06 18:54:53 -06001163 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
John Zulauf355e49b2020-04-24 15:11:15 -06001164 bool layout_transition = barrier.oldLayout != barrier.newLayout;
1165 const auto src_access_scope = AccessScope(src_stage_accesses, barrier.srcAccessMask);
1166 const auto dst_access_scope = AccessScope(dst_stage_accesses, barrier.dstAccessMask);
1167 context->ApplyImageBarrier(*image, src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope, subresource_range,
1168 layout_transition, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001169 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001170}
1171
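// Validate phase for vkCmdCopyBuffer: hazards are detected per region against the current access context without
// modifying it; the matching PreCallRecord below updates the access state with the command's tag.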
1172bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
1173 uint32_t regionCount, const VkBufferCopy *pRegions) const {
1174 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001175 const auto *cb_context = GetAccessContext(commandBuffer);
1176 assert(cb_context);
1177 if (!cb_context) return skip;
1178 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -06001179
John Zulauf3d84f1b2020-03-09 13:33:25 -06001180 // If we have no previous accesses, we have no hazards
1181 // TODO: make this sub-resource capable
1182 // TODO: make this general, and stuff it into templates/utility functions
1183 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001184 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001185
1186 for (uint32_t region = 0; region < regionCount; region++) {
1187 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06001188 if (src_buffer) {
locke-lunarg3c038002020-04-30 23:08:08 -06001189 ResourceAccessRange src_range =
1190 MakeRange(copy_region.srcOffset, GetRealWholeSize(copy_region.srcOffset, copy_region.size, src_buffer->createInfo.size));
John Zulauf16adfc92020-04-08 10:28:33 -06001191 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001192 if (hazard.hazard) {
1193 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001194 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
1195 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1196 report_data->FormatHandle(srcBuffer).c_str(), region);
John Zulauf9cb530d2019-09-30 14:14:10 -06001197 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001198 }
John Zulauf16adfc92020-04-08 10:28:33 -06001199 if (dst_buffer && !skip) {
locke-lunarg3c038002020-04-30 23:08:08 -06001200 ResourceAccessRange dst_range =
1201 MakeRange(copy_region.dstOffset, GetRealWholeSize(copy_region.dstOffset, copy_region.size, dst_buffer->createInfo.size));
John Zulauf355e49b2020-04-24 15:11:15 -06001202 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001203 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001204 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
1205 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1206 report_data->FormatHandle(dstBuffer).c_str(), region);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001207 }
1208 }
1209 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -06001210 }
1211 return skip;
1212}
1213
1214void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
1215 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001216 auto *cb_context = GetAccessContext(commandBuffer);
1217 assert(cb_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06001218 const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001219 auto *context = cb_context->GetCurrentAccessContext();
1220
John Zulauf9cb530d2019-09-30 14:14:10 -06001221 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06001222 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -06001223
1224 for (uint32_t region = 0; region < regionCount; region++) {
1225 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06001226 if (src_buffer) {
locke-lunarg3c038002020-04-30 23:08:08 -06001227 ResourceAccessRange src_range =
1228 MakeRange(copy_region.srcOffset, GetRealWholeSize(copy_region.srcOffset, copy_region.size, src_buffer->createInfo.size));
John Zulauf16adfc92020-04-08 10:28:33 -06001229 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001230 }
John Zulauf16adfc92020-04-08 10:28:33 -06001231 if (dst_buffer) {
locke-lunarg3c038002020-04-30 23:08:08 -06001232 ResourceAccessRange dst_range =
1233 MakeRange(copy_region.dstOffset, GetRealWholeSize(copy_region.dstOffset, copy_region.size, dst_buffer->createInfo.size));
John Zulauf16adfc92020-04-08 10:28:33 -06001234 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001235 }
1236 }
1237}
1238
1239bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1240 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1241 const VkImageCopy *pRegions) const {
1242 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001243 const auto *cb_access_context = GetAccessContext(commandBuffer);
1244 assert(cb_access_context);
1245 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -07001246
John Zulauf3d84f1b2020-03-09 13:33:25 -06001247 const auto *context = cb_access_context->GetCurrentAccessContext();
1248 assert(context);
1249 if (!context) return skip;
1250
1251 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1252 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001253 for (uint32_t region = 0; region < regionCount; region++) {
1254 const auto &copy_region = pRegions[region];
1255 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001256 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001257 copy_region.srcOffset, copy_region.extent);
1258 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001259 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1260 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1261 report_data->FormatHandle(srcImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001262 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001263 }
1264
1265        if (dst_image && src_image) {  // src_image format is needed for the adjusted dst extent below
locke-lunarg1df1f882020-03-02 16:42:08 -07001266 VkExtent3D dst_copy_extent =
1267 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001268 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07001269 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001270 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001271 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1272 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1273 report_data->FormatHandle(dstImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001274 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07001275 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07001276 }
1277 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001278
John Zulauf5c5e88d2019-12-26 11:22:02 -07001279 return skip;
1280}
1281
1282void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1283 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1284 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001285 auto *cb_access_context = GetAccessContext(commandBuffer);
1286 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06001287 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001288 auto *context = cb_access_context->GetCurrentAccessContext();
1289 assert(context);
1290
John Zulauf5c5e88d2019-12-26 11:22:02 -07001291 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001292 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001293
1294 for (uint32_t region = 0; region < regionCount; region++) {
1295 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06001296 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001297 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource, copy_region.srcOffset,
1298 copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001299 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001300        if (dst_image && src_image) {  // src_image format is needed for the adjusted dst extent below
locke-lunarg1df1f882020-03-02 16:42:08 -07001301 VkExtent3D dst_copy_extent =
1302 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001303 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource, copy_region.dstOffset,
1304 dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001305 }
1306 }
1307}
1308
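// For vkCmdPipelineBarrier only image layout transitions are hazard-checked here; the buffer, image, and global
// memory barriers themselves are applied to the access context in the record phase below.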
1309bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1310 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1311 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1312 uint32_t bufferMemoryBarrierCount,
1313 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1314 uint32_t imageMemoryBarrierCount,
1315 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
1316 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001317 const auto *cb_access_context = GetAccessContext(commandBuffer);
1318 assert(cb_access_context);
1319 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001320
John Zulauf3d84f1b2020-03-09 13:33:25 -06001321 const auto *context = cb_access_context->GetCurrentAccessContext();
1322 assert(context);
1323 if (!context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001324
John Zulauf3d84f1b2020-03-09 13:33:25 -06001325 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001326 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1327 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf0cb5be22020-01-23 12:18:22 -07001328 // Validate Image Layout transitions
1329 for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
1330 const auto &barrier = pImageMemoryBarriers[index];
1331 if (barrier.newLayout == barrier.oldLayout) continue; // Only interested in layout transitions at this point.
1332 const auto *image_state = Get<IMAGE_STATE>(barrier.image);
1333 if (!image_state) continue;
John Zulauf16adfc92020-04-08 10:28:33 -06001334 const auto hazard = context->DetectImageBarrierHazard(*image_state, src_exec_scope, src_stage_accesses, barrier);
John Zulauf0cb5be22020-01-23 12:18:22 -07001335 if (hazard.hazard) {
1336 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001337 skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
1338 "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s", string_SyncHazard(hazard.hazard),
1339 index, report_data->FormatHandle(barrier.image).c_str());
John Zulauf0cb5be22020-01-23 12:18:22 -07001340 }
1341 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001342
1343 return skip;
1344}
1345
1346void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1347 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1348 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1349 uint32_t bufferMemoryBarrierCount,
1350 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1351 uint32_t imageMemoryBarrierCount,
1352 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001353 auto *cb_access_context = GetAccessContext(commandBuffer);
1354 assert(cb_access_context);
1355 if (!cb_access_context) return;
John Zulauf2b151bf2020-04-24 15:37:44 -06001356 const auto tag = cb_access_context->NextCommandTag(CMD_PIPELINEBARRIER);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001357 auto access_context = cb_access_context->GetCurrentAccessContext();
1358 assert(access_context);
1359 if (!access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06001360
John Zulauf3d84f1b2020-03-09 13:33:25 -06001361 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001362 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001363 const auto dst_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), dstStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001364 auto dst_stage_accesses = AccessScopeByStage(dst_stage_mask);
1365 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1366 const auto dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001367 ApplyBufferBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
1368 bufferMemoryBarrierCount, pBufferMemoryBarriers);
John Zulauf540266b2020-04-06 18:54:53 -06001369 ApplyImageBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
John Zulauf355e49b2020-04-24 15:11:15 -06001370 imageMemoryBarrierCount, pImageMemoryBarriers, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001371
1372    // Apply these last in case the operation is a superset of the other two and would clean them up...
John Zulauf3d84f1b2020-03-09 13:33:25 -06001373 ApplyGlobalBarriers(access_context, src_exec_scope, dst_exec_scope, src_stage_accesses, dst_stage_accesses, memoryBarrierCount,
John Zulauf0cb5be22020-01-23 12:18:22 -07001374 pMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001375}
1376
1377void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
1378 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
1379 // The state tracker sets up the device state
1380 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
1381
John Zulauf5f13a792020-03-10 07:31:21 -06001382 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
1383 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06001384 // TODO: Find a good way to do this hooklessly.
1385 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
1386 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
1387 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
1388
John Zulaufd1f85d42020-04-15 12:23:15 -06001389 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
1390 sync_device_state->ResetCommandBufferCallback(command_buffer);
1391 });
1392 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
1393 sync_device_state->FreeCommandBufferCallback(command_buffer);
1394 });
John Zulauf9cb530d2019-09-30 14:14:10 -06001395}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001396
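// Render pass begin/next/end: the core, *2, and *2KHR entry points all funnel into the shared Validate*/Record*
// helpers on the command buffer access context, each tagged with the corresponding CMD_* value.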
John Zulauf355e49b2020-04-24 15:11:15 -06001397bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1398 const VkSubpassBeginInfoKHR *pSubpassBeginInfo, const char *func_name) const {
1399 bool skip = false;
1400 const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
1401 auto cb_context = GetAccessContext(commandBuffer);
1402
1403 if (rp_state && cb_context) {
1404 skip |= cb_context->ValidateBeginRenderPass(*rp_state, pRenderPassBegin, pSubpassBeginInfo, func_name);
1405 }
1406
1407 return skip;
1408}
1409
1410bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1411 VkSubpassContents contents) const {
1412 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
1413 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1414 subpass_begin_info.contents = contents;
1415 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, "vkCmdBeginRenderPass");
1416 return skip;
1417}
1418
1419bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1420 const VkSubpassBeginInfoKHR *pSubpassBeginInfo) const {
1421 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1422 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, "vkCmdBeginRenderPass2");
1423 return skip;
1424}
1425
1426bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
1427 const VkRenderPassBeginInfo *pRenderPassBegin,
1428 const VkSubpassBeginInfoKHR *pSubpassBeginInfo) const {
1429 bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1430 skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, "vkCmdBeginRenderPass2KHR");
1431 return skip;
1432}
1433
John Zulauf3d84f1b2020-03-09 13:33:25 -06001434void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
1435 VkResult result) {
1436 // The state tracker sets up the command buffer state
1437 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
1438
1439    // Create/initialize the structure that tracks accesses at the command buffer scope.
1440 auto cb_access_context = GetAccessContext(commandBuffer);
1441 assert(cb_access_context);
1442 cb_access_context->Reset();
1443}
1444
1445void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
John Zulauf355e49b2020-04-24 15:11:15 -06001446 const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE command) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001447 auto cb_context = GetAccessContext(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06001448 if (cb_context) {
1449 cb_context->RecordBeginRenderPass(cb_context->NextCommandTag(command));
John Zulauf3d84f1b2020-03-09 13:33:25 -06001450 }
1451}
1452
1453void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1454 VkSubpassContents contents) {
1455 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
1456 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1457 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06001458 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001459}
1460
1461void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1462 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1463 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06001464 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001465}
1466
1467void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
1468 const VkRenderPassBeginInfo *pRenderPassBegin,
1469 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1470 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06001471 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
1472}
1473
1474bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
1475 const VkSubpassEndInfoKHR *pSubpassEndInfo, const char *func_name) const {
1476 bool skip = false;
1477
1478 auto cb_context = GetAccessContext(commandBuffer);
1479 assert(cb_context);
1480 auto cb_state = cb_context->GetCommandBufferState();
1481 if (!cb_state) return skip;
1482
1483 auto rp_state = cb_state->activeRenderPass;
1484 if (!rp_state) return skip;
1485
1486 skip |= cb_context->ValidateNextSubpass(func_name);
1487
1488 return skip;
1489}
1490
1491bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
1492 bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
1493 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1494 subpass_begin_info.contents = contents;
1495 skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, "vkCmdNextSubpass");
1496 return skip;
1497}
1498
1499bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
1500 const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
1501 bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1502 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, "vkCmdNextSubpass2KHR");
1503 return skip;
1504}
1505
1506bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1507 const VkSubpassEndInfo *pSubpassEndInfo) const {
1508 bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1509 skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, "vkCmdNextSubpass2");
1510 return skip;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001511}
1512
1513void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
John Zulauf355e49b2020-04-24 15:11:15 -06001514 const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE command) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001515 auto cb_context = GetAccessContext(commandBuffer);
1516 assert(cb_context);
1517 auto cb_state = cb_context->GetCommandBufferState();
1518 if (!cb_state) return;
1519
1520 auto rp_state = cb_state->activeRenderPass;
1521 if (!rp_state) return;
1522
John Zulauf355e49b2020-04-24 15:11:15 -06001523 cb_context->RecordNextSubpass(*rp_state, cb_context->NextCommandTag(command));
John Zulauf3d84f1b2020-03-09 13:33:25 -06001524}
1525
1526void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
1527 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
1528 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1529 subpass_begin_info.contents = contents;
John Zulauf355e49b2020-04-24 15:11:15 -06001530 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001531}
1532
1533void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1534 const VkSubpassEndInfo *pSubpassEndInfo) {
1535 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06001536 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001537}
1538
1539void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1540 const VkSubpassEndInfo *pSubpassEndInfo) {
1541 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06001542 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001543}
1544
John Zulauf355e49b2020-04-24 15:11:15 -06001545bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo,
1546 const char *func_name) const {
1547 bool skip = false;
1548
1549 auto cb_context = GetAccessContext(commandBuffer);
1550 assert(cb_context);
1551 auto cb_state = cb_context->GetCommandBufferState();
1552 if (!cb_state) return skip;
1553
1554 auto rp_state = cb_state->activeRenderPass;
1555 if (!rp_state) return skip;
1556
1557 skip |= cb_context->ValidateEndRenderpass(func_name);
1558 return skip;
1559}
1560
1561bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
1562 bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
1563    skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, "vkCmdEndRenderPass");
1564 return skip;
1565}
1566
1567bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer,
1568 const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
1569 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
1570    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, "vkCmdEndRenderPass2");
1571 return skip;
1572}
1573
1574bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
1575 const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
1576 bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
1577    skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, "vkCmdEndRenderPass2KHR");
1578 return skip;
1579}
1580
1581void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo,
1582 CMD_TYPE command) {
John Zulaufe5da6e52020-03-18 15:32:18 -06001583    // Resolve all the subpass contexts to the command buffer context
1584 auto cb_context = GetAccessContext(commandBuffer);
1585 assert(cb_context);
1586 auto cb_state = cb_context->GetCommandBufferState();
1587 if (!cb_state) return;
1588
1589 const auto *rp_state = cb_state->activeRenderPass;
1590 if (!rp_state) return;
1591
John Zulauf355e49b2020-04-24 15:11:15 -06001592 cb_context->RecordEndRenderPass(*rp_state, cb_context->NextCommandTag(command));
John Zulaufe5da6e52020-03-18 15:32:18 -06001593}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001594
1595void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
1596 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
John Zulauf355e49b2020-04-24 15:11:15 -06001597 RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001598}
1599
1600void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1601 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06001602 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001603}
1604
1605void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1606 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
John Zulauf355e49b2020-04-24 15:11:15 -06001607 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001608}
locke-lunarga19c71d2020-03-02 18:17:04 -07001609
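// For buffer<->image copies the buffer side is treated as a linear range built from bufferOffset and the copy
// footprint (GetBufferSizeFromCopyImage), and the image side as a subresource region.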
1610bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1611 VkImageLayout dstImageLayout, uint32_t regionCount,
1612 const VkBufferImageCopy *pRegions) const {
1613 bool skip = false;
1614 const auto *cb_access_context = GetAccessContext(commandBuffer);
1615 assert(cb_access_context);
1616 if (!cb_access_context) return skip;
1617
1618 const auto *context = cb_access_context->GetCurrentAccessContext();
1619 assert(context);
1620 if (!context) return skip;
1621
1622 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001623 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
1624
1625 for (uint32_t region = 0; region < regionCount; region++) {
1626 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06001627        if (src_buffer && dst_image) {  // dst_image format is needed for the buffer footprint below
John Zulauf355e49b2020-04-24 15:11:15 -06001628 ResourceAccessRange src_range =
1629 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
John Zulauf16adfc92020-04-08 10:28:33 -06001630 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
locke-lunarga19c71d2020-03-02 18:17:04 -07001631 if (hazard.hazard) {
1632 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001633 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
1634 "vkCmdCopyBufferToImage: Hazard %s for srcBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001635 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
1636 }
1637 }
1638 if (dst_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001639 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001640 copy_region.imageOffset, copy_region.imageExtent);
1641 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001642 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1643 "vkCmdCopyBufferToImage: Hazard %s for dstImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001644 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
1645 }
1646 if (skip) break;
1647 }
1648 if (skip) break;
1649 }
1650 return skip;
1651}
1652
1653void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1654 VkImageLayout dstImageLayout, uint32_t regionCount,
1655 const VkBufferImageCopy *pRegions) {
1656 auto *cb_access_context = GetAccessContext(commandBuffer);
1657 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06001658 const auto tag = cb_access_context->NextCommandTag(CMD_COPYBUFFERTOIMAGE);
locke-lunarga19c71d2020-03-02 18:17:04 -07001659 auto *context = cb_access_context->GetCurrentAccessContext();
1660 assert(context);
1661
1662 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06001663 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001664
1665 for (uint32_t region = 0; region < regionCount; region++) {
1666 const auto &copy_region = pRegions[region];
1667        if (src_buffer && dst_image) {  // dst_image format is needed for the buffer footprint below
John Zulauf355e49b2020-04-24 15:11:15 -06001668 ResourceAccessRange src_range =
1669 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
John Zulauf16adfc92020-04-08 10:28:33 -06001670 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001671 }
1672 if (dst_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001673 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001674 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001675 }
1676 }
1677}
1678
1679bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
1680 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
1681 const VkBufferImageCopy *pRegions) const {
1682 bool skip = false;
1683 const auto *cb_access_context = GetAccessContext(commandBuffer);
1684 assert(cb_access_context);
1685 if (!cb_access_context) return skip;
1686
1687 const auto *context = cb_access_context->GetCurrentAccessContext();
1688 assert(context);
1689 if (!context) return skip;
1690
1691 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1692 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1693 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1694 for (uint32_t region = 0; region < regionCount; region++) {
1695 const auto &copy_region = pRegions[region];
1696 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001697 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001698 copy_region.imageOffset, copy_region.imageExtent);
1699 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001700 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1701 "vkCmdCopyImageToBuffer: Hazard %s for srcImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001702 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
1703 }
1704 }
1705        if (dst_mem && src_image) {  // src_image format is needed for the buffer footprint below
John Zulauf355e49b2020-04-24 15:11:15 -06001706 ResourceAccessRange dst_range =
1707 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
John Zulauf16adfc92020-04-08 10:28:33 -06001708 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
locke-lunarga19c71d2020-03-02 18:17:04 -07001709 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001710 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
1711 "vkCmdCopyImageToBuffer: Hazard %s for dstBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001712 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
1713 }
1714 }
1715 if (skip) break;
1716 }
1717 return skip;
1718}
1719
1720void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1721 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
1722 auto *cb_access_context = GetAccessContext(commandBuffer);
1723 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06001724 const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGETOBUFFER);
locke-lunarga19c71d2020-03-02 18:17:04 -07001725 auto *context = cb_access_context->GetCurrentAccessContext();
1726 assert(context);
1727
1728 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001729 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1730 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06001731 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07001732
1733 for (uint32_t region = 0; region < regionCount; region++) {
1734 const auto &copy_region = pRegions[region];
1735 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001736 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001737 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001738 }
1739        if (dst_buffer && src_image) {  // src_image format is needed for the buffer footprint below
John Zulauf355e49b2020-04-24 15:11:15 -06001740 ResourceAccessRange dst_range =
1741 MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
John Zulauf16adfc92020-04-08 10:28:33 -06001742 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001743 }
1744 }
1745}
1746
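// For blits the regions are given as offset pairs, so the source and destination extents are computed from the
// offset deltas before hazard detection and access-state updates.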
1747bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1748 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1749 const VkImageBlit *pRegions, VkFilter filter) const {
1750 bool skip = false;
1751 const auto *cb_access_context = GetAccessContext(commandBuffer);
1752 assert(cb_access_context);
1753 if (!cb_access_context) return skip;
1754
1755 const auto *context = cb_access_context->GetCurrentAccessContext();
1756 assert(context);
1757 if (!context) return skip;
1758
1759 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1760 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
1761
1762 for (uint32_t region = 0; region < regionCount; region++) {
1763 const auto &blit_region = pRegions[region];
1764 if (src_image) {
1765 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1766 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1767 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001768 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001769 blit_region.srcOffsets[0], extent);
1770 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001771 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1772 "vkCmdBlitImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1773 report_data->FormatHandle(srcImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001774 }
1775 }
1776
1777 if (dst_image) {
1778 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1779 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1780 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001781 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001782 blit_region.dstOffsets[0], extent);
1783 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001784 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1785 "vkCmdBlitImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1786 report_data->FormatHandle(dstImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001787 }
1788 if (skip) break;
1789 }
1790 }
1791
1792 return skip;
1793}
1794
1795void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1796 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1797 const VkImageBlit *pRegions, VkFilter filter) {
1798 auto *cb_access_context = GetAccessContext(commandBuffer);
1799 assert(cb_access_context);
John Zulauf2b151bf2020-04-24 15:37:44 -06001800 const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
locke-lunarga19c71d2020-03-02 18:17:04 -07001801 auto *context = cb_access_context->GetCurrentAccessContext();
1802 assert(context);
1803
1804 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001805 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001806
1807 for (uint32_t region = 0; region < regionCount; region++) {
1808 const auto &blit_region = pRegions[region];
1809 if (src_image) {
1810 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1811 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1812 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001813 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001814 blit_region.srcOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001815 }
1816 if (dst_image) {
1817 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1818 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1819 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001820 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001821 blit_region.dstOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001822 }
1823 }
1824}