John Zulauf9cb530d2019-09-30 14:14:10 -06001/* Copyright (c) 2019 The Khronos Group Inc.
2 * Copyright (c) 2019 Valve Corporation
3 * Copyright (c) 2019 LunarG, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 * Author: John Zulauf <jzulauf@lunarg.com>
18 */
19
20#include <limits>
21#include <vector>
locke-lunarg296a3c92020-03-25 01:04:29 -060022#include <memory>
23#include <bitset>
John Zulauf9cb530d2019-09-30 14:14:10 -060024#include "synchronization_validation.h"
25
26static const char *string_SyncHazardVUID(SyncHazard hazard) {
27 switch (hazard) {
28 case SyncHazard::NONE:
John Zulauf2f952d22020-02-10 11:34:51 -070029 return "SYNC-HAZARD-NONE";
John Zulauf9cb530d2019-09-30 14:14:10 -060030 break;
31 case SyncHazard::READ_AFTER_WRITE:
32 return "SYNC-HAZARD-READ_AFTER_WRITE";
33 break;
34 case SyncHazard::WRITE_AFTER_READ:
35 return "SYNC-HAZARD-WRITE_AFTER_READ";
36 break;
37 case SyncHazard::WRITE_AFTER_WRITE:
38 return "SYNC-HAZARD-WRITE_AFTER_WRITE";
39 break;
John Zulauf2f952d22020-02-10 11:34:51 -070040 case SyncHazard::READ_RACING_WRITE:
41 return "SYNC-HAZARD-READ-RACING-WRITE";
42 break;
43 case SyncHazard::WRITE_RACING_WRITE:
44 return "SYNC-HAZARD-WRITE-RACING-WRITE";
45 break;
46 case SyncHazard::WRITE_RACING_READ:
47 return "SYNC-HAZARD-WRITE-RACING-READ";
48 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060049 default:
50 assert(0);
51 }
52 return "SYNC-HAZARD-INVALID";
53}
54
55static const char *string_SyncHazard(SyncHazard hazard) {
56 switch (hazard) {
57 case SyncHazard::NONE:
58 return "NONR";
59 break;
60 case SyncHazard::READ_AFTER_WRITE:
61 return "READ_AFTER_WRITE";
62 break;
63 case SyncHazard::WRITE_AFTER_READ:
64 return "WRITE_AFTER_READ";
65 break;
66 case SyncHazard::WRITE_AFTER_WRITE:
67 return "WRITE_AFTER_WRITE";
68 break;
John Zulauf2f952d22020-02-10 11:34:51 -070069 case SyncHazard::READ_RACING_WRITE:
70 return "READ_RACING_WRITE";
71 break;
72 case SyncHazard::WRITE_RACING_WRITE:
73 return "WRITE_RACING_WRITE";
74 break;
75 case SyncHazard::WRITE_RACING_READ:
76 return "WRITE_RACING_READ";
77 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060078 default:
79 assert(0);
80 }
81 return "INVALID HAZARD";
82}
83
John Zulauf16adfc92020-04-08 10:28:33 -060084template <typename T>
 85 static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
86 return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
87}
88
89static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) {
90 return ResourceAccessRange(start, (start + size));
91}
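// Illustrative note for MakeRange() above (a sketch, not part of the validation logic): both
// overloads produce the half-open address range [offset, offset + size) used as keys in the
// access maps, e.g.
//     ResourceAccessRange r = MakeRange(copy_region.srcOffset, copy_region.size);
//     // r.begin == srcOffset, r.end == srcOffset + size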
92
John Zulauf0cb5be22020-01-23 12:18:22 -070093// Expand the pipeline stage mask without regard to whether the stages are valid w.r.t. queue or extension
94VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
95 VkPipelineStageFlags expanded = stage_mask;
96 if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
97 expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
98 for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
99 if (all_commands.first & queue_flags) {
100 expanded |= all_commands.second;
101 }
102 }
103 }
104 if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
105 expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
106 expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
107 }
108 return expanded;
109}
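// Illustrative sketch for ExpandPipelineStages() above (assumes a graphics-capable queue; the
// exact stage sets come from syncAllCommandStagesByQueueFlags):
//     VkPipelineStageFlags in  = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
//     VkPipelineStageFlags out = ExpandPipelineStages(VK_QUEUE_GRAPHICS_BIT, in);
//     // 'out' has ALL_GRAPHICS_BIT cleared and the individual graphics stages set;
//     // TRANSFER_BIT passes through unchanged.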
110
John Zulauf36bcf6a2020-02-03 15:12:52 -0700111VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
112 std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
113 VkPipelineStageFlags unscanned = stage_mask;
114 VkPipelineStageFlags related = 0;
115 for (const auto entry : map) {
116 const auto stage = entry.first;
117 if (stage & unscanned) {
118 related = related | entry.second;
119 unscanned = unscanned & ~stage;
120 if (!unscanned) break;
121 }
122 }
123 return related;
124}
125
126VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
127 return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
128}
129
130VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
131 return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
132}
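// Illustrative sketch for the With*PipelineStages() helpers above: they close a mask over the
// logically earlier/later stages, mirroring how execution dependencies extend (exact contents
// come from syncLogicallyEarlierStages / syncLogicallyLaterStages):
//     auto src = WithEarlierPipelineStages(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
//     // 'src' additionally contains the logically earlier stages (VERTEX_INPUT, VERTEX_SHADER, ...)
//     auto dst = WithLaterPipelineStages(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
//     // 'dst' additionally contains the logically later stages (LATE_FRAGMENT_TESTS, COLOR_ATTACHMENT_OUTPUT, ...)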
133
John Zulauf5c5e88d2019-12-26 11:22:02 -0700134static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
John Zulauf5c5e88d2019-12-26 11:22:02 -0700135
John Zulauf540266b2020-04-06 18:54:53 -0600136AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
137 const std::vector<SubpassDependencyGraphNode> &dependencies,
138 const std::vector<AccessContext> &contexts, AccessContext *external_context) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600139 Reset();
140 const auto &subpass_dep = dependencies[subpass];
141 prev_.reserve(subpass_dep.prev.size());
142 for (const auto &prev_dep : subpass_dep.prev) {
143 assert(prev_dep.dependency);
144 const auto dep = *prev_dep.dependency;
John Zulauf540266b2020-04-06 18:54:53 -0600145 prev_.emplace_back(const_cast<AccessContext *>(&contexts[dep.srcSubpass]), queue_flags, dep);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700146 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600147
148 async_.reserve(subpass_dep.async.size());
149 for (const auto async_subpass : subpass_dep.async) {
John Zulauf540266b2020-04-06 18:54:53 -0600150 async_.emplace_back(const_cast<AccessContext *>(&contexts[async_subpass]));
John Zulauf3d84f1b2020-03-09 13:33:25 -0600151 }
John Zulaufe5da6e52020-03-18 15:32:18 -0600152 if (subpass_dep.barrier_from_external) {
153 src_external_ = TrackBack(external_context, queue_flags, *subpass_dep.barrier_from_external);
154 } else {
155 src_external_ = TrackBack();
156 }
157 if (subpass_dep.barrier_to_external) {
158 dst_external_ = TrackBack(this, queue_flags, *subpass_dep.barrier_to_external);
159 } else {
160 dst_external_ = TrackBack();
John Zulauf3d84f1b2020-03-09 13:33:25 -0600161 }
John Zulauf5c5e88d2019-12-26 11:22:02 -0700162}
163
John Zulauf5f13a792020-03-10 07:31:21 -0600164template <typename Detector>
John Zulauf16adfc92020-04-08 10:28:33 -0600165HazardResult AccessContext::DetectPreviousHazard(AddressType type, const Detector &detector,
John Zulauf540266b2020-04-06 18:54:53 -0600166 const ResourceAccessRange &range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600167 ResourceAccessRangeMap descent_map;
168 ResourceAccessState default_state; // When present, PreviousAccess will "infill"
John Zulauf16adfc92020-04-08 10:28:33 -0600169 ResolvePreviousAccess(type, range, &descent_map, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600170
171 HazardResult hazard;
172 for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
173 hazard = detector.Detect(prev);
174 }
175 return hazard;
176}
177
John Zulauf3d84f1b2020-03-09 13:33:25 -0600178// A recursive range walker for hazard detection: it first checks the current context, then descends (via
 179// DetectPreviousHazard) through the DAG of prior contexts (for example, subpasses)
180template <typename Detector>
John Zulauf16adfc92020-04-08 10:28:33 -0600181HazardResult AccessContext::DetectHazard(AddressType type, const Detector &detector,
John Zulauf540266b2020-04-06 18:54:53 -0600182 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600183 HazardResult hazard;
John Zulauf5f13a792020-03-10 07:31:21 -0600184
185 // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
186 // so we'll check these first
187 for (const auto &async_context : async_) {
John Zulauf16adfc92020-04-08 10:28:33 -0600188 hazard = async_context->DetectAsyncHazard(type, detector, range);
John Zulauf5f13a792020-03-10 07:31:21 -0600189 if (hazard.hazard) return hazard;
190 }
191
John Zulauf16adfc92020-04-08 10:28:33 -0600192 const auto &accesses = GetAccessStateMap(type);
193 const auto from = accesses.lower_bound(range);
194 if (from != accesses.end() && from->first.intersects(range)) {
195 const auto to = accesses.upper_bound(range);
196 ResourceAccessRange gap = {range.begin, range.begin};
197 for (auto pos = from; pos != to; ++pos) {
198 hazard = detector.Detect(pos);
199 if (hazard.hazard) return hazard;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600200
John Zulauf16adfc92020-04-08 10:28:33 -0600201 // make sure we don't go past the end of the range
202 auto upper_bound = std::min(range.end, pos->first.end);
203 gap.end = upper_bound;
John Zulauf5f13a792020-03-10 07:31:21 -0600204
John Zulauf16adfc92020-04-08 10:28:33 -0600205 // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
206 if (!gap.empty()) {
207 // Must recur on all gaps
208 hazard = DetectPreviousHazard(type, detector, gap);
John Zulauf5f13a792020-03-10 07:31:21 -0600209 if (hazard.hazard) return hazard;
210 }
John Zulauf16adfc92020-04-08 10:28:33 -0600211 gap.begin = upper_bound;
212 }
213 gap.end = range.end;
214 if (gap.non_empty()) {
215 hazard = DetectPreviousHazard(type, detector, gap);
216 if (hazard.hazard) return hazard;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600217 }
John Zulauf5f13a792020-03-10 07:31:21 -0600218 } else {
John Zulauf16adfc92020-04-08 10:28:33 -0600219 hazard = DetectPreviousHazard(type, detector, range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600220 }
221
222 return hazard;
223}
224
 225// A non-recursive range walker for the asynchronous contexts (those we have no barriers with)
226template <typename Detector>
John Zulauf16adfc92020-04-08 10:28:33 -0600227HazardResult AccessContext::DetectAsyncHazard(AddressType type, const Detector &detector,
John Zulauf540266b2020-04-06 18:54:53 -0600228 const ResourceAccessRange &range) const {
John Zulauf16adfc92020-04-08 10:28:33 -0600229 auto &accesses = GetAccessStateMap(type);
230 const auto from = accesses.lower_bound(range);
231 const auto to = accesses.upper_bound(range);
232
John Zulauf3d84f1b2020-03-09 13:33:25 -0600233 HazardResult hazard;
John Zulauf16adfc92020-04-08 10:28:33 -0600234 for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
235 hazard = detector.DetectAsync(pos);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600236 }
John Zulauf16adfc92020-04-08 10:28:33 -0600237
John Zulauf3d84f1b2020-03-09 13:33:25 -0600238 return hazard;
239}
240
John Zulauf16adfc92020-04-08 10:28:33 -0600241void AccessContext::ResolveTrackBack(AddressType type, const ResourceAccessRange &range,
John Zulauf540266b2020-04-06 18:54:53 -0600242 const AccessContext::TrackBack &track_back, ResourceAccessRangeMap *descent_map,
243 const ResourceAccessState *infill_state, bool recur_to_infill) const {
John Zulauf16adfc92020-04-08 10:28:33 -0600244 ParallelMapIterator current(*descent_map, GetAccessStateMap(type), range.begin);
245 while (current->range.non_empty()) {
246 if (current->pos_B->valid) {
247 const auto &src_pos = current->pos_B->lower_bound;
248 auto access_with_barrier = src_pos->second;
249 access_with_barrier.ApplyBarrier(track_back.barrier);
250 if (current->pos_A->valid) {
251 current.trim_A();
252 current->pos_A->lower_bound->second.Resolve(access_with_barrier);
John Zulauf5f13a792020-03-10 07:31:21 -0600253 } else {
John Zulauf16adfc92020-04-08 10:28:33 -0600254 descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, access_with_barrier));
255 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after split(s)
John Zulauf5f13a792020-03-10 07:31:21 -0600256 }
John Zulauf16adfc92020-04-08 10:28:33 -0600257 } else {
258 // we have to descend to fill this gap
259 if (recur_to_infill) {
260 track_back.context->ResolvePreviousAccess(type, current->range, descent_map, infill_state);
261 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after recursion.
262 }
263 if (!current->pos_A->valid && infill_state) {
264 // If we didn't find anything in the previous range, we infill with default to prevent repeating
265 // a fruitless search
266 descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
267 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after insert
268 }
John Zulauf5f13a792020-03-10 07:31:21 -0600269 }
John Zulauf16adfc92020-04-08 10:28:33 -0600270 ++current;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600271 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600272}
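// Illustrative note for ResolveTrackBack() above (a description of intent, not a spec): the
// parallel iterator walks descent_map (A) and this context's map (B) together. Where B has
// state, it is resolved into A with the track-back barrier applied; where B has a gap, the
// previous context is queried (when recur_to_infill is set) and any remaining hole is filled
// with infill_state so the same fruitless search is not repeated later.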
273
John Zulauf16adfc92020-04-08 10:28:33 -0600274void AccessContext::ResolvePreviousAccess(AddressType type, const ResourceAccessRange &range,
John Zulauf540266b2020-04-06 18:54:53 -0600275 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
John Zulaufe5da6e52020-03-18 15:32:18 -0600276 if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
John Zulauf5f13a792020-03-10 07:31:21 -0600277 if (range.non_empty() && infill_state) {
278 descent_map->insert(std::make_pair(range, *infill_state));
279 }
280 } else {
281 // Look for something to fill the gap further along.
282 for (const auto &prev_dep : prev_) {
John Zulauf16adfc92020-04-08 10:28:33 -0600283 ResolveTrackBack(type, range, prev_dep, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600284 }
285
John Zulaufe5da6e52020-03-18 15:32:18 -0600286 if (src_external_.context) {
John Zulauf16adfc92020-04-08 10:28:33 -0600287 ResolveTrackBack(type, range, src_external_, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600288 }
289 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600290}
291
John Zulauf16adfc92020-04-08 10:28:33 -0600292AccessContext::AddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
293 return (image.createInfo.tiling == VK_IMAGE_TILING_LINEAR) ? AddressType::kLinearAddress : AddressType::kIdealizedAddress;
294}
295
296VkDeviceSize AccessContext::ResourceBaseAddress(const BINDABLE &bindable) {
297 return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
298}
299
300static bool SimpleBinding(const BINDABLE &bindable) {
301 return !bindable.sparse && bindable.binding.mem_state;
302}
303
John Zulauf540266b2020-04-06 18:54:53 -0600304void AccessContext::ResolvePreviousAccess(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
John Zulauf16adfc92020-04-08 10:28:33 -0600305 AddressType address_type, ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
306 if (!SimpleBinding(image_state)) return;
307
John Zulauf62f10592020-04-03 12:20:02 -0600308 auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
locke-lunargae26eac2020-04-16 15:29:05 -0600309 subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600310 image_state.createInfo.extent);
John Zulauf16adfc92020-04-08 10:28:33 -0600311 const auto base_address = ResourceBaseAddress(image_state);
John Zulauf62f10592020-04-03 12:20:02 -0600312 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf16adfc92020-04-08 10:28:33 -0600313 ResolvePreviousAccess(address_type, (*range_gen + base_address), descent_map, infill_state);
John Zulauf62f10592020-04-03 12:20:02 -0600314 }
315}
316
John Zulauf3d84f1b2020-03-09 13:33:25 -0600317class HazardDetector {
318 SyncStageAccessIndex usage_index_;
319
320 public:
John Zulauf5f13a792020-03-10 07:31:21 -0600321 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600322 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
323 return pos->second.DetectAsyncHazard(usage_index_);
324 }
325 HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
326};
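// Illustrative note for HazardDetector above: detectors are small policy objects plugged into
// the generic range walkers (DetectHazard / DetectAsyncHazard). A minimal sketch of the expected
// interface (an assumption mirroring HazardDetector, not a required base class):
//     struct MyDetector {
//         HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const;      // same-queue check
//         HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const; // async-subpass check
//     };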
327
John Zulauf16adfc92020-04-08 10:28:33 -0600328HazardResult AccessContext::DetectHazard(AddressType type, SyncStageAccessIndex usage_index,
John Zulauf540266b2020-04-06 18:54:53 -0600329 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600330 HazardDetector detector(usage_index);
John Zulauf16adfc92020-04-08 10:28:33 -0600331 return DetectHazard(type, detector, range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600332}
333
John Zulauf16adfc92020-04-08 10:28:33 -0600334HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
335 const ResourceAccessRange &range) const {
336 if (!SimpleBinding(buffer)) return HazardResult();
 337 return DetectHazard(AddressType::kLinearAddress, usage_index, range + ResourceBaseAddress(buffer));
John Zulaufe5da6e52020-03-18 15:32:18 -0600338}
339
John Zulauf540266b2020-04-06 18:54:53 -0600340HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
341 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
342 const VkExtent3D &extent) const {
John Zulauf16adfc92020-04-08 10:28:33 -0600343 if (!SimpleBinding(image)) return HazardResult();
John Zulauf5c5e88d2019-12-26 11:22:02 -0700344 // TODO: replace the encoder/generator with offset3D/extent3D aware versions
345 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
346 subresource.layerCount};
locke-lunargae26eac2020-04-16 15:29:05 -0600347 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent);
John Zulauf16adfc92020-04-08 10:28:33 -0600348 const auto address_type = ImageAddressType(image);
349 const auto base_address = ResourceBaseAddress(image);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700350 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf16adfc92020-04-08 10:28:33 -0600351 HazardResult hazard = DetectHazard(address_type, current_usage, (*range_gen + base_address));
John Zulauf5c5e88d2019-12-26 11:22:02 -0700352 if (hazard.hazard) return hazard;
353 }
354 return HazardResult();
John Zulauf9cb530d2019-09-30 14:14:10 -0600355}
356
John Zulauf3d84f1b2020-03-09 13:33:25 -0600357class BarrierHazardDetector {
358 public:
359 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
360 SyncStageAccessFlags src_access_scope)
361 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
362
John Zulauf5f13a792020-03-10 07:31:21 -0600363 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
364 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -0700365 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600366 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
 367 // Async barrier hazard detection can use the same path, since the usage index here is a write (IsWrite), not a read
368 return pos->second.DetectAsyncHazard(usage_index_);
369 }
370
371 private:
372 SyncStageAccessIndex usage_index_;
373 VkPipelineStageFlags src_exec_scope_;
374 SyncStageAccessFlags src_access_scope_;
375};
376
John Zulauf16adfc92020-04-08 10:28:33 -0600377HazardResult AccessContext::DetectBarrierHazard(AddressType type, SyncStageAccessIndex current_usage,
John Zulauf540266b2020-04-06 18:54:53 -0600378 VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
379 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600380 BarrierHazardDetector detector(current_usage, src_exec_scope, src_access_scope);
John Zulauf16adfc92020-04-08 10:28:33 -0600381 return DetectHazard(type, detector, range);
John Zulauf0cb5be22020-01-23 12:18:22 -0700382}
383
John Zulauf16adfc92020-04-08 10:28:33 -0600384HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
385 SyncStageAccessFlags src_stage_accesses, const VkImageMemoryBarrier &barrier) const {
386 if (!SimpleBinding(image)) return HazardResult();
387
John Zulauf0cb5be22020-01-23 12:18:22 -0700388 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600389 const VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
John Zulauf36bcf6a2020-02-03 15:12:52 -0700390 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
locke-lunargae26eac2020-04-16 15:29:05 -0600391 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600392 image.createInfo.extent);
John Zulauf16adfc92020-04-08 10:28:33 -0600393 const auto address_type = ImageAddressType(image);
394 const auto base_address = ResourceBaseAddress(image);
locke-lunarg296a3c92020-03-25 01:04:29 -0600395 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf16adfc92020-04-08 10:28:33 -0600396 HazardResult hazard = DetectBarrierHazard(address_type, SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION,
397 src_exec_scope, src_access_scope, (*range_gen + base_address));
locke-lunarg296a3c92020-03-25 01:04:29 -0600398 if (hazard.hazard) return hazard;
John Zulauf0cb5be22020-01-23 12:18:22 -0700399 }
400 return HazardResult();
401}
402
John Zulauf9cb530d2019-09-30 14:14:10 -0600403template <typename Flags, typename Map>
404SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
405 SyncStageAccessFlags scope = 0;
406 for (const auto &bit_scope : map) {
407 if (flag_mask < bit_scope.first) break;
408
409 if (flag_mask & bit_scope.first) {
410 scope |= bit_scope.second;
411 }
412 }
413 return scope;
414}
415
416SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
417 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
418}
419
420SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
421 return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
422}
423
 424 // Getting from stage mask and access mask to stage/access masks is something we need to be good at...
425SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -0600426 // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
 427 // accesses. After factoring out common terms, the union of the per-stage stage/access intersections equals the
 428 // intersection of (the union of all stage/access types for the stages) with (the same union for the access mask).
John Zulauf9cb530d2019-09-30 14:14:10 -0600429 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
430}
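// Illustrative sketch for AccessScope() above: for a barrier with
//     stages   = VK_PIPELINE_STAGE_TRANSFER_BIT
//     accesses = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT
// the result keeps only the combinations valid for the enabled stages, so it contains the
// TRANSFER-stage write bit but no shader-read bits (no shader stage is enabled).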
431
432template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -0700433void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600434 // TODO -- region/mem-range accuracte update
435 auto pos = accesses->lower_bound(range);
436 if (pos == accesses->end() || !pos->first.intersects(range)) {
 437 // No existing accesses intersect the range; fill it with a default value.
438 pos = action.Infill(accesses, pos, range);
439 } else if (range.begin < pos->first.begin) {
440 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -0700441 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -0600442 } else if (pos->first.begin < range.begin) {
443 // Trim the beginning if needed
444 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
445 ++pos;
446 }
447
448 const auto the_end = accesses->end();
449 while ((pos != the_end) && pos->first.intersects(range)) {
450 if (pos->first.end > range.end) {
451 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
452 }
453
454 pos = action(accesses, pos);
455 if (pos == the_end) break;
456
457 auto next = pos;
458 ++next;
459 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
460 // Need to infill if next is disjoint
461 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700462 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -0600463 next = action.Infill(accesses, next, new_range);
464 }
465 pos = next;
466 }
467}
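// Illustrative note for UpdateMemoryAccessState() above (a sketch of intent): it is a
// split-and-infill walker. For an existing map { [0,16):A, [32,48):B } and range [8,40), it
// splits [0,16) at 8, infills the gap [16,32) via Action::Infill, splits [32,48) at 40, and
// then applies the action to the pieces [8,16), [16,32), and [32,40).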
468
469struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700470 using Iterator = ResourceAccessRangeMap::iterator;
471 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600472 // this is only called on gaps, and never returns a gap.
473 ResourceAccessState default_state;
John Zulauf16adfc92020-04-08 10:28:33 -0600474 context.ResolvePreviousAccess(type, range, accesses, &default_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600475 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -0600476 }
John Zulauf5f13a792020-03-10 07:31:21 -0600477
John Zulauf5c5e88d2019-12-26 11:22:02 -0700478 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600479 auto &access_state = pos->second;
480 access_state.Update(usage, tag);
481 return pos;
482 }
483
John Zulauf16adfc92020-04-08 10:28:33 -0600484 UpdateMemoryAccessStateFunctor(AccessContext::AddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
John Zulauf540266b2020-04-06 18:54:53 -0600485 const ResourceUsageTag &tag_)
John Zulauf16adfc92020-04-08 10:28:33 -0600486 : type(type_), context(context_), usage(usage_), tag(tag_) {}
487 const AccessContext::AddressType type;
John Zulauf540266b2020-04-06 18:54:53 -0600488 const AccessContext &context;
John Zulauf16adfc92020-04-08 10:28:33 -0600489 const SyncStageAccessIndex usage;
John Zulauf9cb530d2019-09-30 14:14:10 -0600490 const ResourceUsageTag &tag;
491};
492
493struct ApplyMemoryAccessBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700494 using Iterator = ResourceAccessRangeMap::iterator;
495 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600496
John Zulauf5c5e88d2019-12-26 11:22:02 -0700497 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600498 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700499 access_state.ApplyMemoryAccessBarrier(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600500 return pos;
501 }
502
John Zulauf36bcf6a2020-02-03 15:12:52 -0700503 ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_exec_scope_, SyncStageAccessFlags src_access_scope_,
504 VkPipelineStageFlags dst_exec_scope_, SyncStageAccessFlags dst_access_scope_)
505 : src_exec_scope(src_exec_scope_),
506 src_access_scope(src_access_scope_),
507 dst_exec_scope(dst_exec_scope_),
508 dst_access_scope(dst_access_scope_) {}
John Zulauf9cb530d2019-09-30 14:14:10 -0600509
John Zulauf36bcf6a2020-02-03 15:12:52 -0700510 VkPipelineStageFlags src_exec_scope;
511 SyncStageAccessFlags src_access_scope;
512 VkPipelineStageFlags dst_exec_scope;
513 SyncStageAccessFlags dst_access_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600514};
515
516struct ApplyGlobalBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700517 using Iterator = ResourceAccessRangeMap::iterator;
518 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600519
John Zulauf5c5e88d2019-12-26 11:22:02 -0700520 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600521 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700522 access_state.ApplyExecutionBarrier(src_exec_scope, dst_exec_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600523
524 for (const auto &functor : barrier_functor) {
525 functor(accesses, pos);
526 }
527 return pos;
528 }
529
John Zulauf36bcf6a2020-02-03 15:12:52 -0700530 ApplyGlobalBarrierFunctor(VkPipelineStageFlags src_exec_scope, VkPipelineStageFlags dst_exec_scope,
531 SyncStageAccessFlags src_stage_accesses, SyncStageAccessFlags dst_stage_accesses,
John Zulauf9cb530d2019-09-30 14:14:10 -0600532 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700533 : src_exec_scope(src_exec_scope), dst_exec_scope(dst_exec_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600534 // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
535 barrier_functor.reserve(memoryBarrierCount);
536 for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
537 const auto &barrier = pMemoryBarriers[barrier_index];
John Zulauf36bcf6a2020-02-03 15:12:52 -0700538 barrier_functor.emplace_back(src_exec_scope, SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask),
539 dst_exec_scope, SyncStageAccess::AccessScope(dst_stage_accesses, barrier.dstAccessMask));
John Zulauf9cb530d2019-09-30 14:14:10 -0600540 }
541 }
542
John Zulauf36bcf6a2020-02-03 15:12:52 -0700543 const VkPipelineStageFlags src_exec_scope;
544 const VkPipelineStageFlags dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600545 std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
546};
547
John Zulauf16adfc92020-04-08 10:28:33 -0600548void AccessContext::UpdateAccessState(AddressType type, SyncStageAccessIndex current_usage,
John Zulauf540266b2020-04-06 18:54:53 -0600549 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -0600550 UpdateMemoryAccessStateFunctor action(type, *this, current_usage, tag);
551 UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600552}
553
John Zulauf16adfc92020-04-08 10:28:33 -0600554void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage,
555 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
556 if (!SimpleBinding(buffer)) return;
557 const auto base_address = ResourceBaseAddress(buffer);
558 UpdateAccessState(AddressType::kLinearAddress, current_usage, range + base_address, tag);
559}
John Zulauf540266b2020-04-06 18:54:53 -0600560void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
561 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
562 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf16adfc92020-04-08 10:28:33 -0600563 if (!SimpleBinding(image)) return;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700564 // TODO: replace the encoder/generator with offset3D aware versions
565 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
566 subresource.layerCount};
locke-lunargae26eac2020-04-16 15:29:05 -0600567 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600568 const VulkanTypedHandle handle(image.image, kVulkanObjectTypeImage);
John Zulauf16adfc92020-04-08 10:28:33 -0600569 const auto address_type = ImageAddressType(image);
570 const auto base_address = ResourceBaseAddress(image);
571 UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, tag);
John Zulauf5f13a792020-03-10 07:31:21 -0600572 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf16adfc92020-04-08 10:28:33 -0600573 UpdateMemoryAccessState(&GetAccessStateMap(address_type), (*range_gen + base_address), action);
John Zulauf5f13a792020-03-10 07:31:21 -0600574 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600575}
576
John Zulauf540266b2020-04-06 18:54:53 -0600577template <typename Action>
578void AccessContext::UpdateMemoryAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -0600579 if (!SimpleBinding(buffer)) return;
580 const auto base_address = ResourceBaseAddress(buffer);
581 UpdateMemoryAccessState(&GetAccessStateMap(AddressType::kLinearAddress), (range + base_address), action);
John Zulauf540266b2020-04-06 18:54:53 -0600582}
583
584template <typename Action>
585void AccessContext::UpdateMemoryAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
586 const Action action) {
John Zulauf16adfc92020-04-08 10:28:33 -0600587 if (!SimpleBinding(image)) return;
588 const auto address_type = ImageAddressType(image);
589 auto *accesses = &GetAccessStateMap(address_type);
John Zulauf540266b2020-04-06 18:54:53 -0600590
locke-lunargae26eac2020-04-16 15:29:05 -0600591 subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600592 image.createInfo.extent);
John Zulauf540266b2020-04-06 18:54:53 -0600593
John Zulauf16adfc92020-04-08 10:28:33 -0600594 const auto base_address = ResourceBaseAddress(image);
John Zulauf540266b2020-04-06 18:54:53 -0600595 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf16adfc92020-04-08 10:28:33 -0600596 UpdateMemoryAccessState(accesses, (*range_gen + base_address), action);
John Zulauf540266b2020-04-06 18:54:53 -0600597 }
598}
599
600template <typename Action>
601void AccessContext::ApplyGlobalBarriers(const Action &barrier_action) {
 602 // Note: Barriers do *not* cross context boundaries, applying only to accesses within... (at least for renderpass subpasses)
John Zulauf16adfc92020-04-08 10:28:33 -0600603 for (const auto address_type : kAddressTypes) {
604 UpdateMemoryAccessState(&GetAccessStateMap(address_type), full_range, barrier_action);
John Zulauf540266b2020-04-06 18:54:53 -0600605 }
606}
607
John Zulauf16adfc92020-04-08 10:28:33 -0600608const std::array<AccessContext::AddressType, AccessContext::kAddressTypeCount> AccessContext::kAddressTypes = {
609 AccessContext::AddressType::kLinearAddress,
610 AccessContext::AddressType::kIdealizedAddress
611};
John Zulauf540266b2020-04-06 18:54:53 -0600612void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
John Zulauf540266b2020-04-06 18:54:53 -0600613 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
614 auto &context = contexts[subpass_index];
John Zulauf16adfc92020-04-08 10:28:33 -0600615 for (const auto address_type : kAddressTypes) {
616 context.ResolveTrackBack(address_type, full_range, context.GetDstExternalTrackBack(),
617 &GetAccessStateMap(address_type), nullptr, false);
John Zulauf540266b2020-04-06 18:54:53 -0600618 }
619 }
620}
621
John Zulauf16adfc92020-04-08 10:28:33 -0600622void CommandBufferAccessContext::BeginRenderPass(const RENDER_PASS_STATE &rp_state) {
 623 // Create an access context for the first subpass and add it to the command buffer's collection
624 render_pass_contexts_.emplace_back(queue_flags_, &rp_state.subpass_dependencies, &cb_tracker_context_);
625 current_renderpass_context_ = &render_pass_contexts_.back();
626 current_context_ = &current_renderpass_context_->CurrentContext();
627
628 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
629}
630
631void CommandBufferAccessContext::NextRenderPass(const RENDER_PASS_STATE &rp_state) {
632 assert(current_renderpass_context_);
633 current_renderpass_context_->NextSubpass(queue_flags_, &cb_tracker_context_);
634 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
635 current_context_ = &current_renderpass_context_->CurrentContext();
636}
637
638void CommandBufferAccessContext::EndRenderPass(const RENDER_PASS_STATE &render_pass) {
639 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
640 assert(current_renderpass_context_);
641 if (!current_renderpass_context_) return;
642
643 const auto &contexts = current_renderpass_context_->subpass_contexts_;
644 cb_tracker_context_.ResolveChildContexts(contexts);
645
646 current_context_ = &cb_tracker_context_;
647 current_renderpass_context_ = nullptr;
648}
649
John Zulauf3d84f1b2020-03-09 13:33:25 -0600650SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &barrier) {
651 const auto src_stage_mask = ExpandPipelineStages(queue_flags, barrier.srcStageMask);
652 src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
653 src_access_scope = SyncStageAccess::AccessScope(src_stage_mask, barrier.srcAccessMask);
654 const auto dst_stage_mask = ExpandPipelineStages(queue_flags, barrier.dstStageMask);
655 dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
656 dst_access_scope = SyncStageAccess::AccessScope(dst_stage_mask, barrier.dstAccessMask);
657}
658
659void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier) {
660 ApplyExecutionBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
661 ApplyMemoryAccessBarrier(barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope);
662}
663
664ResourceAccessState ResourceAccessState::ApplyBarrierStack(const ResourceAccessState &that, const SyncBarrierStack &barrier_stack) {
665 ResourceAccessState copy = that;
666 for (auto barrier = barrier_stack.begin(); barrier != barrier_stack.end(); ++barrier) {
667 assert(*barrier);
668 copy.ApplyBarrier(*(*barrier));
669 }
670 return copy;
671}
672
673HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, SyncBarrierStack *barrier_stack) const {
674 if (barrier_stack) {
675 return ApplyBarrierStack(*this, *barrier_stack).DetectHazard(usage_index);
676 }
677 return DetectHazard(usage_index);
678}
679
John Zulauf9cb530d2019-09-30 14:14:10 -0600680HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
681 HazardResult hazard;
682 auto usage = FlagBit(usage_index);
683 if (IsRead(usage)) {
684 if (IsWriteHazard(usage)) {
685 hazard.Set(READ_AFTER_WRITE, write_tag);
686 }
687 } else {
688 // Assume write
689 // TODO determine what to do with READ-WRITE usage states if any
690 // Write-After-Write check -- if we have a previous write to test against
691 if (last_write && IsWriteHazard(usage)) {
692 hazard.Set(WRITE_AFTER_WRITE, write_tag);
693 } else {
694 // Only look for casus belli for WAR
695 const auto usage_stage = PipelineStageBit(usage_index);
696 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
697 if (IsReadHazard(usage_stage, last_reads[read_index])) {
698 hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
699 break;
700 }
701 }
702 }
703 }
704 return hazard;
705}
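// Illustrative summary of the classification in DetectHazard() above (hedged, not exhaustive):
//     unsynchronized write then read   -> READ_AFTER_WRITE
//     unsynchronized read  then write  -> WRITE_AFTER_READ
//     unsynchronized write then write  -> WRITE_AFTER_WRITE
// No hazard is raised when the earlier access is covered by the recorded barriers
// (write_barriers / ReadState::barriers).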
706
John Zulauf2f952d22020-02-10 11:34:51 -0700707// Asynchronous Hazards occur between subpasses with no connection through the DAG
John Zulauf3d84f1b2020-03-09 13:33:25 -0600708HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index) const {
John Zulauf2f952d22020-02-10 11:34:51 -0700709 HazardResult hazard;
710 auto usage = FlagBit(usage_index);
711 if (IsRead(usage)) {
712 if (last_write != 0) {
713 hazard.Set(READ_RACING_WRITE, write_tag);
714 }
715 } else {
716 if (last_write != 0) {
717 hazard.Set(WRITE_RACING_WRITE, write_tag);
718 } else if (last_read_count > 0) {
719 hazard.Set(WRITE_RACING_READ, last_reads[0].tag);
720 }
721 }
722 return hazard;
723}
724
John Zulauf36bcf6a2020-02-03 15:12:52 -0700725HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -0600726 SyncStageAccessFlags src_access_scope,
727 SyncBarrierStack *barrier_stack) const {
728 if (barrier_stack) {
729 return ApplyBarrierStack(*this, *barrier_stack).DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
730 }
731 return DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
732}
733
734HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700735 SyncStageAccessFlags src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -0700736 // Only supporting image layout transitions for now
737 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
738 HazardResult hazard;
739 if (last_write) {
740 // If the previous write is *not* in the 1st access scope
741 // *AND* the current barrier is not in the dependency chain
 742 // *AND* there is no prior memory barrier for the previous write in the dependency chain
743 // then the barrier access is unsafe (R/W after W)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700744 if (((last_write & src_access_scope) == 0) && ((src_exec_scope & write_dependency_chain) == 0) && (write_barriers == 0)) {
John Zulauf0cb5be22020-01-23 12:18:22 -0700745 // TODO: Do we need a different hazard name for this?
746 hazard.Set(WRITE_AFTER_WRITE, write_tag);
747 }
748 } else {
749 // Look at the reads
750 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
John Zulauf36bcf6a2020-02-03 15:12:52 -0700751 const auto &read_access = last_reads[read_index];
 752 // If the read stage is not in the src sync scope
 753 // *AND* not execution chained with an existing sync barrier (that's the 'or' in the test below)
754 // then the barrier access is unsafe (R/W after R)
755 if ((src_exec_scope & (read_access.stage | read_access.barriers)) == 0) {
756 hazard.Set(WRITE_AFTER_READ, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -0700757 break;
758 }
759 }
760 }
761 return hazard;
762}
763
John Zulauf5f13a792020-03-10 07:31:21 -0600764// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
 765// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
766// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
767void ResourceAccessState::Resolve(const ResourceAccessState &other) {
768 if (write_tag.IsBefore(other.write_tag)) {
 769 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent operation
770 *this = other;
771 } else if (!other.write_tag.IsBefore(write_tag)) {
 772 // This is the *equals* case for write operations; we merge the write barriers and the read state (but without the
 773 // dependency chaining logic or any stage expansion)
774 write_barriers |= other.write_barriers;
775
 776 // Merge the read states
777 for (uint32_t other_read_index = 0; other_read_index < other.last_read_count; other_read_index++) {
778 auto &other_read = other.last_reads[other_read_index];
779 if (last_read_stages & other_read.stage) {
780 // Merge in the barriers for read stages that exist in *both* this and other
781 // TODO: This is N^2 with stages... perhaps the ReadStates should be by stage index.
782 for (uint32_t my_read_index = 0; my_read_index < last_read_count; my_read_index++) {
783 auto &my_read = last_reads[my_read_index];
784 if (other_read.stage == my_read.stage) {
785 if (my_read.tag.IsBefore(other_read.tag)) {
786 my_read.tag = other_read.tag;
787 }
788 my_read.barriers |= other_read.barriers;
789 break;
790 }
791 }
792 } else {
793 // The other read stage doesn't exist in this, so add it.
794 last_reads[last_read_count] = other_read;
795 last_read_count++;
796 last_read_stages |= other_read.stage;
797 }
798 }
 799 } // the else clause would be that the other write is before this write... in which case we supersede the other state and ignore
800 // it.
801}
802
John Zulauf9cb530d2019-09-30 14:14:10 -0600803void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
 804 // Move this logic into the ResourceStateTracker as methods (or we'll repeat it for every flavor of resource...)
805 const auto usage_bit = FlagBit(usage_index);
806 if (IsRead(usage_index)) {
 807 // Multiple outstanding reads may be of interest and do dependency chains independently
808 // However, for purposes of barrier tracking, only one read per pipeline stage matters
809 const auto usage_stage = PipelineStageBit(usage_index);
810 if (usage_stage & last_read_stages) {
811 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
812 ReadState &access = last_reads[read_index];
813 if (access.stage == usage_stage) {
814 access.barriers = 0;
815 access.tag = tag;
816 break;
817 }
818 }
819 } else {
820 // We don't have this stage in the list yet...
821 assert(last_read_count < last_reads.size());
822 ReadState &access = last_reads[last_read_count++];
823 access.stage = usage_stage;
824 access.barriers = 0;
825 access.tag = tag;
826 last_read_stages |= usage_stage;
827 }
828 } else {
829 // Assume write
830 // TODO determine what to do with READ-WRITE operations if any
831 // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
832 // if the last_reads/last_write were unsafe, we've reported them,
833 // in either case the prior access is irrelevant, we can overwrite them as *this* write is now after them
834 last_read_count = 0;
835 last_read_stages = 0;
836
837 write_barriers = 0;
838 write_dependency_chain = 0;
839 write_tag = tag;
840 last_write = usage_bit;
841 }
842}
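// Illustrative note for Update() above: reads are tracked one ReadState per pipeline stage
// (last_reads), so e.g. a VERTEX_SHADER read followed by a FRAGMENT_SHADER read keeps two
// entries, while a write clears all read state and becomes the sole predecessor for later
// accesses.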
John Zulauf5f13a792020-03-10 07:31:21 -0600843
John Zulauf9cb530d2019-09-30 14:14:10 -0600844void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
845 // Execution Barriers only protect read operations
846 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
847 ReadState &access = last_reads[read_index];
848 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
849 if (srcStageMask & (access.stage | access.barriers)) {
850 access.barriers |= dstStageMask;
851 }
852 }
853 if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
854}
855
John Zulauf36bcf6a2020-02-03 15:12:52 -0700856void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
857 VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_access_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600858 // Assuming we've applied the execution side of this barrier, we update just the write
859 // The || implements the "dependency chain" logic for this barrier
John Zulauf36bcf6a2020-02-03 15:12:52 -0700860 if ((src_access_scope & last_write) || (write_dependency_chain & src_exec_scope)) {
861 write_barriers |= dst_access_scope;
862 write_dependency_chain |= dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600863 }
864}
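// Illustrative sketch (hedged) of the dependency-chain rule in ApplyMemoryAccessBarrier() above:
// after a TRANSFER write, a barrier with src = TRANSFER / TRANSFER_WRITE and
// dst = FRAGMENT_SHADER / SHADER_READ records the dst scopes in write_barriers and
// write_dependency_chain, so a subsequent fragment-shader read is considered protected, while a
// COMPUTE_SHADER read outside the dst scope would still be reported as a hazard.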
865
John Zulaufd1f85d42020-04-15 12:23:15 -0600866void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600867 auto *access_context = GetAccessContextNoInsert(command_buffer);
868 if (access_context) {
869 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -0600870 }
871}
872
John Zulaufd1f85d42020-04-15 12:23:15 -0600873void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
874 auto access_found = cb_access_state.find(command_buffer);
875 if (access_found != cb_access_state.end()) {
876 access_found->second->Reset();
877 cb_access_state.erase(access_found);
878 }
879}
880
John Zulauf540266b2020-04-06 18:54:53 -0600881void SyncValidator::ApplyGlobalBarriers(AccessContext *context, VkPipelineStageFlags srcStageMask,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700882 VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_access_scope,
883 SyncStageAccessFlags dst_access_scope, uint32_t memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -0600884 const VkMemoryBarrier *pMemoryBarriers) {
885 // TODO: Implement this better (maybe some delayed/on-demand integration).
John Zulauf36bcf6a2020-02-03 15:12:52 -0700886 ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_access_scope, dst_access_scope, memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -0600887 pMemoryBarriers);
John Zulauf540266b2020-04-06 18:54:53 -0600888 context->ApplyGlobalBarriers(barriers_functor);
John Zulauf9cb530d2019-09-30 14:14:10 -0600889}
890
John Zulauf16adfc92020-04-08 10:28:33 -0600891
John Zulauf540266b2020-04-06 18:54:53 -0600892void SyncValidator::ApplyBufferBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700893 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
894 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
John Zulauf9cb530d2019-09-30 14:14:10 -0600895 const VkBufferMemoryBarrier *barriers) {
896 // TODO Implement this at subresource/memory_range accuracy
897 for (uint32_t index = 0; index < barrier_count; index++) {
898 const auto &barrier = barriers[index];
899 const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
900 if (!buffer) continue;
John Zulauf16adfc92020-04-08 10:28:33 -0600901 ResourceAccessRange range = MakeRange(barrier);
John Zulauf540266b2020-04-06 18:54:53 -0600902 const auto src_access_scope = AccessScope(src_stage_accesses, barrier.srcAccessMask);
903 const auto dst_access_scope = AccessScope(dst_stage_accesses, barrier.dstAccessMask);
904 const ApplyMemoryAccessBarrierFunctor update_action(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
905 context->UpdateMemoryAccess(*buffer, range, update_action);
John Zulauf9cb530d2019-09-30 14:14:10 -0600906 }
907}
908
John Zulauf540266b2020-04-06 18:54:53 -0600909void SyncValidator::ApplyImageBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
910 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
911 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
912 const VkImageMemoryBarrier *barriers) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700913 for (uint32_t index = 0; index < barrier_count; index++) {
914 const auto &barrier = barriers[index];
915 const auto *image = Get<IMAGE_STATE>(barrier.image);
916 if (!image) continue;
locke-lunarg296a3c92020-03-25 01:04:29 -0600917 const ApplyMemoryAccessBarrierFunctor barrier_action(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask),
918 dst_exec_scope,
919 AccessScope(dst_stage_accesses, barrier.dstAccessMask));
John Zulauf540266b2020-04-06 18:54:53 -0600920
921 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
922 context->UpdateMemoryAccess(*image, subresource_range, barrier_action);
John Zulauf9cb530d2019-09-30 14:14:10 -0600923 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600924}
925
926bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
927 uint32_t regionCount, const VkBufferCopy *pRegions) const {
928 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600929 const auto *cb_context = GetAccessContext(commandBuffer);
930 assert(cb_context);
931 if (!cb_context) return skip;
932 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -0600933
John Zulauf3d84f1b2020-03-09 13:33:25 -0600934 // If we have no previous accesses, we have no hazards
935 // TODO: make this sub-resource capable
936 // TODO: make this general, and stuff it into templates/utility functions
937 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600938 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600939
940 for (uint32_t region = 0; region < regionCount; region++) {
941 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -0600942 if (src_buffer) {
943 ResourceAccessRange src_range = MakeRange(copy_region.srcOffset, copy_region.size);
944 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600945 if (hazard.hazard) {
946 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -0600947 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
948 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
949 report_data->FormatHandle(srcBuffer).c_str(), region);
John Zulauf9cb530d2019-09-30 14:14:10 -0600950 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600951 }
John Zulauf16adfc92020-04-08 10:28:33 -0600952 if (dst_buffer && !skip) {
953 ResourceAccessRange dst_range = MakeRange(copy_region.dstOffset, copy_region.size);
954 auto hazard = context->DetectHazard(*dst_buffer,
John Zulauf3d84f1b2020-03-09 13:33:25 -0600955 SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
956 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -0600957 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
958 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
959 report_data->FormatHandle(dstBuffer).c_str(), region);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600960 }
961 }
962 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600963 }
964 return skip;
965}
966
967void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
968 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600969 auto *cb_context = GetAccessContext(commandBuffer);
970 assert(cb_context);
971 auto *context = cb_context->GetCurrentAccessContext();
972
John Zulauf9cb530d2019-09-30 14:14:10 -0600973 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -0600974 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf9cb530d2019-09-30 14:14:10 -0600975
976 for (uint32_t region = 0; region < regionCount; region++) {
977 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -0600978 if (src_buffer) {
979 ResourceAccessRange src_range = MakeRange(copy_region.srcOffset, copy_region.size);
980 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -0600981 }
John Zulauf16adfc92020-04-08 10:28:33 -0600982 if (dst_buffer) {
983 ResourceAccessRange dst_range = MakeRange(copy_region.dstOffset, copy_region.size);
984 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700985 }
986 }
987}
988
989bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
990 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
991 const VkImageCopy *pRegions) const {
992 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600993 const auto *cb_access_context = GetAccessContext(commandBuffer);
994 assert(cb_access_context);
995 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700996
John Zulauf3d84f1b2020-03-09 13:33:25 -0600997 const auto *context = cb_access_context->GetCurrentAccessContext();
998 assert(context);
999 if (!context) return skip;
1000
1001 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1002 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001003 for (uint32_t region = 0; region < regionCount; region++) {
1004 const auto &copy_region = pRegions[region];
1005 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001006 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001007 copy_region.srcOffset, copy_region.extent);
1008 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001009 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1010 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1011 report_data->FormatHandle(srcImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001012 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001013 }
1014
1015         if (dst_image && src_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07001016 VkExtent3D dst_copy_extent =
1017 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001018 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07001019 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001020 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001021 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1022 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1023 report_data->FormatHandle(dstImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001024 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07001025 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07001026 }
1027 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001028
John Zulauf5c5e88d2019-12-26 11:22:02 -07001029 return skip;
1030}
1031
1032void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1033 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1034 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001035 auto *cb_access_context = GetAccessContext(commandBuffer);
1036 assert(cb_access_context);
1037 auto *context = cb_access_context->GetCurrentAccessContext();
1038 assert(context);
1039
John Zulauf5c5e88d2019-12-26 11:22:02 -07001040 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001041 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001042
1043 for (uint32_t region = 0; region < regionCount; region++) {
1044 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06001045 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001046 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource, copy_region.srcOffset,
1047 copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001048 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001049         if (dst_image && src_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07001050 VkExtent3D dst_copy_extent =
1051 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001052 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource, copy_region.dstOffset,
1053 dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001054 }
1055 }
1056}
1057
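// For vkCmdPipelineBarrier, only image layout transitions are validated at this point; barriers where
// newLayout == oldLayout are skipped. The source stage mask is expanded, then converted to an execution
// scope (WithEarlierPipelineStages) and a per-stage access scope, and DetectImageBarrierHazard checks each
// transition against prior accesses to the image relative to that source scope.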
1058bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1059 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1060 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1061 uint32_t bufferMemoryBarrierCount,
1062 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1063 uint32_t imageMemoryBarrierCount,
1064 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
1065 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001066 const auto *cb_access_context = GetAccessContext(commandBuffer);
1067 assert(cb_access_context);
1068 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001069
John Zulauf3d84f1b2020-03-09 13:33:25 -06001070 const auto *context = cb_access_context->GetCurrentAccessContext();
1071 assert(context);
1072 if (!context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001073
John Zulauf3d84f1b2020-03-09 13:33:25 -06001074 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001075 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1076 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf0cb5be22020-01-23 12:18:22 -07001077 // Validate Image Layout transitions
1078 for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
1079 const auto &barrier = pImageMemoryBarriers[index];
1080 if (barrier.newLayout == barrier.oldLayout) continue; // Only interested in layout transitions at this point.
1081 const auto *image_state = Get<IMAGE_STATE>(barrier.image);
1082 if (!image_state) continue;
John Zulauf16adfc92020-04-08 10:28:33 -06001083 const auto hazard = context->DetectImageBarrierHazard(*image_state, src_exec_scope, src_stage_accesses, barrier);
John Zulauf0cb5be22020-01-23 12:18:22 -07001084 if (hazard.hazard) {
1085 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001086 skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
1087 "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s", string_SyncHazard(hazard.hazard),
1088 index, report_data->FormatHandle(barrier.image).c_str());
John Zulauf0cb5be22020-01-23 12:18:22 -07001089 }
1090 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001091
1092 return skip;
1093}
1094
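// Record-phase application of the barrier: both stage masks are expanded and converted to execution scopes
// (logically earlier stages for the source, logically later stages for the destination) plus per-stage
// access scopes, which are then applied to the tracked buffer and image accesses. Global memory barriers
// are applied last; see the note below about them potentially being a superset of the other two.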
1095void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1096 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1097 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1098 uint32_t bufferMemoryBarrierCount,
1099 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1100 uint32_t imageMemoryBarrierCount,
1101 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001102 auto *cb_access_context = GetAccessContext(commandBuffer);
1103 assert(cb_access_context);
1104 if (!cb_access_context) return;
1105 auto access_context = cb_access_context->GetCurrentAccessContext();
1106 assert(access_context);
1107 if (!access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06001108
John Zulauf3d84f1b2020-03-09 13:33:25 -06001109 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001110 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001111 const auto dst_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), dstStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001112 auto dst_stage_accesses = AccessScopeByStage(dst_stage_mask);
1113 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1114 const auto dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001115 ApplyBufferBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
1116 bufferMemoryBarrierCount, pBufferMemoryBarriers);
John Zulauf540266b2020-04-06 18:54:53 -06001117 ApplyImageBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001118 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001119
1120     // Apply these last in case their operation is a superset of the other two and would clean them up...
John Zulauf3d84f1b2020-03-09 13:33:25 -06001121 ApplyGlobalBarriers(access_context, src_exec_scope, dst_exec_scope, src_stage_accesses, dst_stage_accesses, memoryBarrierCount,
John Zulauf0cb5be22020-01-23 12:18:22 -07001122 pMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001123}
1124
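// Device-creation hook: once the state tracker has set up the device state, register callbacks so this
// validator can reset or free its per-command-buffer access tracking in step with command buffer reset and
// free events.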
1125void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
1126 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
1127 // The state tracker sets up the device state
1128 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
1129
John Zulauf5f13a792020-03-10 07:31:21 -06001130 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
1131 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06001132 // TODO: Find a good way to do this hooklessly.
1133 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
1134 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
1135 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
1136
John Zulaufd1f85d42020-04-15 12:23:15 -06001137 sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
1138 sync_device_state->ResetCommandBufferCallback(command_buffer);
1139 });
1140 sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
1141 sync_device_state->FreeCommandBufferCallback(command_buffer);
1142 });
John Zulauf9cb530d2019-09-30 14:14:10 -06001143}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001144
1145void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
1146 VkResult result) {
1147 // The state tracker sets up the command buffer state
1148 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
1149
1150 // Create/initialize the structure that trackers accesses at the command buffer scope.
1151     // Create/initialize the structure that tracks accesses at the command buffer scope.
1152 assert(cb_access_context);
1153 cb_access_context->Reset();
1154}
1155
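// The render pass entry points below (Begin/Next/End, including the *2 and *2KHR variants) all funnel into
// the shared RecordCmd* helpers, so every variant drives the same access-context transitions:
// BeginRenderPass, NextRenderPass, and EndRenderPass on the command buffer's access context.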
1156void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1157 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1158 const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
1159 auto cb_context = GetAccessContext(commandBuffer);
1160 if (rp_state && cb_context) {
1161 cb_context->BeginRenderPass(*rp_state);
1162 }
1163}
1164
1165void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1166 VkSubpassContents contents) {
1167 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
1168 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1169 subpass_begin_info.contents = contents;
1170 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info);
1171}
1172
1173void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1174 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1175 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1176 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1177}
1178
1179void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
1180 const VkRenderPassBeginInfo *pRenderPassBegin,
1181 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1182 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1183 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1184}
1185
1186void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1187 const VkSubpassEndInfo *pSubpassEndInfo) {
1188 auto cb_context = GetAccessContext(commandBuffer);
1189 assert(cb_context);
1190 auto cb_state = cb_context->GetCommandBufferState();
1191 if (!cb_state) return;
1192
1193 auto rp_state = cb_state->activeRenderPass;
1194 if (!rp_state) return;
1195
1196 cb_context->NextRenderPass(*rp_state);
1197}
1198
1199void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
1200 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
1201 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1202 subpass_begin_info.contents = contents;
1203 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr);
1204}
1205
1206void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1207 const VkSubpassEndInfo *pSubpassEndInfo) {
1208 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1209 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1210}
1211
1212void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1213 const VkSubpassEndInfo *pSubpassEndInfo) {
1214 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1215 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1216}
1217
John Zulaufe5da6e52020-03-18 15:32:18 -06001218void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1219     // Resolve all of the subpass contexts into the command buffer context
1220 auto cb_context = GetAccessContext(commandBuffer);
1221 assert(cb_context);
1222 auto cb_state = cb_context->GetCommandBufferState();
1223 if (!cb_state) return;
1224
1225 const auto *rp_state = cb_state->activeRenderPass;
1226 if (!rp_state) return;
1227
1228 cb_context->EndRenderPass(*rp_state);
1229}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001230
1231void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
1232 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
1233 RecordCmdEndRenderPass(commandBuffer, nullptr);
1234}
1235
1236void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1237 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
1238 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
1239}
1240
1241void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1242 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
1243 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
1244}
locke-lunarga19c71d2020-03-02 18:17:04 -07001245
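// Buffer-to-image copies mix the two tracking paths: the buffer side is a linear ResourceAccessRange built
// from bufferOffset and a size derived from the image-side region (GetBufferSizeFromCopyImage), while the
// image side is tracked by subresource, offset, and extent.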
1246bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1247 VkImageLayout dstImageLayout, uint32_t regionCount,
1248 const VkBufferImageCopy *pRegions) const {
1249 bool skip = false;
1250 const auto *cb_access_context = GetAccessContext(commandBuffer);
1251 assert(cb_access_context);
1252 if (!cb_access_context) return skip;
1253
1254 const auto *context = cb_access_context->GetCurrentAccessContext();
1255 assert(context);
1256 if (!context) return skip;
1257
1258 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001259 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
1260
1261 for (uint32_t region = 0; region < regionCount; region++) {
1262 const auto &copy_region = pRegions[region];
John Zulauf16adfc92020-04-08 10:28:33 -06001263         if (src_buffer && dst_image) {
1264 ResourceAccessRange src_range = MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
1265 auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
locke-lunarga19c71d2020-03-02 18:17:04 -07001266 if (hazard.hazard) {
1267 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001268 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
1269 "vkCmdCopyBufferToImage: Hazard %s for srcBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001270 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
1271 }
1272 }
1273 if (dst_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001274 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001275 copy_region.imageOffset, copy_region.imageExtent);
1276 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001277 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1278 "vkCmdCopyBufferToImage: Hazard %s for dstImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001279 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
1280 }
1281 if (skip) break;
1282 }
1283 if (skip) break;
1284 }
1285 return skip;
1286}
1287
1288void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1289 VkImageLayout dstImageLayout, uint32_t regionCount,
1290 const VkBufferImageCopy *pRegions) {
1291 auto *cb_access_context = GetAccessContext(commandBuffer);
1292 assert(cb_access_context);
1293 auto *context = cb_access_context->GetCurrentAccessContext();
1294 assert(context);
1295
1296 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf16adfc92020-04-08 10:28:33 -06001297 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001298
1299 for (uint32_t region = 0; region < regionCount; region++) {
1300 const auto &copy_region = pRegions[region];
1301         if (src_buffer && dst_image) {
John Zulauf16adfc92020-04-08 10:28:33 -06001302 ResourceAccessRange src_range = MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
1303 context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001304 }
1305 if (dst_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001306 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001307 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001308 }
1309 }
1310}
1311
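// Image-to-buffer copies mirror the buffer-to-image path with the directions reversed: the image
// subresource is checked as a transfer read and the buffer range as a transfer write. Sparsely bound
// destination buffers are skipped here, since dst_mem resolves to VK_NULL_HANDLE for them.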
1312bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
1313 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
1314 const VkBufferImageCopy *pRegions) const {
1315 bool skip = false;
1316 const auto *cb_access_context = GetAccessContext(commandBuffer);
1317 assert(cb_access_context);
1318 if (!cb_access_context) return skip;
1319
1320 const auto *context = cb_access_context->GetCurrentAccessContext();
1321 assert(context);
1322 if (!context) return skip;
1323
1324 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1325 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1326 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1327 for (uint32_t region = 0; region < regionCount; region++) {
1328 const auto &copy_region = pRegions[region];
1329 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001330 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001331 copy_region.imageOffset, copy_region.imageExtent);
1332 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001333 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1334 "vkCmdCopyImageToBuffer: Hazard %s for srcImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001335 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
1336 }
1337 }
1338         if (dst_mem && src_image) {
John Zulauf16adfc92020-04-08 10:28:33 -06001339             ResourceAccessRange dst_range = MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
1340 auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
locke-lunarga19c71d2020-03-02 18:17:04 -07001341 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001342 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
1343 "vkCmdCopyImageToBuffer: Hazard %s for dstBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001344 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
1345 }
1346 }
1347 if (skip) break;
1348 }
1349 return skip;
1350}
1351
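// Record-phase counterpart: image reads and buffer writes are recorded per region. Note that, unlike the
// validate path above, the destination is keyed off dst_buffer rather than dst_mem, so sparsely bound
// destination buffers are not filtered out here.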
1352void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1353 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
1354 auto *cb_access_context = GetAccessContext(commandBuffer);
1355 assert(cb_access_context);
1356 auto *context = cb_access_context->GetCurrentAccessContext();
1357 assert(context);
1358
1359 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001360 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1361 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06001362 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07001363
1364 for (uint32_t region = 0; region < regionCount; region++) {
1365 const auto &copy_region = pRegions[region];
1366 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001367 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001368 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001369 }
1370         if (dst_buffer && src_image) {
John Zulauf16adfc92020-04-08 10:28:33 -06001371 ResourceAccessRange dst_range = MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
1372 context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001373 }
1374 }
1375}
1376
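// Blit regions are given as pairs of corner offsets rather than offset + extent, so the extent used for
// hazard detection is reconstructed as offsets[1] - offsets[0] on each axis. This assumes the offsets are
// ordered; a mirrored blit (where offsets[1] < offsets[0] on some axis) would need the absolute difference
// and the minimum corner instead.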
1377bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1378 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1379 const VkImageBlit *pRegions, VkFilter filter) const {
1380 bool skip = false;
1381 const auto *cb_access_context = GetAccessContext(commandBuffer);
1382 assert(cb_access_context);
1383 if (!cb_access_context) return skip;
1384
1385 const auto *context = cb_access_context->GetCurrentAccessContext();
1386 assert(context);
1387 if (!context) return skip;
1388
1389 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1390 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
1391
1392 for (uint32_t region = 0; region < regionCount; region++) {
1393 const auto &blit_region = pRegions[region];
1394 if (src_image) {
1395 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1396 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1397 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001398 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001399 blit_region.srcOffsets[0], extent);
1400 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001401 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1402 "vkCmdBlitImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1403 report_data->FormatHandle(srcImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001404 }
1405 }
1406
1407 if (dst_image) {
1408 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1409 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1410 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001411 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001412 blit_region.dstOffsets[0], extent);
1413 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001414 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1415 "vkCmdBlitImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1416 report_data->FormatHandle(dstImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001417 }
1418 if (skip) break;
1419 }
1420 }
1421
1422 return skip;
1423}
1424
1425void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1426 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1427 const VkImageBlit *pRegions, VkFilter filter) {
1428 auto *cb_access_context = GetAccessContext(commandBuffer);
1429 assert(cb_access_context);
1430 auto *context = cb_access_context->GetCurrentAccessContext();
1431 assert(context);
1432
1433 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001434 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001435
1436 for (uint32_t region = 0; region < regionCount; region++) {
1437 const auto &blit_region = pRegions[region];
1438 if (src_image) {
1439 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1440 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1441 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001442 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001443 blit_region.srcOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001444 }
1445 if (dst_image) {
1446 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1447 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1448 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001449 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001450 blit_region.dstOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001451 }
1452 }
1453}