John Zulauf9cb530d2019-09-30 14:14:10 -06001/* Copyright (c) 2019 The Khronos Group Inc.
2 * Copyright (c) 2019 Valve Corporation
3 * Copyright (c) 2019 LunarG, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 * Author: John Zulauf <jzulauf@lunarg.com>
18 */
19
20#include <limits>
21#include <vector>
locke-lunarg296a3c92020-03-25 01:04:29 -060022#include <memory>
23#include <bitset>
John Zulauf9cb530d2019-09-30 14:14:10 -060024#include "synchronization_validation.h"
25
26static const char *string_SyncHazardVUID(SyncHazard hazard) {
27 switch (hazard) {
28 case SyncHazard::NONE:
John Zulauf2f952d22020-02-10 11:34:51 -070029 return "SYNC-HAZARD-NONE";
John Zulauf9cb530d2019-09-30 14:14:10 -060030 break;
31 case SyncHazard::READ_AFTER_WRITE:
32 return "SYNC-HAZARD-READ_AFTER_WRITE";
33 break;
34 case SyncHazard::WRITE_AFTER_READ:
35 return "SYNC-HAZARD-WRITE_AFTER_READ";
36 break;
37 case SyncHazard::WRITE_AFTER_WRITE:
38 return "SYNC-HAZARD-WRITE_AFTER_WRITE";
39 break;
John Zulauf2f952d22020-02-10 11:34:51 -070040 case SyncHazard::READ_RACING_WRITE:
41 return "SYNC-HAZARD-READ-RACING-WRITE";
42 break;
43 case SyncHazard::WRITE_RACING_WRITE:
44 return "SYNC-HAZARD-WRITE-RACING-WRITE";
45 break;
46 case SyncHazard::WRITE_RACING_READ:
47 return "SYNC-HAZARD-WRITE-RACING-READ";
48 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060049 default:
50 assert(0);
51 }
52 return "SYNC-HAZARD-INVALID";
53}
54
55static const char *string_SyncHazard(SyncHazard hazard) {
56 switch (hazard) {
57 case SyncHazard::NONE:
58 return "NONR";
59 break;
60 case SyncHazard::READ_AFTER_WRITE:
61 return "READ_AFTER_WRITE";
62 break;
63 case SyncHazard::WRITE_AFTER_READ:
64 return "WRITE_AFTER_READ";
65 break;
66 case SyncHazard::WRITE_AFTER_WRITE:
67 return "WRITE_AFTER_WRITE";
68 break;
John Zulauf2f952d22020-02-10 11:34:51 -070069 case SyncHazard::READ_RACING_WRITE:
70 return "READ_RACING_WRITE";
71 break;
72 case SyncHazard::WRITE_RACING_WRITE:
73 return "WRITE_RACING_WRITE";
74 break;
75 case SyncHazard::WRITE_RACING_READ:
76 return "WRITE_RACING_READ";
77 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060078 default:
79 assert(0);
80 }
81 return "INVALID HAZARD";
82}
83
John Zulauf0cb5be22020-01-23 12:18:22 -070084// Expand the pipeline stages without regard to whether they are valid w.r.t. queue or extension
85VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
86 VkPipelineStageFlags expanded = stage_mask;
87 if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
88 expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
89 for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
90 if (all_commands.first & queue_flags) {
91 expanded |= all_commands.second;
92 }
93 }
94 }
95 if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
96 expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
97 expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
98 }
99 return expanded;
100}
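// Illustrative example (not exercised by the layer itself): for a queue created with
// VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
//     ExpandPipelineStages(queue_flags, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)
// drops the meta-bit and ORs in the per-queue stage sets from syncAllCommandStagesByQueueFlags, while
// VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is replaced by the graphics-queue stage set minus VK_PIPELINE_STAGE_HOST_BIT.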
101
John Zulauf36bcf6a2020-02-03 15:12:52 -0700102VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
103 const std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
104 VkPipelineStageFlags unscanned = stage_mask;
105 VkPipelineStageFlags related = 0;
106 for (const auto &entry : map) {
107 const auto stage = entry.first;
108 if (stage & unscanned) {
109 related = related | entry.second;
110 unscanned = unscanned & ~stage;
111 if (!unscanned) break;
112 }
113 }
114 return related;
115}
116
117VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
118 return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
119}
120
121VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
122 return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
123}
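// Illustrative example: because execution dependencies extend to logically earlier/later stages, a srcStageMask
// of VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT is widened by WithEarlierPipelineStages to also cover the stages that
// precede it in pipeline order (vertex input, vertex/tessellation/geometry shading, early fragment tests, ...),
// as listed in syncLogicallyEarlierStages; WithLaterPipelineStages performs the mirror-image expansion for
// destination stage masks.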
124
John Zulauf5c5e88d2019-12-26 11:22:02 -0700125static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
126static ResourceAccessRange MakeMemoryAccessRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600127 assert(!buffer.sparse);
128 const auto base = offset + buffer.binding.offset;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700129 return ResourceAccessRange(base, base + size);
130}
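// Illustrative example: buffer access ranges are expressed in device-memory offsets, so a 64 byte access at
// buffer offset 16, with the buffer bound at memory offset 256, becomes the half-open range [272, 336), which
// callers pair with the buffer's backing VkDeviceMemory handle (see PreCallValidateCmdCopyBuffer below).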
131
John Zulauf3d84f1b2020-03-09 13:33:25 -0600132AccessTrackerContext::AccessTrackerContext(uint32_t subpass, VkQueueFlags queue_flags,
133 const std::vector<SubpassDependencyGraphNode> &dependencies,
134 const std::vector<AccessTrackerContext> &contexts,
135 AccessTrackerContext *external_context) {
136 Reset();
137 const auto &subpass_dep = dependencies[subpass];
138 prev_.reserve(subpass_dep.prev.size());
139 for (const auto &prev_dep : subpass_dep.prev) {
140 assert(prev_dep.dependency);
141 const auto dep = *prev_dep.dependency;
142 prev_.emplace_back(const_cast<AccessTrackerContext *>(&contexts[dep.srcSubpass]), queue_flags, dep);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700143 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600144
145 async_.reserve(subpass_dep.async.size());
146 for (const auto async_subpass : subpass_dep.async) {
147 async_.emplace_back(const_cast<AccessTrackerContext *>(&contexts[async_subpass]));
148 }
John Zulaufe5da6e52020-03-18 15:32:18 -0600149 if (subpass_dep.barrier_from_external) {
150 src_external_ = TrackBack(external_context, queue_flags, *subpass_dep.barrier_from_external);
151 } else {
152 src_external_ = TrackBack();
153 }
154 if (subpass_dep.barrier_to_external) {
155 dst_external_ = TrackBack(this, queue_flags, *subpass_dep.barrier_to_external);
156 } else {
157 dst_external_ = TrackBack();
John Zulauf3d84f1b2020-03-09 13:33:25 -0600158 }
John Zulauf5c5e88d2019-12-26 11:22:02 -0700159}
160
John Zulauf5f13a792020-03-10 07:31:21 -0600161template <typename Detector>
162HazardResult AccessTrackerContext::DetectPreviousHazard(const VulkanTypedHandle &handle, const Detector &detector,
163 const ResourceAccessRange &range) const {
164 ResourceAccessRangeMap descent_map;
165 ResourceAccessState default_state; // When present, PreviousAccess will "infill"
166 ResolvePreviousAccess(handle, range, &descent_map, &default_state);
167
168 HazardResult hazard;
169 for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
170 hazard = detector.Detect(prev);
171 }
172 return hazard;
173}
174
John Zulauf3d84f1b2020-03-09 13:33:25 -0600175// A recursive range walker for hazard detection: check the current context first, then recur (via DetectPreviousHazard)
176// to walk the DAG of prior contexts (for example, subpasses)
177template <typename Detector>
178HazardResult AccessTrackerContext::DetectHazard(const VulkanTypedHandle &handle, const Detector &detector,
John Zulauf5f13a792020-03-10 07:31:21 -0600179 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600180 HazardResult hazard;
John Zulauf5f13a792020-03-10 07:31:21 -0600181
182 // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
183 // so we'll check these first
184 for (const auto &async_context : async_) {
185 hazard = async_context->DetectAsyncHazard(handle, detector, range);
186 if (hazard.hazard) return hazard;
187 }
188
189 const auto access_tracker = GetAccessTracker(handle);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600190 if (access_tracker) {
191 const auto &accesses = access_tracker->GetCurrentAccessMap();
192 const auto from = accesses.lower_bound(range);
193 if (from != accesses.end() && from->first.intersects(range)) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600194 const auto to = accesses.upper_bound(range);
195 ResourceAccessRange gap = {range.begin, range.begin};
196 for (auto pos = from; pos != to; ++pos) {
John Zulauf5f13a792020-03-10 07:31:21 -0600197 hazard = detector.Detect(pos);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600198 if (hazard.hazard) return hazard;
199
John Zulauf5f13a792020-03-10 07:31:21 -0600200 // make sure we don't go past range
John Zulauf3d84f1b2020-03-09 13:33:25 -0600201 auto upper_bound = std::min(range.end, pos->first.end);
John Zulauf5f13a792020-03-10 07:31:21 -0600202 gap.end = upper_bound;
203
204 // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
John Zulauf3d84f1b2020-03-09 13:33:25 -0600205 if (!gap.empty()) {
206 // Must recur on all gaps
John Zulauf5f13a792020-03-10 07:31:21 -0600207 hazard = DetectPreviousHazard(handle, detector, gap);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600208 if (hazard.hazard) return hazard;
209 }
210 gap.begin = upper_bound;
211 }
John Zulauf5f13a792020-03-10 07:31:21 -0600212 gap.end = range.end;
213 if (gap.non_empty()) {
214 hazard = DetectPreviousHazard(handle, detector, gap);
215 if (hazard.hazard) return hazard;
216 }
217 } else {
218 hazard = DetectPreviousHazard(handle, detector, range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600219 }
John Zulauf5f13a792020-03-10 07:31:21 -0600220 } else {
221 hazard = DetectPreviousHazard(handle, detector, range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600222 }
223
224 return hazard;
225}
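// In outline, the walk above does the following for a queried range:
//   1) check every asynchronous context for racing accesses over the whole range;
//   2) scan this context's own access map, asking the detector about each overlapping entry;
//   3) for the portions handled by earlier contexts, descend via DetectPreviousHazard, which resolves the prior
//      state through the subpass barriers before running the detector on it.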
226
227// A non-recursive range walker for the asynchronous contexts (those with which we have no barriers)
228template <typename Detector>
229HazardResult AccessTrackerContext::DetectAsyncHazard(const VulkanTypedHandle &handle, const Detector &detector,
230 const ResourceAccessRange &range) const {
231 const auto access_tracker = GetAccessTracker(handle);
232 HazardResult hazard;
233 if (access_tracker) {
234 const auto &accesses = access_tracker->GetCurrentAccessMap();
235 const auto from = accesses.lower_bound(range);
236 const auto to = accesses.upper_bound(range);
237 for (auto pos = from; pos != to; ++pos) {
238 hazard = detector.DetectAsync(pos);
239 if (hazard.hazard) break;
240 }
241 }
242 return hazard;
243}
244
John Zulauf5f13a792020-03-10 07:31:21 -0600245void AccessTrackerContext::ResolveTrackBack(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
246 const AccessTrackerContext::TrackBack &track_back, ResourceAccessRangeMap *descent_map,
John Zulaufe5da6e52020-03-18 15:32:18 -0600247 const ResourceAccessState *infill_state, bool recur_to_infill) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600248 const auto *access_tracker = GetAccessTracker(handle);
249 if (access_tracker) {
250 sparse_container::parallel_iterator<ResourceAccessRangeMap, const ResourceAccessRangeMap> current(
251 *descent_map, access_tracker->GetCurrentAccessMap(), range.begin);
252 while (current->range.non_empty()) {
253 if (current->pos_B->valid) {
John Zulauf62f10592020-04-03 12:20:02 -0600254 const auto &src_pos = current->pos_B->lower_bound;
255 auto access_with_barrier = src_pos->second;
John Zulauf5f13a792020-03-10 07:31:21 -0600256 access_with_barrier.ApplyBarrier(track_back.barrier);
257 if (current->pos_A->valid) {
John Zulauf62f10592020-04-03 12:20:02 -0600258 current.trim_A();
John Zulauf5f13a792020-03-10 07:31:21 -0600259 current->pos_A->lower_bound->second.Resolve(access_with_barrier);
260 } else {
261 descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, access_with_barrier));
262 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after split(s)
263 }
264 } else {
265 // we have to descend to fill this gap
John Zulaufe5da6e52020-03-18 15:32:18 -0600266 if (recur_to_infill) {
John Zulauf62f10592020-04-03 12:20:02 -0600267 track_back.context->ResolvePreviousAccess(handle, current->range, descent_map, infill_state);
John Zulaufe5da6e52020-03-18 15:32:18 -0600268 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after recursion.
269 }
John Zulauf5f13a792020-03-10 07:31:21 -0600270 if (!current->pos_A->valid && infill_state) {
271 // If we didn't find anything in the previous range, we infill with default to prevent repeating
272 // a fruitless search
273 descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
274 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after insert
275 }
276 }
277 ++current;
278 }
John Zulauf62f10592020-04-03 12:20:02 -0600279 } else if (recur_to_infill) {
280 track_back.context->ResolvePreviousAccess(handle, range, descent_map, infill_state);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600281 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600282}
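// Sketch of the resolve above: descent_map (iterator A) and the source tracker's map (iterator B) are walked in
// parallel over the range. Where B has recorded state, that state has track_back.barrier applied (modelling the
// crossing of the subpass dependency) and is merged into A's existing entry, or inserted where A has none; where
// B has a gap, the previous context is queried recursively (when recur_to_infill), and anything still unresolved
// is filled with *infill_state so the same gap is not searched again.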
283
John Zulauf5f13a792020-03-10 07:31:21 -0600284void AccessTrackerContext::ResolvePreviousAccess(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
285 ResourceAccessRangeMap *descent_map,
286 const ResourceAccessState *infill_state) const {
John Zulaufe5da6e52020-03-18 15:32:18 -0600287 if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
John Zulauf5f13a792020-03-10 07:31:21 -0600288 if (range.non_empty() && infill_state) {
289 descent_map->insert(std::make_pair(range, *infill_state));
290 }
291 } else {
292 // Look for something to fill the gap further along.
293 for (const auto &prev_dep : prev_) {
294 ResolveTrackBack(handle, range, prev_dep, descent_map, infill_state);
295 }
296
John Zulaufe5da6e52020-03-18 15:32:18 -0600297 if (src_external_.context) {
298 ResolveTrackBack(handle, range, src_external_, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600299 }
300 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600301}
302
John Zulauf62f10592020-04-03 12:20:02 -0600303void AccessTrackerContext::ResolvePreviousAccess(const CMD_BUFFER_STATE &cmd_state, const IMAGE_STATE &image_state,
304 const VkImageSubresourceRange &subresource_range_arg,
305 ResourceAccessRangeMap *descent_map,
306 const ResourceAccessState *infill_state) const {
307 const VulkanTypedHandle image_handle(image_state.image, kVulkanObjectTypeImage);
308 auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
309 subresource_adapter::ImageRangeEncoder encoder(cmd_state.device, image_state);
310 subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image_state.createInfo.extent);
311 for (; range_gen->non_empty(); ++range_gen) {
312 ResolvePreviousAccess(image_handle, *range_gen, descent_map, infill_state);
313 }
314}
315
John Zulauf3d84f1b2020-03-09 13:33:25 -0600316class HazardDetector {
317 SyncStageAccessIndex usage_index_;
318
319 public:
John Zulauf5f13a792020-03-10 07:31:21 -0600320 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600321 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
322 return pos->second.DetectAsyncHazard(usage_index_);
323 }
324 HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
325};
326
327HazardResult AccessTrackerContext::DetectHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex usage_index,
328 const ResourceAccessRange &range) const {
329 HazardDetector detector(usage_index);
330 return DetectHazard(handle, detector, range);
331}
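// Typical use from a validate-phase hook (see PreCallValidateCmdCopyBuffer below for the real call site; mem and
// range here stand in for the accessed memory handle and byte range):
//     auto hazard = context->DetectHazard(VulkanTypedHandle(mem, kVulkanObjectTypeDeviceMemory),
//                                         SYNC_TRANSFER_TRANSFER_READ, range);
//     if (hazard.hazard) { /* report using string_SyncHazardVUID() / string_SyncHazard() */ }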
332
333void CommandBufferAccessContext::BeginRenderPass(const RENDER_PASS_STATE &rp_state) {
334 // Create an access context for the first subpass and add it to the command buffer's collection
335 render_pass_contexts_.emplace_back(queue_flags_, &rp_state.subpass_dependencies, &cb_tracker_context_);
336 current_renderpass_context_ = &render_pass_contexts_.back();
337 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulaufe5da6e52020-03-18 15:32:18 -0600338
339 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
John Zulauf3d84f1b2020-03-09 13:33:25 -0600340}
341
342void CommandBufferAccessContext::NextRenderPass(const RENDER_PASS_STATE &rp_state) {
343 assert(current_renderpass_context_);
344 current_renderpass_context_->NextSubpass(queue_flags_, &cb_tracker_context_);
John Zulaufe5da6e52020-03-18 15:32:18 -0600345 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
John Zulauf3d84f1b2020-03-09 13:33:25 -0600346 current_context_ = &current_renderpass_context_->CurrentContext();
347}
348
John Zulaufe5da6e52020-03-18 15:32:18 -0600349void CommandBufferAccessContext::EndRenderPass(const RENDER_PASS_STATE &render_pass) {
350 std::unordered_set<VulkanTypedHandle> resolved;
351 assert(current_renderpass_context_);
352 if (!current_renderpass_context_) return;
353
354 const auto &contexts = current_renderpass_context_->subpass_contexts_;
355
356 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
357
358 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
359 auto &context = contexts[subpass_index];
360 for (const auto &tracker_pair : context.GetAccessTrackerMap()) {
361 if (tracker_pair.second.GetCurrentAccessMap().size() == 0) continue;
362 auto insert_pair = resolved.insert(tracker_pair.first);
363 if (insert_pair.second) { // only create the resolve map for this handle if we haven't seen it before
364 // This is the first time we've seen this handle accessed, resolve this for all subsequent subpasses
365 ResourceAccessRangeMap resolve_map;
366 auto resolve_index = static_cast<uint32_t>(contexts.size());
367 while (resolve_index > subpass_index) {
368 resolve_index--;
369 const auto &from_context = contexts[resolve_index];
370 from_context.ResolveTrackBack(tracker_pair.first, full_range, from_context.GetDstExternalTrackBack(),
371 &resolve_map, nullptr, false);
372 }
373 // Given that all DAG paths lead back to the src_external_ (even if only a default one) we can just overwrite.
374 sparse_container::splice(&cb_tracker_context_.GetAccessTracker(tracker_pair.first)->GetCurrentAccessMap(),
375 resolve_map, sparse_container::value_precedence::prefer_source);
376 // TODO: This might be a place to consolidate the map
377 }
378 }
379 }
380 current_context_ = &cb_tracker_context_;
381 current_renderpass_context_ = nullptr;
382}
383
locke-lunarg296a3c92020-03-25 01:04:29 -0600384HazardResult AccessTrackerContext::DetectHazard(const CMD_BUFFER_STATE &cmd, const IMAGE_STATE &image,
385 SyncStageAccessIndex current_usage, const VkImageSubresourceLayers &subresource,
386 const VkOffset3D &offset, const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700387 // TODO: replace the encoder/generator with offset3D/extent3D aware versions
388 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
389 subresource.layerCount};
locke-lunarg296a3c92020-03-25 01:04:29 -0600390 subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
391 subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, offset, extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600392 VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700393 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600394 HazardResult hazard = DetectHazard(image_handle, current_usage, *range_gen);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700395 if (hazard.hazard) return hazard;
396 }
397 return HazardResult();
John Zulauf9cb530d2019-09-30 14:14:10 -0600398}
399
John Zulauf3d84f1b2020-03-09 13:33:25 -0600400class BarrierHazardDetector {
401 public:
402 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
403 SyncStageAccessFlags src_access_scope)
404 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
405
John Zulauf5f13a792020-03-10 07:31:21 -0600406 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
407 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -0700408 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600409 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
410 // Async barrier hazard detection can use the same path, as the usage index is a write (IsWrite), not a read (IsRead)
411 return pos->second.DetectAsyncHazard(usage_index_);
412 }
413
414 private:
415 SyncStageAccessIndex usage_index_;
416 VkPipelineStageFlags src_exec_scope_;
417 SyncStageAccessFlags src_access_scope_;
418};
419
420HazardResult AccessTrackerContext::DetectBarrierHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
421 VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
422 const ResourceAccessRange &range) const {
423 BarrierHazardDetector detector(current_usage, src_exec_scope, src_access_scope);
424 return DetectHazard(handle, detector, range);
John Zulauf0cb5be22020-01-23 12:18:22 -0700425}
426
locke-lunarg296a3c92020-03-25 01:04:29 -0600427HazardResult DetectImageBarrierHazard(const CMD_BUFFER_STATE &cmd, const AccessTrackerContext &context, const IMAGE_STATE &image,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700428 VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_stage_accesses,
John Zulauf0cb5be22020-01-23 12:18:22 -0700429 const VkImageMemoryBarrier &barrier) {
John Zulauf0cb5be22020-01-23 12:18:22 -0700430 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600431 const VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
John Zulauf36bcf6a2020-02-03 15:12:52 -0700432 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
locke-lunarg296a3c92020-03-25 01:04:29 -0600433 subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
434 subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image.createInfo.extent);
435 for (; range_gen->non_empty(); ++range_gen) {
436 HazardResult hazard = context.DetectBarrierHazard(image_handle, SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION,
437 src_exec_scope, src_access_scope, *range_gen);
438 if (hazard.hazard) return hazard;
John Zulauf0cb5be22020-01-23 12:18:22 -0700439 }
440 return HazardResult();
441}
442
John Zulauf9cb530d2019-09-30 14:14:10 -0600443template <typename Flags, typename Map>
444SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
445 SyncStageAccessFlags scope = 0;
446 for (const auto &bit_scope : map) {
447 if (flag_mask < bit_scope.first) break;
448
449 if (flag_mask & bit_scope.first) {
450 scope |= bit_scope.second;
451 }
452 }
453 return scope;
454}
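// Note: AccessScopeImpl relies on the maps being keyed by single flag bits in increasing order, so the scan can
// stop at the first key larger than flag_mask; each key bit present in flag_mask contributes that entry's
// precomputed SyncStageAccessFlags to the returned scope.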
455
456SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
457 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
458}
459
460SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
461 return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
462}
463
464// Getting from a stage mask and an access mask to stage/access masks is something we need to be good at...
465SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -0600466 // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
467 // accesses (after factoring out common terms, the union of the per-stage stage/access intersections is the intersection
468 // of the union of all stage/access types for all the stages with the same union for the access mask)...
John Zulauf9cb530d2019-09-30 14:14:10 -0600469 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
470}
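// Illustrative example (assuming the generated tables pair each stage with its per-stage access bits): for
//     stages   = VK_PIPELINE_STAGE_TRANSFER_BIT
//     accesses = VK_ACCESS_TRANSFER_WRITE_BIT
// AccessScopeByStage yields every TRANSFER-stage stage/access bit, AccessScopeByAccess yields every
// *_TRANSFER_WRITE bit across stages, and their intersection leaves just the SYNC_TRANSFER_TRANSFER_WRITE bit.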
471
472template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -0700473void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600474 // TODO -- region/mem-range accurate update
475 auto pos = accesses->lower_bound(range);
476 if (pos == accesses->end() || !pos->first.intersects(range)) {
477 // The range is empty, fill it with a default value.
478 pos = action.Infill(accesses, pos, range);
479 } else if (range.begin < pos->first.begin) {
480 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -0700481 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -0600482 } else if (pos->first.begin < range.begin) {
483 // Trim the beginning if needed
484 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
485 ++pos;
486 }
487
488 const auto the_end = accesses->end();
489 while ((pos != the_end) && pos->first.intersects(range)) {
490 if (pos->first.end > range.end) {
491 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
492 }
493
494 pos = action(accesses, pos);
495 if (pos == the_end) break;
496
497 auto next = pos;
498 ++next;
499 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
500 // Need to infill if next is disjoint
501 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700502 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -0600503 next = action.Infill(accesses, next, new_range);
504 }
505 pos = next;
506 }
507}
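// Sketch of the walk above: entries that straddle the requested range are split at the range boundaries so the
// action only ever sees segments fully inside the range; leading gaps and gaps between entries are handed to
// action.Infill (which for state updates resolves prior-context state into place, and is a no-op for barrier
// application), and the action is then applied segment by segment.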
508
509struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700510 using Iterator = ResourceAccessRangeMap::iterator;
511 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600512 // this is only called on gaps, and never returns a gap.
513 ResourceAccessState default_state;
514 context.ResolvePreviousAccess(handle, range, accesses, &default_state);
515 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -0600516 }
John Zulauf5f13a792020-03-10 07:31:21 -0600517
John Zulauf5c5e88d2019-12-26 11:22:02 -0700518 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600519 auto &access_state = pos->second;
520 access_state.Update(usage, tag);
521 return pos;
522 }
523
John Zulauf5f13a792020-03-10 07:31:21 -0600524 UpdateMemoryAccessStateFunctor(const VulkanTypedHandle &handle_, const AccessTrackerContext &context_,
525 SyncStageAccessIndex usage_, const ResourceUsageTag &tag_)
526 : handle(handle_), context(context_), usage(usage_), tag(tag_) {}
527 const VulkanTypedHandle handle;
528 const AccessTrackerContext &context;
John Zulauf9cb530d2019-09-30 14:14:10 -0600529 SyncStageAccessIndex usage;
530 const ResourceUsageTag &tag;
531};
532
533struct ApplyMemoryAccessBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700534 using Iterator = ResourceAccessRangeMap::iterator;
535 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600536
John Zulauf5c5e88d2019-12-26 11:22:02 -0700537 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600538 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700539 access_state.ApplyMemoryAccessBarrier(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600540 return pos;
541 }
542
John Zulauf36bcf6a2020-02-03 15:12:52 -0700543 ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_exec_scope_, SyncStageAccessFlags src_access_scope_,
544 VkPipelineStageFlags dst_exec_scope_, SyncStageAccessFlags dst_access_scope_)
545 : src_exec_scope(src_exec_scope_),
546 src_access_scope(src_access_scope_),
547 dst_exec_scope(dst_exec_scope_),
548 dst_access_scope(dst_access_scope_) {}
John Zulauf9cb530d2019-09-30 14:14:10 -0600549
John Zulauf36bcf6a2020-02-03 15:12:52 -0700550 VkPipelineStageFlags src_exec_scope;
551 SyncStageAccessFlags src_access_scope;
552 VkPipelineStageFlags dst_exec_scope;
553 SyncStageAccessFlags dst_access_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600554};
555
556struct ApplyGlobalBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700557 using Iterator = ResourceAccessRangeMap::iterator;
558 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600559
John Zulauf5c5e88d2019-12-26 11:22:02 -0700560 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600561 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700562 access_state.ApplyExecutionBarrier(src_exec_scope, dst_exec_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600563
564 for (const auto &functor : barrier_functor) {
565 functor(accesses, pos);
566 }
567 return pos;
568 }
569
John Zulauf36bcf6a2020-02-03 15:12:52 -0700570 ApplyGlobalBarrierFunctor(VkPipelineStageFlags src_exec_scope, VkPipelineStageFlags dst_exec_scope,
571 SyncStageAccessFlags src_stage_accesses, SyncStageAccessFlags dst_stage_accesses,
John Zulauf9cb530d2019-09-30 14:14:10 -0600572 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700573 : src_exec_scope(src_exec_scope), dst_exec_scope(dst_exec_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600574 // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
575 barrier_functor.reserve(memoryBarrierCount);
576 for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
577 const auto &barrier = pMemoryBarriers[barrier_index];
John Zulauf36bcf6a2020-02-03 15:12:52 -0700578 barrier_functor.emplace_back(src_exec_scope, SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask),
579 dst_exec_scope, SyncStageAccess::AccessScope(dst_stage_accesses, barrier.dstAccessMask));
John Zulauf9cb530d2019-09-30 14:14:10 -0600580 }
581 }
582
John Zulauf36bcf6a2020-02-03 15:12:52 -0700583 const VkPipelineStageFlags src_exec_scope;
584 const VkPipelineStageFlags dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600585 std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
586};
587
John Zulauf3d84f1b2020-03-09 13:33:25 -0600588void AccessTrackerContext::UpdateAccessState(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
589 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf5f13a792020-03-10 07:31:21 -0600590 UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600591 auto *tracker = GetAccessTracker(handle);
592 assert(tracker);
John Zulauf5f13a792020-03-10 07:31:21 -0600593 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600594}
595
locke-lunarg296a3c92020-03-25 01:04:29 -0600596void AccessTrackerContext::UpdateAccessState(const CMD_BUFFER_STATE &cmd, const IMAGE_STATE &image,
597 SyncStageAccessIndex current_usage, const VkImageSubresourceLayers &subresource,
598 const VkOffset3D &offset, const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700599 // TODO: replace the encoder/generator with offset3D aware versions
600 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
601 subresource.layerCount};
locke-lunarg296a3c92020-03-25 01:04:29 -0600602 subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
603 subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, offset, extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600604 const VulkanTypedHandle handle(image.image, kVulkanObjectTypeImage);
605 auto *tracker = GetAccessTracker(handle);
606 assert(tracker);
John Zulauf5f13a792020-03-10 07:31:21 -0600607
608 UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
609 for (; range_gen->non_empty(); ++range_gen) {
610 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), *range_gen, action);
611 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600612}
613
614SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &barrier) {
615 const auto src_stage_mask = ExpandPipelineStages(queue_flags, barrier.srcStageMask);
616 src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
617 src_access_scope = SyncStageAccess::AccessScope(src_stage_mask, barrier.srcAccessMask);
618 const auto dst_stage_mask = ExpandPipelineStages(queue_flags, barrier.dstStageMask);
619 dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
620 dst_access_scope = SyncStageAccess::AccessScope(dst_stage_mask, barrier.dstAccessMask);
621}
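// Illustrative example: given a subpass dependency with
//     srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
//     dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
// src_exec_scope holds the expanded source stage plus all logically earlier stages, src_access_scope holds the
// stage/access intersection for the source pair, and dst_exec_scope/dst_access_scope are built the same way with
// logically later stages; these are the first and second synchronization scopes the hazard checks compare against.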
622
623void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier) {
624 ApplyExecutionBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
625 ApplyMemoryAccessBarrier(barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope);
626}
627
628ResourceAccessState ResourceAccessState::ApplyBarrierStack(const ResourceAccessState &that, const SyncBarrierStack &barrier_stack) {
629 ResourceAccessState copy = that;
630 for (auto barrier = barrier_stack.begin(); barrier != barrier_stack.end(); ++barrier) {
631 assert(*barrier);
632 copy.ApplyBarrier(*(*barrier));
633 }
634 return copy;
635}
636
637HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, SyncBarrierStack *barrier_stack) const {
638 if (barrier_stack) {
639 return ApplyBarrierStack(*this, *barrier_stack).DetectHazard(usage_index);
640 }
641 return DetectHazard(usage_index);
642}
643
John Zulauf9cb530d2019-09-30 14:14:10 -0600644HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
645 HazardResult hazard;
646 auto usage = FlagBit(usage_index);
647 if (IsRead(usage)) {
648 if (IsWriteHazard(usage)) {
649 hazard.Set(READ_AFTER_WRITE, write_tag);
650 }
651 } else {
652 // Assume write
653 // TODO determine what to do with READ-WRITE usage states if any
654 // Write-After-Write check -- if we have a previous write to test against
655 if (last_write && IsWriteHazard(usage)) {
656 hazard.Set(WRITE_AFTER_WRITE, write_tag);
657 } else {
658 // Only look for casus belli for WAR
659 const auto usage_stage = PipelineStageBit(usage_index);
660 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
661 if (IsReadHazard(usage_stage, last_reads[read_index])) {
662 hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
663 break;
664 }
665 }
666 }
667 }
668 return hazard;
669}
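// Illustrative examples of the classification above (roughly stated):
//   - a read of data last written by a transfer, when no barrier has made that write visible to the reading
//     stage, reports READ_AFTER_WRITE against the write's tag;
//   - a new write over such unsynchronized written data reports WRITE_AFTER_WRITE;
//   - a new write following only reads reports WRITE_AFTER_READ against the first unsynchronized read's tag.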
670
John Zulauf2f952d22020-02-10 11:34:51 -0700671// Asynchronous Hazards occur between subpasses with no connection through the DAG
John Zulauf3d84f1b2020-03-09 13:33:25 -0600672HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index) const {
John Zulauf2f952d22020-02-10 11:34:51 -0700673 HazardResult hazard;
674 auto usage = FlagBit(usage_index);
675 if (IsRead(usage)) {
676 if (last_write != 0) {
677 hazard.Set(READ_RACING_WRITE, write_tag);
678 }
679 } else {
680 if (last_write != 0) {
681 hazard.Set(WRITE_RACING_WRITE, write_tag);
682 } else if (last_read_count > 0) {
683 hazard.Set(WRITE_RACING_READ, last_reads[0].tag);
684 }
685 }
686 return hazard;
687}
688
John Zulauf36bcf6a2020-02-03 15:12:52 -0700689HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -0600690 SyncStageAccessFlags src_access_scope,
691 SyncBarrierStack *barrier_stack) const {
692 if (barrier_stack) {
693 return ApplyBarrierStack(*this, *barrier_stack).DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
694 }
695 return DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
696}
697
698HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700699 SyncStageAccessFlags src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -0700700 // Only supporting image layout transitions for now
701 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
702 HazardResult hazard;
703 if (last_write) {
704 // If the previous write is *not* in the 1st access scope
705 // *AND* the current barrier is not in the dependency chain
706 // *AND* there is no prior memory barrier for the previous write in the dependency chain
707 // then the barrier access is unsafe (R/W after W)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700708 if (((last_write & src_access_scope) == 0) && ((src_exec_scope & write_dependency_chain) == 0) && (write_barriers == 0)) {
John Zulauf0cb5be22020-01-23 12:18:22 -0700709 // TODO: Do we need a different hazard name for this?
710 hazard.Set(WRITE_AFTER_WRITE, write_tag);
711 }
712 } else {
713 // Look at the reads
714 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
John Zulauf36bcf6a2020-02-03 15:12:52 -0700715 const auto &read_access = last_reads[read_index];
716 // If the read stage is not in the src sync scope
717 // *AND* not execution chained with an existing sync barrier (that's the or)
718 // then the barrier access is unsafe (R/W after R)
719 if ((src_exec_scope & (read_access.stage | read_access.barriers)) == 0) {
720 hazard.Set(WRITE_AFTER_READ, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -0700721 break;
722 }
723 }
724 }
725 return hazard;
726}
727
John Zulauf5f13a792020-03-10 07:31:21 -0600728// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
729// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
730// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
731void ResourceAccessState::Resolve(const ResourceAccessState &other) {
732 if (write_tag.IsBefore(other.write_tag)) {
733 // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent operation
734 *this = other;
735 } else if (!other.write_tag.IsBefore(write_tag)) {
736 // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
737 // dependency chaining logic or any stage expansion)
738 write_barriers |= other.write_barriers;
739
740 // Merge the read states
741 for (uint32_t other_read_index = 0; other_read_index < other.last_read_count; other_read_index++) {
742 auto &other_read = other.last_reads[other_read_index];
743 if (last_read_stages & other_read.stage) {
744 // Merge in the barriers for read stages that exist in *both* this and other
745 // TODO: This is N^2 with stages... perhaps the ReadStates should be by stage index.
746 for (uint32_t my_read_index = 0; my_read_index < last_read_count; my_read_index++) {
747 auto &my_read = last_reads[my_read_index];
748 if (other_read.stage == my_read.stage) {
749 if (my_read.tag.IsBefore(other_read.tag)) {
750 my_read.tag = other_read.tag;
751 }
752 my_read.barriers |= other_read.barriers;
753 break;
754 }
755 }
756 } else {
757 // The other read stage doesn't exist in this, so add it.
758 last_reads[last_read_count] = other_read;
759 last_read_count++;
760 last_read_stages |= other_read.stage;
761 }
762 }
763 } // the else clause would be that the other write is before this write... in which case we supersede the other state and ignore
764 // it.
765}
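// Illustrative example: when two subpass contexts have state for the same range, Resolve keeps the more recent
// write outright; when both saw the *same* write it unions the write barriers and merges the per-stage read
// entries, keeping the later tag per stage. This is the conservative merge used when subpass maps are folded
// back into the command buffer context (see EndRenderPass above).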
766
John Zulauf9cb530d2019-09-30 14:14:10 -0600767void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
768 // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
769 const auto usage_bit = FlagBit(usage_index);
770 if (IsRead(usage_index)) {
771 // Multiple outstanding reads may be of interest and do dependency chains independently
772 // However, for purposes of barrier tracking, only one read per pipeline stage matters
773 const auto usage_stage = PipelineStageBit(usage_index);
774 if (usage_stage & last_read_stages) {
775 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
776 ReadState &access = last_reads[read_index];
777 if (access.stage == usage_stage) {
778 access.barriers = 0;
779 access.tag = tag;
780 break;
781 }
782 }
783 } else {
784 // We don't have this stage in the list yet...
785 assert(last_read_count < last_reads.size());
786 ReadState &access = last_reads[last_read_count++];
787 access.stage = usage_stage;
788 access.barriers = 0;
789 access.tag = tag;
790 last_read_stages |= usage_stage;
791 }
792 } else {
793 // Assume write
794 // TODO determine what to do with READ-WRITE operations if any
795 // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
796 // if the last_reads/last_write were unsafe, we've reported them,
797 // in either case the prior access is irrelevant, we can overwrite them as *this* write is now after them
798 last_read_count = 0;
799 last_read_stages = 0;
800
801 write_barriers = 0;
802 write_dependency_chain = 0;
803 write_tag = tag;
804 last_write = usage_bit;
805 }
806}
John Zulauf5f13a792020-03-10 07:31:21 -0600807
John Zulauf9cb530d2019-09-30 14:14:10 -0600808void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
809 // Execution Barriers only protect read operations
810 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
811 ReadState &access = last_reads[read_index];
812 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
813 if (srcStageMask & (access.stage | access.barriers)) {
814 access.barriers |= dstStageMask;
815 }
816 }
817 if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
818}
819
John Zulauf36bcf6a2020-02-03 15:12:52 -0700820void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
821 VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_access_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600822 // Assuming we've applied the execution side of this barrier, we update just the write
823 // The || implements the "dependency chain" logic for this barrier
John Zulauf36bcf6a2020-02-03 15:12:52 -0700824 if ((src_access_scope & last_write) || (write_dependency_chain & src_exec_scope)) {
825 write_barriers |= dst_access_scope;
826 write_dependency_chain |= dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600827 }
828}
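// Illustrative example of the dependency chain rule above, assuming two successive barriers:
//     A: srcAccessMask covers the prior TRANSFER write, dstStageMask = FRAGMENT_SHADER, dstAccessMask = SHADER_READ
//     B: srcStageMask = FRAGMENT_SHADER
// Barrier A matches via (src_access_scope & last_write) and records FRAGMENT_SHADER in write_dependency_chain, so
// barrier B chains onto the same write via (write_dependency_chain & src_exec_scope) even though B's srcAccessMask
// never names the original write.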
829
830void SyncValidator::ResetCommandBuffer(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600831 auto *access_context = GetAccessContextNoInsert(command_buffer);
832 if (access_context) {
833 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -0600834 }
835}
836
John Zulauf3d84f1b2020-03-09 13:33:25 -0600837void SyncValidator::ApplyGlobalBarriers(AccessTrackerContext *context, VkPipelineStageFlags srcStageMask,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700838 VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_access_scope,
839 SyncStageAccessFlags dst_access_scope, uint32_t memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -0600840 const VkMemoryBarrier *pMemoryBarriers) {
841 // TODO: Implement this better (maybe some delayed/on-demand integration).
John Zulauf36bcf6a2020-02-03 15:12:52 -0700842 ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_access_scope, dst_access_scope, memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -0600843 pMemoryBarriers);
John Zulauf5f13a792020-03-10 07:31:21 -0600844 // Note: Barriers do *not* cross context boundaries, applying to accesses within... (at least for renderpass subpasses)
John Zulauf3d84f1b2020-03-09 13:33:25 -0600845 for (auto &handle_tracker_pair : context->GetAccessTrackerMap()) {
846 UpdateMemoryAccessState(&handle_tracker_pair.second.GetCurrentAccessMap(), full_range, barriers_functor);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700847 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600848}
849
John Zulauf3d84f1b2020-03-09 13:33:25 -0600850void SyncValidator::ApplyBufferBarriers(AccessTrackerContext *context, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700851 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
852 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
John Zulauf9cb530d2019-09-30 14:14:10 -0600853 const VkBufferMemoryBarrier *barriers) {
854 // TODO Implement this at subresource/memory_range accuracy
855 for (uint32_t index = 0; index < barrier_count; index++) {
856 const auto &barrier = barriers[index];
857 const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
858 if (!buffer) continue;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600859 auto *tracker = context->GetAccessTracker(VulkanTypedHandle(buffer->binding.mem_state->mem, kVulkanObjectTypeDeviceMemory));
860 if (!tracker) continue;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700861 ResourceAccessRange range = MakeMemoryAccessRange(*buffer, barrier.offset, barrier.size);
John Zulauf9cb530d2019-09-30 14:14:10 -0600862 UpdateMemoryAccessState(
John Zulauf3d84f1b2020-03-09 13:33:25 -0600863 &tracker->GetCurrentAccessMap(), range,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700864 ApplyMemoryAccessBarrierFunctor(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask), dst_exec_scope,
865 AccessScope(dst_stage_accesses, barrier.dstAccessMask)));
John Zulauf9cb530d2019-09-30 14:14:10 -0600866 }
867}
868
locke-lunarg296a3c92020-03-25 01:04:29 -0600869void SyncValidator::ApplyImageBarriers(const CMD_BUFFER_STATE &cmd, AccessTrackerContext *context,
870 VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_stage_accesses,
871 VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_stage_accesses,
872 uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700873 for (uint32_t index = 0; index < barrier_count; index++) {
874 const auto &barrier = barriers[index];
875 const auto *image = Get<IMAGE_STATE>(barrier.image);
876 if (!image) continue;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600877 auto tracker = context->GetAccessTrackerNoInsert(VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage));
878 if (!tracker) continue;
879 auto *accesses = &tracker->GetCurrentAccessMap();
880
John Zulauf5c5e88d2019-12-26 11:22:02 -0700881 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
locke-lunarg296a3c92020-03-25 01:04:29 -0600882 subresource_adapter::ImageRangeEncoder encoder(image->store_device_as_workaround, *image);
883 subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image->createInfo.extent);
locke-lunarg1dbbb9e2020-02-28 22:43:53 -0700884
locke-lunarg296a3c92020-03-25 01:04:29 -0600885 const ApplyMemoryAccessBarrierFunctor barrier_action(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask),
886 dst_exec_scope,
887 AccessScope(dst_stage_accesses, barrier.dstAccessMask));
888 for (; range_gen->non_empty(); ++range_gen) {
889 UpdateMemoryAccessState(accesses, *range_gen, barrier_action);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700890 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600891 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600892}
893
894bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
895 uint32_t regionCount, const VkBufferCopy *pRegions) const {
896 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600897 const auto *cb_context = GetAccessContext(commandBuffer);
898 assert(cb_context);
899 if (!cb_context) return skip;
900 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -0600901
John Zulauf3d84f1b2020-03-09 13:33:25 -0600902 // If we have no previous accesses, we have no hazards
903 // TODO: make this sub-resource capable
904 // TODO: make this general, and stuff it into templates/utility functions
905 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
906 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
907 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
908 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
909
910 for (uint32_t region = 0; region < regionCount; region++) {
911 const auto &copy_region = pRegions[region];
912 if (src_mem != VK_NULL_HANDLE) {
913 ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
914 auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
915 SYNC_TRANSFER_TRANSFER_READ, src_range);
916 if (hazard.hazard) {
917 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -0600918 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
919 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
920 report_data->FormatHandle(srcBuffer).c_str(), region);
John Zulauf9cb530d2019-09-30 14:14:10 -0600921 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600922 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600923 if ((dst_mem != VK_NULL_HANDLE) && !skip) {
924 ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
925 auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
926 SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
927 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -0600928 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
929 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
930 report_data->FormatHandle(dstBuffer).c_str(), region);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600931 }
932 }
933 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600934 }
935 return skip;
936}
937
938void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
939 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600940 auto *cb_context = GetAccessContext(commandBuffer);
941 assert(cb_context);
942 auto *context = cb_context->GetCurrentAccessContext();
943
John Zulauf9cb530d2019-09-30 14:14:10 -0600944 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600945 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
946 const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600947
John Zulauf9cb530d2019-09-30 14:14:10 -0600948 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600949 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
950 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf9cb530d2019-09-30 14:14:10 -0600951
952 for (uint32_t region = 0; region < regionCount; region++) {
953 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -0600954 if (src_mem) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700955 ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf5f13a792020-03-10 07:31:21 -0600956 context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -0600957 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600958 if (dst_mem) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700959 ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf5f13a792020-03-10 07:31:21 -0600960 context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700961 }
962 }
963}
964
965bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
966 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
967 const VkImageCopy *pRegions) const {
968 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600969 const auto *cb_access_context = GetAccessContext(commandBuffer);
970 assert(cb_access_context);
971 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700972
John Zulauf3d84f1b2020-03-09 13:33:25 -0600973 const auto *context = cb_access_context->GetCurrentAccessContext();
974 assert(context);
975 if (!context) return skip;
976
977 const auto *src_image = Get<IMAGE_STATE>(srcImage);
978 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarg296a3c92020-03-25 01:04:29 -0600979 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600980 for (uint32_t region = 0; region < regionCount; region++) {
981 const auto &copy_region = pRegions[region];
982 if (src_image) {
locke-lunarg296a3c92020-03-25 01:04:29 -0600983 auto hazard = context->DetectHazard(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -0600984 copy_region.srcOffset, copy_region.extent);
985 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -0600986 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
987 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
988 report_data->FormatHandle(srcImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700989 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600990 }
991
992 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -0700993 VkExtent3D dst_copy_extent =
994 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
locke-lunarg296a3c92020-03-25 01:04:29 -0600995 auto hazard = context->DetectHazard(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -0700996 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600997 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -0600998 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
999 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1000 report_data->FormatHandle(dstImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001001 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07001002 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07001003 }
1004 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001005
John Zulauf5c5e88d2019-12-26 11:22:02 -07001006 return skip;
1007}
1008
void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_image = Get<IMAGE_STATE>(dstImage);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
                                       copy_region.srcOffset, copy_region.extent, tag);
        }
        if (dst_image) {
            VkExtent3D dst_copy_extent =
                GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
            context->UpdateAccessState(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
                                       copy_region.dstOffset, dst_copy_extent, tag);
        }
    }
}

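// Barrier validation currently covers only image layout transitions: each transition is checked against prior accesses using
// the barrier's source execution and access scopes.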
bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
    const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
    const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
    // Validate Image Layout transitions
    for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
        const auto &barrier = pImageMemoryBarriers[index];
        if (barrier.newLayout == barrier.oldLayout) continue;  // Only interested in layout transitions at this point.
        const auto *image_state = Get<IMAGE_STATE>(barrier.image);
        if (!image_state) continue;
        const auto hazard = DetectImageBarrierHazard(*cmd, *context, *image_state, src_exec_scope, src_stage_accesses, barrier);
        if (hazard.hazard) {
            // TODO -- add tag information to log msg when useful.
            skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s", string_SyncHazard(hazard.hazard),
                             index, report_data->FormatHandle(barrier.image).c_str());
        }
    }

    return skip;
}

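// Apply the barrier to the recorded access state. Stage masks are expanded (ALL_COMMANDS/ALL_GRAPHICS) and extended to
// logically earlier (source) and later (destination) stages, then the buffer, image, and global barriers are applied in turn.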
void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                    uint32_t bufferMemoryBarrierCount,
                                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                    uint32_t imageMemoryBarrierCount,
                                                    const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;
    auto access_context = cb_access_context->GetCurrentAccessContext();
    assert(access_context);
    if (!access_context) return;

    const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
    auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
    const auto dst_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), dstStageMask);
    auto dst_stage_accesses = AccessScopeByStage(dst_stage_mask);
    const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    const auto dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
    ApplyBufferBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
                        bufferMemoryBarrierCount, pBufferMemoryBarriers);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
    ApplyImageBarriers(*cmd, access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
                       imageMemoryBarrierCount, pImageMemoryBarriers);

    // Apply these last, in case the operation is a superset of the other two and would clean them up...
    ApplyGlobalBarriers(access_context, src_exec_scope, dst_exec_scope, src_stage_accesses, dst_stage_accesses, memoryBarrierCount,
                        pMemoryBarriers);
}

void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    // The state tracker sets up the device state
    StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);

    // Add callback hooks for functions that are used broadly or deeply enough that the ValidationStateTracker refactor
    // would be messier without them.
    // TODO: Find a good way to do this hooklessly.
    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
    SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);

    sync_device_state->SetCommandBufferResetCallback(
        [sync_device_state](VkCommandBuffer command_buffer) -> void { sync_device_state->ResetCommandBuffer(command_buffer); });
}

void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
                                                     VkResult result) {
    // The state tracker sets up the command buffer state
    StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);

    // Create/initialize the structure that tracks accesses at the command buffer scope.
    auto cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    cb_access_context->Reset();
}

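// Shared implementation for the vkCmdBeginRenderPass/2/2KHR entry points below.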
void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo) {
    const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
    auto cb_context = GetAccessContext(commandBuffer);
    if (rp_state && cb_context) {
        cb_context->BeginRenderPass(*rp_state);
    }
}

void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                         const VkRenderPassBeginInfo *pRenderPassBegin,
                                                         const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
}

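// Shared implementation for the vkCmdNextSubpass/2/2KHR entry points; advances render pass access tracking to the next subpass.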
void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                         const VkSubpassEndInfo *pSubpassEndInfo) {
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    auto cb_state = cb_context->GetCommandBufferState();
    if (!cb_state) return;

    auto rp_state = cb_state->activeRenderPass;
    if (!rp_state) return;

    cb_context->NextRenderPass(*rp_state);
}

void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
    auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr);
}

void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                  const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                     const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
}

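// Shared implementation for the vkCmdEndRenderPass/2/2KHR entry points below.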
void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    // Resolve all subpass contexts to the command buffer context
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    auto cb_state = cb_context->GetCommandBufferState();
    if (!cb_state) return;

    const auto *rp_state = cb_state->activeRenderPass;
    if (!rp_state) return;

    cb_context->EndRenderPass(*rp_state);
}

void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
    RecordCmdEndRenderPass(commandBuffer, nullptr);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
}

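// vkCmdCopyBufferToImage is tracked per region as a read of the source buffer's memory range (from bufferOffset and the
// image copy footprint) plus a write to the destination image subresource.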
bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                        VkImageLayout dstImageLayout, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const auto *dst_image = Get<IMAGE_STATE>(dstImage);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_mem) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(
                *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
            auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_READ, src_range);
            if (hazard.hazard) {
                // TODO -- add tag information to log msg when useful.
                skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBufferToImage: Hazard %s for srcBuffer %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
            }
        }
        if (dst_image) {
            auto hazard = context->DetectHazard(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
                                                copy_region.imageOffset, copy_region.imageExtent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBufferToImage: Hazard %s for dstImage %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
            }
            if (skip) break;
        }
        if (skip) break;
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                      VkImageLayout dstImageLayout, uint32_t regionCount,
                                                      const VkBufferImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);

    auto *dst_image = Get<IMAGE_STATE>(dstImage);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_buffer) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(
                *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
            context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
                                       copy_region.imageOffset, copy_region.imageExtent, tag);
        }
    }
}

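// vkCmdCopyImageToBuffer mirrors the buffer-to-image case: a read of the source image subresource plus a write to the
// destination buffer's memory range.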
bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                        VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
                                                copy_region.imageOffset, copy_region.imageExtent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImageToBuffer: Hazard %s for srcImage %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
            }
        }
        if (dst_mem) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(
                *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
            auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
            if (hazard.hazard) {
                skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImageToBuffer: Hazard %s for dstBuffer %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
            }
        }
        if (skip) break;
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                      VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
                                       copy_region.imageOffset, copy_region.imageExtent, tag);
        }
        if (dst_buffer) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(
                *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
            context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
        }
    }
}

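// Blit regions are specified as two corner offsets, so the extent used for hazard detection is derived from their difference.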
bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageBlit *pRegions, VkFilter filter) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    const auto *dst_image = Get<IMAGE_STATE>(dstImage);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
            auto hazard = context->DetectHazard(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
                                                blit_region.srcOffsets[0], extent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdBlitImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(srcImage).c_str(), region);
            }
        }

        if (dst_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
            auto hazard = context->DetectHazard(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
                                                blit_region.dstOffsets[0], extent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdBlitImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(dstImage).c_str(), region);
            }
            if (skip) break;
        }
    }

    return skip;
}

void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageBlit *pRegions, VkFilter filter) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_image = Get<IMAGE_STATE>(dstImage);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
            context->UpdateAccessState(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
                                       blit_region.srcOffsets[0], extent, tag);
        }
        if (dst_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
            context->UpdateAccessState(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
                                       blit_region.dstOffsets[0], extent, tag);
        }
    }
}