John Zulauf9cb530d2019-09-30 14:14:10 -06001/* Copyright (c) 2019 The Khronos Group Inc.
2 * Copyright (c) 2019 Valve Corporation
3 * Copyright (c) 2019 LunarG, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 * Author: John Zulauf <jzulauf@lunarg.com>
18 */
19
20#include <limits>
21#include <vector>
locke-lunarg296a3c92020-03-25 01:04:29 -060022#include <memory>
23#include <bitset>
John Zulauf9cb530d2019-09-30 14:14:10 -060024#include "synchronization_validation.h"
25
26static const char *string_SyncHazardVUID(SyncHazard hazard) {
27 switch (hazard) {
28 case SyncHazard::NONE:
John Zulauf2f952d22020-02-10 11:34:51 -070029 return "SYNC-HAZARD-NONE";
John Zulauf9cb530d2019-09-30 14:14:10 -060030 break;
31 case SyncHazard::READ_AFTER_WRITE:
32 return "SYNC-HAZARD-READ_AFTER_WRITE";
33 break;
34 case SyncHazard::WRITE_AFTER_READ:
35 return "SYNC-HAZARD-WRITE_AFTER_READ";
36 break;
37 case SyncHazard::WRITE_AFTER_WRITE:
38 return "SYNC-HAZARD-WRITE_AFTER_WRITE";
39 break;
John Zulauf2f952d22020-02-10 11:34:51 -070040 case SyncHazard::READ_RACING_WRITE:
41 return "SYNC-HAZARD-READ-RACING-WRITE";
42 break;
43 case SyncHazard::WRITE_RACING_WRITE:
44 return "SYNC-HAZARD-WRITE-RACING-WRITE";
45 break;
46 case SyncHazard::WRITE_RACING_READ:
47 return "SYNC-HAZARD-WRITE-RACING-READ";
48 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060049 default:
50 assert(0);
51 }
52 return "SYNC-HAZARD-INVALID";
53}
54
55static const char *string_SyncHazard(SyncHazard hazard) {
56 switch (hazard) {
57 case SyncHazard::NONE:
58            return "NONE";
59 break;
60 case SyncHazard::READ_AFTER_WRITE:
61 return "READ_AFTER_WRITE";
62 break;
63 case SyncHazard::WRITE_AFTER_READ:
64 return "WRITE_AFTER_READ";
65 break;
66 case SyncHazard::WRITE_AFTER_WRITE:
67 return "WRITE_AFTER_WRITE";
68 break;
John Zulauf2f952d22020-02-10 11:34:51 -070069 case SyncHazard::READ_RACING_WRITE:
70 return "READ_RACING_WRITE";
71 break;
72 case SyncHazard::WRITE_RACING_WRITE:
73 return "WRITE_RACING_WRITE";
74 break;
75 case SyncHazard::WRITE_RACING_READ:
76 return "WRITE_RACING_READ";
77 break;
John Zulauf9cb530d2019-09-30 14:14:10 -060078 default:
79 assert(0);
80 }
81 return "INVALID HAZARD";
82}
83
John Zulauf0cb5be22020-01-23 12:18:22 -070084// Expand the pipeline stages without regard to whether they are valid w.r.t. queue or extension
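// e.g. VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is replaced with the graphics-queue stage set (minus HOST), and
// VK_PIPELINE_STAGE_ALL_COMMANDS_BIT with the stage sets of every capability present in queue_flags.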
85VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
86 VkPipelineStageFlags expanded = stage_mask;
87 if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
88 expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
89 for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
90 if (all_commands.first & queue_flags) {
91 expanded |= all_commands.second;
92 }
93 }
94 }
95 if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
96 expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
97 expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
98 }
99 return expanded;
100}
101
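// Accumulate, for every stage bit set in stage_mask, the related stages given by the map
// (e.g. the logically earlier or logically later stages).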
John Zulauf36bcf6a2020-02-03 15:12:52 -0700102VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
103 std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
104 VkPipelineStageFlags unscanned = stage_mask;
105 VkPipelineStageFlags related = 0;
106 for (const auto entry : map) {
107 const auto stage = entry.first;
108 if (stage & unscanned) {
109 related = related | entry.second;
110 unscanned = unscanned & ~stage;
111 if (!unscanned) break;
112 }
113 }
114 return related;
115}
116
117VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
118 return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
119}
120
121VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
122 return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
123}
124
John Zulauf5c5e88d2019-12-26 11:22:02 -0700125static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
126static ResourceAccessRange MakeMemoryAccessRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600127 assert(!buffer.sparse);
128 const auto base = offset + buffer.binding.offset;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700129 return ResourceAccessRange(base, base + size);
130}
131
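// Build the access context for a subpass from the render pass dependency graph: wire up track-backs to the previous
// subpass contexts, the list of asynchronous (unordered) subpasses, and the barriers to/from the external context.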
John Zulauf540266b2020-04-06 18:54:53 -0600132AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
133 const std::vector<SubpassDependencyGraphNode> &dependencies,
134 const std::vector<AccessContext> &contexts, AccessContext *external_context) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600135 Reset();
136 const auto &subpass_dep = dependencies[subpass];
137 prev_.reserve(subpass_dep.prev.size());
138 for (const auto &prev_dep : subpass_dep.prev) {
139 assert(prev_dep.dependency);
140 const auto dep = *prev_dep.dependency;
John Zulauf540266b2020-04-06 18:54:53 -0600141 prev_.emplace_back(const_cast<AccessContext *>(&contexts[dep.srcSubpass]), queue_flags, dep);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700142 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600143
144 async_.reserve(subpass_dep.async.size());
145 for (const auto async_subpass : subpass_dep.async) {
John Zulauf540266b2020-04-06 18:54:53 -0600146 async_.emplace_back(const_cast<AccessContext *>(&contexts[async_subpass]));
John Zulauf3d84f1b2020-03-09 13:33:25 -0600147 }
John Zulaufe5da6e52020-03-18 15:32:18 -0600148 if (subpass_dep.barrier_from_external) {
149 src_external_ = TrackBack(external_context, queue_flags, *subpass_dep.barrier_from_external);
150 } else {
151 src_external_ = TrackBack();
152 }
153 if (subpass_dep.barrier_to_external) {
154 dst_external_ = TrackBack(this, queue_flags, *subpass_dep.barrier_to_external);
155 } else {
156 dst_external_ = TrackBack();
John Zulauf3d84f1b2020-03-09 13:33:25 -0600157 }
John Zulauf5c5e88d2019-12-26 11:22:02 -0700158}
159
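// Resolve the accesses from prior contexts (with their barriers applied) into a local map, then run the detector
// over the resolved entries.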
John Zulauf5f13a792020-03-10 07:31:21 -0600160template <typename Detector>
John Zulauf540266b2020-04-06 18:54:53 -0600161HazardResult AccessContext::DetectPreviousHazard(const VulkanTypedHandle &handle, const Detector &detector,
162 const ResourceAccessRange &range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600163 ResourceAccessRangeMap descent_map;
164 ResourceAccessState default_state; // When present, PreviousAccess will "infill"
165 ResolvePreviousAccess(handle, range, &descent_map, &default_state);
166
167 HazardResult hazard;
168 for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
169 hazard = detector.Detect(prev);
170 }
171 return hazard;
172}
173
John Zulauf3d84f1b2020-03-09 13:33:25 -0600174// A recursive range walker for hazard detection: check the current context first, then recur (via DetectPreviousHazard) into
175// the DAG of prior contexts (for example, subpasses)
176template <typename Detector>
John Zulauf540266b2020-04-06 18:54:53 -0600177HazardResult AccessContext::DetectHazard(const VulkanTypedHandle &handle, const Detector &detector,
178 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600179 HazardResult hazard;
John Zulauf5f13a792020-03-10 07:31:21 -0600180
181 // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
182 // so we'll check these first
183 for (const auto &async_context : async_) {
184 hazard = async_context->DetectAsyncHazard(handle, detector, range);
185 if (hazard.hazard) return hazard;
186 }
187
188 const auto access_tracker = GetAccessTracker(handle);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600189 if (access_tracker) {
190 const auto &accesses = access_tracker->GetCurrentAccessMap();
191 const auto from = accesses.lower_bound(range);
192 if (from != accesses.end() && from->first.intersects(range)) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600193 const auto to = accesses.upper_bound(range);
194 ResourceAccessRange gap = {range.begin, range.begin};
195 for (auto pos = from; pos != to; ++pos) {
John Zulauf5f13a792020-03-10 07:31:21 -0600196 hazard = detector.Detect(pos);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600197 if (hazard.hazard) return hazard;
198
John Zulauf5f13a792020-03-10 07:31:21 -0600199 // make sure we don't go past range
John Zulauf3d84f1b2020-03-09 13:33:25 -0600200 auto upper_bound = std::min(range.end, pos->first.end);
John Zulauf5f13a792020-03-10 07:31:21 -0600201 gap.end = upper_bound;
202
203 // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
John Zulauf3d84f1b2020-03-09 13:33:25 -0600204 if (!gap.empty()) {
205 // Must recur on all gaps
John Zulauf5f13a792020-03-10 07:31:21 -0600206 hazard = DetectPreviousHazard(handle, detector, gap);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600207 if (hazard.hazard) return hazard;
208 }
209 gap.begin = upper_bound;
210 }
John Zulauf5f13a792020-03-10 07:31:21 -0600211 gap.end = range.end;
212 if (gap.non_empty()) {
213 hazard = DetectPreviousHazard(handle, detector, gap);
214 if (hazard.hazard) return hazard;
215 }
216 } else {
217 hazard = DetectPreviousHazard(handle, detector, range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600218 }
John Zulauf5f13a792020-03-10 07:31:21 -0600219 } else {
220 hazard = DetectPreviousHazard(handle, detector, range);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600221 }
222
223 return hazard;
224}
225
226// A non-recursive range walker for the asynchronous contexts (those with which we have no barriers)
227template <typename Detector>
John Zulauf540266b2020-04-06 18:54:53 -0600228HazardResult AccessContext::DetectAsyncHazard(const VulkanTypedHandle &handle, const Detector &detector,
229 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600230 const auto access_tracker = GetAccessTracker(handle);
231 HazardResult hazard;
232 if (access_tracker) {
233 auto accesses = access_tracker->GetCurrentAccessMap();
234 const auto from = accesses.lower_bound(range);
235 const auto to = accesses.upper_bound(range);
236 for (auto pos = from; pos != to; ++pos) {
237 hazard = detector.DetectAsync(pos);
238 if (hazard.hazard) break;
239 }
240 }
241 return hazard;
242}
243
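// Merge this context's accesses for the given range into *descent_map, applying the track_back barrier to each entry.
// Gaps are filled by recursing into the previous context (when recur_to_infill) and/or with *infill_state.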
John Zulauf540266b2020-04-06 18:54:53 -0600244void AccessContext::ResolveTrackBack(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
245 const AccessContext::TrackBack &track_back, ResourceAccessRangeMap *descent_map,
246 const ResourceAccessState *infill_state, bool recur_to_infill) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600247 const auto *access_tracker = GetAccessTracker(handle);
248 if (access_tracker) {
249 sparse_container::parallel_iterator<ResourceAccessRangeMap, const ResourceAccessRangeMap> current(
250 *descent_map, access_tracker->GetCurrentAccessMap(), range.begin);
251 while (current->range.non_empty()) {
252 if (current->pos_B->valid) {
John Zulauf62f10592020-04-03 12:20:02 -0600253 const auto &src_pos = current->pos_B->lower_bound;
254 auto access_with_barrier = src_pos->second;
John Zulauf5f13a792020-03-10 07:31:21 -0600255 access_with_barrier.ApplyBarrier(track_back.barrier);
256 if (current->pos_A->valid) {
John Zulauf62f10592020-04-03 12:20:02 -0600257 current.trim_A();
John Zulauf5f13a792020-03-10 07:31:21 -0600258 current->pos_A->lower_bound->second.Resolve(access_with_barrier);
259 } else {
260 descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, access_with_barrier));
261 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after split(s)
262 }
263 } else {
264 // we have to descend to fill this gap
John Zulaufe5da6e52020-03-18 15:32:18 -0600265 if (recur_to_infill) {
John Zulauf62f10592020-04-03 12:20:02 -0600266 track_back.context->ResolvePreviousAccess(handle, current->range, descent_map, infill_state);
John Zulaufe5da6e52020-03-18 15:32:18 -0600267 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after recursion.
268 }
John Zulauf5f13a792020-03-10 07:31:21 -0600269 if (!current->pos_A->valid && infill_state) {
270 // If we didn't find anything in the previous range, we infill with default to prevent repeating
271 // a fruitless search
272 descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
273 current.invalidate_A(); // Update the parallel iterator to point at the correct segment after insert
274 }
275 }
276 ++current;
277 }
John Zulauf62f10592020-04-03 12:20:02 -0600278 } else if (recur_to_infill) {
279 track_back.context->ResolvePreviousAccess(handle, range, descent_map, infill_state);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600280 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600281}
282
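// Walk the previous-dependency DAG (and the external context, if any) to fill *descent_map for the given range;
// if there is nothing to recurse into, the range is filled with *infill_state.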
John Zulauf540266b2020-04-06 18:54:53 -0600283void AccessContext::ResolvePreviousAccess(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
284 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
John Zulaufe5da6e52020-03-18 15:32:18 -0600285 if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
John Zulauf5f13a792020-03-10 07:31:21 -0600286 if (range.non_empty() && infill_state) {
287 descent_map->insert(std::make_pair(range, *infill_state));
288 }
289 } else {
290 // Look for something to fill the gap further along.
291 for (const auto &prev_dep : prev_) {
292 ResolveTrackBack(handle, range, prev_dep, descent_map, infill_state);
293 }
294
John Zulaufe5da6e52020-03-18 15:32:18 -0600295 if (src_external_.context) {
296 ResolveTrackBack(handle, range, src_external_, descent_map, infill_state);
John Zulauf5f13a792020-03-10 07:31:21 -0600297 }
298 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600299}
300
John Zulauf540266b2020-04-06 18:54:53 -0600301void AccessContext::ResolvePreviousAccess(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
302 ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
John Zulauf62f10592020-04-03 12:20:02 -0600303 const VulkanTypedHandle image_handle(image_state.image, kVulkanObjectTypeImage);
304 auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600305 subresource_adapter::ImageRangeGenerator range_gen(image_state.fragment_encoder, subresource_range, {0, 0, 0},
306 image_state.createInfo.extent);
John Zulauf62f10592020-04-03 12:20:02 -0600307 for (; range_gen->non_empty(); ++range_gen) {
308 ResolvePreviousAccess(image_handle, *range_gen, descent_map, infill_state);
309 }
310}
311
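// Detector functor plugged into the generic range walkers above: checks a single access-map entry for a hazard
// against the given usage, with a separate path for asynchronous (unordered) contexts.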
John Zulauf3d84f1b2020-03-09 13:33:25 -0600312class HazardDetector {
313 SyncStageAccessIndex usage_index_;
314
315 public:
John Zulauf5f13a792020-03-10 07:31:21 -0600316 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600317 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
318 return pos->second.DetectAsyncHazard(usage_index_);
319 }
320 HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
321};
322
John Zulauf540266b2020-04-06 18:54:53 -0600323HazardResult AccessContext::DetectHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex usage_index,
324 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600325 HazardDetector detector(usage_index);
326 return DetectHazard(handle, detector, range);
327}
328
329void CommandBufferAccessContext::BeginRenderPass(const RENDER_PASS_STATE &rp_state) {
330    // Create an access context for the first subpass and add it to the command buffer's collection
331 render_pass_contexts_.emplace_back(queue_flags_, &rp_state.subpass_dependencies, &cb_tracker_context_);
332 current_renderpass_context_ = &render_pass_contexts_.back();
333 current_context_ = &current_renderpass_context_->CurrentContext();
John Zulaufe5da6e52020-03-18 15:32:18 -0600334
335 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
John Zulauf3d84f1b2020-03-09 13:33:25 -0600336}
337
338void CommandBufferAccessContext::NextRenderPass(const RENDER_PASS_STATE &rp_state) {
339 assert(current_renderpass_context_);
340 current_renderpass_context_->NextSubpass(queue_flags_, &cb_tracker_context_);
John Zulaufe5da6e52020-03-18 15:32:18 -0600341 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
John Zulauf3d84f1b2020-03-09 13:33:25 -0600342 current_context_ = &current_renderpass_context_->CurrentContext();
343}
344
John Zulaufe5da6e52020-03-18 15:32:18 -0600345void CommandBufferAccessContext::EndRenderPass(const RENDER_PASS_STATE &render_pass) {
John Zulauf540266b2020-04-06 18:54:53 -0600346 // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
John Zulaufe5da6e52020-03-18 15:32:18 -0600347 assert(current_renderpass_context_);
348 if (!current_renderpass_context_) return;
349
350 const auto &contexts = current_renderpass_context_->subpass_contexts_;
John Zulauf540266b2020-04-06 18:54:53 -0600351 cb_tracker_context_.ResolveChildContexts(contexts);
John Zulaufe5da6e52020-03-18 15:32:18 -0600352
John Zulaufe5da6e52020-03-18 15:32:18 -0600353 current_context_ = &cb_tracker_context_;
354 current_renderpass_context_ = nullptr;
355}
356
John Zulauf540266b2020-04-06 18:54:53 -0600357HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
358 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
359 const VkExtent3D &extent) const {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700360 // TODO: replace the encoder/generator with offset3D/extent3D aware versions
361 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
362 subresource.layerCount};
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600363 subresource_adapter::ImageRangeGenerator range_gen(image.fragment_encoder, subresource_range, offset, extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600364 VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700365 for (; range_gen->non_empty(); ++range_gen) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600366 HazardResult hazard = DetectHazard(image_handle, current_usage, *range_gen);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700367 if (hazard.hazard) return hazard;
368 }
369 return HazardResult();
John Zulauf9cb530d2019-09-30 14:14:10 -0600370}
371
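// Detector functor for barrier validation: checks a single access-map entry against the barrier's first
// execution/access scopes (currently used for image layout transitions).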
John Zulauf3d84f1b2020-03-09 13:33:25 -0600372class BarrierHazardDetector {
373 public:
374 BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
375 SyncStageAccessFlags src_access_scope)
376 : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
377
John Zulauf5f13a792020-03-10 07:31:21 -0600378 HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
379 return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
John Zulauf0cb5be22020-01-23 12:18:22 -0700380 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600381 HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
382        // Async barrier hazard detection can use the same path, since the usage index is not IsRead but IsWrite
383 return pos->second.DetectAsyncHazard(usage_index_);
384 }
385
386 private:
387 SyncStageAccessIndex usage_index_;
388 VkPipelineStageFlags src_exec_scope_;
389 SyncStageAccessFlags src_access_scope_;
390};
391
John Zulauf540266b2020-04-06 18:54:53 -0600392HazardResult AccessContext::DetectBarrierHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
393 VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
394 const ResourceAccessRange &range) const {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600395 BarrierHazardDetector detector(current_usage, src_exec_scope, src_access_scope);
396 return DetectHazard(handle, detector, range);
John Zulauf0cb5be22020-01-23 12:18:22 -0700397}
398
John Zulauf540266b2020-04-06 18:54:53 -0600399HazardResult DetectImageBarrierHazard(const AccessContext &context, const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
400 SyncStageAccessFlags src_stage_accesses, const VkImageMemoryBarrier &barrier) {
John Zulauf0cb5be22020-01-23 12:18:22 -0700401 auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600402 const VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
John Zulauf36bcf6a2020-02-03 15:12:52 -0700403 const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600404 subresource_adapter::ImageRangeGenerator range_gen(image.fragment_encoder, subresource_range, {0, 0, 0},
405 image.createInfo.extent);
locke-lunarg296a3c92020-03-25 01:04:29 -0600406 for (; range_gen->non_empty(); ++range_gen) {
407 HazardResult hazard = context.DetectBarrierHazard(image_handle, SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION,
408 src_exec_scope, src_access_scope, *range_gen);
409 if (hazard.hazard) return hazard;
John Zulauf0cb5be22020-01-23 12:18:22 -0700410 }
411 return HazardResult();
412}
413
John Zulauf9cb530d2019-09-30 14:14:10 -0600414template <typename Flags, typename Map>
415SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
416 SyncStageAccessFlags scope = 0;
417 for (const auto &bit_scope : map) {
418 if (flag_mask < bit_scope.first) break;
419
420 if (flag_mask & bit_scope.first) {
421 scope |= bit_scope.second;
422 }
423 }
424 return scope;
425}
426
427SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
428 return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
429}
430
431SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
432 return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
433}
434
435// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
436SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
John Zulauf5f13a792020-03-10 07:31:21 -0600437    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
438    // accesses. (After factoring out common terms, the union of the per-stage/per-access intersections is the intersection
439    // of the union of all stage/access types for all the stages with the same union for the access mask.)
John Zulauf9cb530d2019-09-30 14:14:10 -0600440 return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
441}
442
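// Generic walker over an access map for a range: infill leading and interior gaps via action.Infill, split existing
// entries at the range boundaries, and apply the action to every overlapping segment.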
443template <typename Action>
John Zulauf5c5e88d2019-12-26 11:22:02 -0700444void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600445    // TODO -- region/mem-range accurate update
446 auto pos = accesses->lower_bound(range);
447 if (pos == accesses->end() || !pos->first.intersects(range)) {
448 // The range is empty, fill it with a default value.
449 pos = action.Infill(accesses, pos, range);
450 } else if (range.begin < pos->first.begin) {
451 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -0700452 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -0600453 } else if (pos->first.begin < range.begin) {
454 // Trim the beginning if needed
455 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
456 ++pos;
457 }
458
459 const auto the_end = accesses->end();
460 while ((pos != the_end) && pos->first.intersects(range)) {
461 if (pos->first.end > range.end) {
462 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
463 }
464
465 pos = action(accesses, pos);
466 if (pos == the_end) break;
467
468 auto next = pos;
469 ++next;
470 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
471 // Need to infill if next is disjoint
472 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700473 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -0600474 next = action.Infill(accesses, next, new_range);
475 }
476 pos = next;
477 }
478}
479
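// Action for UpdateMemoryAccessState: gaps are infilled by resolving previous accesses through the context, and every
// segment is then stamped with the usage and tag.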
480struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700481 using Iterator = ResourceAccessRangeMap::iterator;
482 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600483 // this is only called on gaps, and never returns a gap.
484 ResourceAccessState default_state;
485 context.ResolvePreviousAccess(handle, range, accesses, &default_state);
486 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -0600487 }
John Zulauf5f13a792020-03-10 07:31:21 -0600488
John Zulauf5c5e88d2019-12-26 11:22:02 -0700489 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600490 auto &access_state = pos->second;
491 access_state.Update(usage, tag);
492 return pos;
493 }
494
John Zulauf540266b2020-04-06 18:54:53 -0600495 UpdateMemoryAccessStateFunctor(const VulkanTypedHandle &handle_, const AccessContext &context_, SyncStageAccessIndex usage_,
496 const ResourceUsageTag &tag_)
John Zulauf5f13a792020-03-10 07:31:21 -0600497 : handle(handle_), context(context_), usage(usage_), tag(tag_) {}
498 const VulkanTypedHandle handle;
John Zulauf540266b2020-04-06 18:54:53 -0600499 const AccessContext &context;
John Zulauf9cb530d2019-09-30 14:14:10 -0600500 SyncStageAccessIndex usage;
501 const ResourceUsageTag &tag;
502};
503
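// Action that applies a single memory-access barrier (src/dst execution and access scopes) to each existing segment;
// gaps are left untouched.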
504struct ApplyMemoryAccessBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700505 using Iterator = ResourceAccessRangeMap::iterator;
506 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600507
John Zulauf5c5e88d2019-12-26 11:22:02 -0700508 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600509 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700510 access_state.ApplyMemoryAccessBarrier(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600511 return pos;
512 }
513
John Zulauf36bcf6a2020-02-03 15:12:52 -0700514 ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_exec_scope_, SyncStageAccessFlags src_access_scope_,
515 VkPipelineStageFlags dst_exec_scope_, SyncStageAccessFlags dst_access_scope_)
516 : src_exec_scope(src_exec_scope_),
517 src_access_scope(src_access_scope_),
518 dst_exec_scope(dst_exec_scope_),
519 dst_access_scope(dst_access_scope_) {}
John Zulauf9cb530d2019-09-30 14:14:10 -0600520
John Zulauf36bcf6a2020-02-03 15:12:52 -0700521 VkPipelineStageFlags src_exec_scope;
522 SyncStageAccessFlags src_access_scope;
523 VkPipelineStageFlags dst_exec_scope;
524 SyncStageAccessFlags dst_access_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600525};
526
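// Action that applies an execution barrier plus the list of global VkMemoryBarriers to each existing segment.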
527struct ApplyGlobalBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700528 using Iterator = ResourceAccessRangeMap::iterator;
529 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600530
John Zulauf5c5e88d2019-12-26 11:22:02 -0700531 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600532 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700533 access_state.ApplyExecutionBarrier(src_exec_scope, dst_exec_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600534
535 for (const auto &functor : barrier_functor) {
536 functor(accesses, pos);
537 }
538 return pos;
539 }
540
John Zulauf36bcf6a2020-02-03 15:12:52 -0700541 ApplyGlobalBarrierFunctor(VkPipelineStageFlags src_exec_scope, VkPipelineStageFlags dst_exec_scope,
542 SyncStageAccessFlags src_stage_accesses, SyncStageAccessFlags dst_stage_accesses,
John Zulauf9cb530d2019-09-30 14:14:10 -0600543 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700544 : src_exec_scope(src_exec_scope), dst_exec_scope(dst_exec_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600545 // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
546 barrier_functor.reserve(memoryBarrierCount);
547 for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
548 const auto &barrier = pMemoryBarriers[barrier_index];
John Zulauf36bcf6a2020-02-03 15:12:52 -0700549 barrier_functor.emplace_back(src_exec_scope, SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask),
550 dst_exec_scope, SyncStageAccess::AccessScope(dst_stage_accesses, barrier.dstAccessMask));
John Zulauf9cb530d2019-09-30 14:14:10 -0600551 }
552 }
553
John Zulauf36bcf6a2020-02-03 15:12:52 -0700554 const VkPipelineStageFlags src_exec_scope;
555 const VkPipelineStageFlags dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600556 std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
557};
558
John Zulauf540266b2020-04-06 18:54:53 -0600559void AccessContext::UpdateAccessState(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
560 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf5f13a792020-03-10 07:31:21 -0600561 UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600562 auto *tracker = GetAccessTracker(handle);
563 assert(tracker);
John Zulauf5f13a792020-03-10 07:31:21 -0600564 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600565}
566
John Zulauf540266b2020-04-06 18:54:53 -0600567void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
568 const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
569 const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700570 // TODO: replace the encoder/generator with offset3D aware versions
571 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
572 subresource.layerCount};
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600573 subresource_adapter::ImageRangeGenerator range_gen(image.fragment_encoder, subresource_range, offset, extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600574 const VulkanTypedHandle handle(image.image, kVulkanObjectTypeImage);
575 auto *tracker = GetAccessTracker(handle);
576 assert(tracker);
John Zulauf5f13a792020-03-10 07:31:21 -0600577
578 UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
579 for (; range_gen->non_empty(); ++range_gen) {
580 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), *range_gen, action);
581 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600582}
583
John Zulauf540266b2020-04-06 18:54:53 -0600584template <typename Action>
585void AccessContext::UpdateMemoryAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
586 auto *tracker = GetAccessTracker(VulkanTypedHandle(buffer.binding.mem_state->mem, kVulkanObjectTypeDeviceMemory));
587 if (!tracker) return;
588 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), range, action);
589}
590
591template <typename Action>
592void AccessContext::UpdateMemoryAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
593 const Action action) {
594 auto tracker = GetAccessTrackerNoInsert(VulkanTypedHandle(image.image, kVulkanObjectTypeImage));
595 if (!tracker) return;
596 auto *accesses = &tracker->GetCurrentAccessMap();
597
locke-lunarg5f7d3c62020-04-07 00:10:39 -0600598 subresource_adapter::ImageRangeGenerator range_gen(image.fragment_encoder, subresource_range, {0, 0, 0},
599 image.createInfo.extent);
John Zulauf540266b2020-04-06 18:54:53 -0600600
601 for (; range_gen->non_empty(); ++range_gen) {
602 UpdateMemoryAccessState(accesses, *range_gen, action);
603 }
604}
605
606template <typename Action>
607void AccessContext::ApplyGlobalBarriers(const Action &barrier_action) {
608    // Note: Barriers do *not* cross context boundaries, applying to accesses within... (at least for renderpass subpasses)
609 for (auto &handle_tracker_pair : GetAccessTrackerMap()) {
610 UpdateMemoryAccessState(&handle_tracker_pair.second.GetCurrentAccessMap(), full_range, barrier_action);
611 }
612}
613
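// Import the subpass contexts back into this (command buffer) context at the end of a render pass: each handle is
// resolved once, from the later subpasses down to the first subpass that used it, applying the destination external
// barriers, and the result is spliced into this context's access map.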
614void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
615 std::unordered_set<VulkanTypedHandle> resolved;
616 for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
617 auto &context = contexts[subpass_index];
618 for (const auto &tracker_pair : context.GetAccessTrackerMap()) {
619 if (tracker_pair.second.GetCurrentAccessMap().size() == 0) continue;
620 auto insert_pair = resolved.insert(tracker_pair.first);
621 if (insert_pair.second) { // only create the resolve map for this handle if we haven't seen it before
622 // This is the first time we've seen this handle accessed, resolve this for all subsequent subpasses
623 ResourceAccessRangeMap resolve_map;
624 auto resolve_index = static_cast<uint32_t>(contexts.size());
625 while (resolve_index > subpass_index) {
626 resolve_index--;
627 const auto &from_context = contexts[resolve_index];
628 from_context.ResolveTrackBack(tracker_pair.first, full_range, from_context.GetDstExternalTrackBack(),
629 &resolve_map, nullptr, false);
630 }
631 // Given that all DAG paths lead back to the src_external_ (if only a default one) we can just overwrite.
632 sparse_container::splice(&GetAccessTracker(tracker_pair.first)->GetCurrentAccessMap(), resolve_map,
633 sparse_container::value_precedence::prefer_source);
634 // TODO: This might be a place to consolidate the map
635 }
636 }
637 }
638}
639
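// Convert a subpass dependency into a SyncBarrier: expand the stage masks (including logically earlier/later stages)
// and derive the corresponding access scopes.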
John Zulauf3d84f1b2020-03-09 13:33:25 -0600640SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &barrier) {
641 const auto src_stage_mask = ExpandPipelineStages(queue_flags, barrier.srcStageMask);
642 src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
643 src_access_scope = SyncStageAccess::AccessScope(src_stage_mask, barrier.srcAccessMask);
644 const auto dst_stage_mask = ExpandPipelineStages(queue_flags, barrier.dstStageMask);
645 dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
646 dst_access_scope = SyncStageAccess::AccessScope(dst_stage_mask, barrier.dstAccessMask);
647}
648
649void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier) {
650 ApplyExecutionBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
651 ApplyMemoryAccessBarrier(barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope);
652}
653
654ResourceAccessState ResourceAccessState::ApplyBarrierStack(const ResourceAccessState &that, const SyncBarrierStack &barrier_stack) {
655 ResourceAccessState copy = that;
656 for (auto barrier = barrier_stack.begin(); barrier != barrier_stack.end(); ++barrier) {
657 assert(*barrier);
658 copy.ApplyBarrier(*(*barrier));
659 }
660 return copy;
661}
662
663HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, SyncBarrierStack *barrier_stack) const {
664 if (barrier_stack) {
665 return ApplyBarrierStack(*this, *barrier_stack).DetectHazard(usage_index);
666 }
667 return DetectHazard(usage_index);
668}
669
John Zulauf9cb530d2019-09-30 14:14:10 -0600670HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
671 HazardResult hazard;
672 auto usage = FlagBit(usage_index);
673 if (IsRead(usage)) {
674 if (IsWriteHazard(usage)) {
675 hazard.Set(READ_AFTER_WRITE, write_tag);
676 }
677 } else {
678 // Assume write
679 // TODO determine what to do with READ-WRITE usage states if any
680 // Write-After-Write check -- if we have a previous write to test against
681 if (last_write && IsWriteHazard(usage)) {
682 hazard.Set(WRITE_AFTER_WRITE, write_tag);
683 } else {
684 // Only look for casus belli for WAR
685 const auto usage_stage = PipelineStageBit(usage_index);
686 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
687 if (IsReadHazard(usage_stage, last_reads[read_index])) {
688 hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
689 break;
690 }
691 }
692 }
693 }
694 return hazard;
695}
696
John Zulauf2f952d22020-02-10 11:34:51 -0700697// Asynchronous Hazards occur between subpasses with no connection through the DAG
John Zulauf3d84f1b2020-03-09 13:33:25 -0600698HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index) const {
John Zulauf2f952d22020-02-10 11:34:51 -0700699 HazardResult hazard;
700 auto usage = FlagBit(usage_index);
701 if (IsRead(usage)) {
702 if (last_write != 0) {
703 hazard.Set(READ_RACING_WRITE, write_tag);
704 }
705 } else {
706 if (last_write != 0) {
707 hazard.Set(WRITE_RACING_WRITE, write_tag);
708 } else if (last_read_count > 0) {
709 hazard.Set(WRITE_RACING_READ, last_reads[0].tag);
710 }
711 }
712 return hazard;
713}
714
John Zulauf36bcf6a2020-02-03 15:12:52 -0700715HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
John Zulauf3d84f1b2020-03-09 13:33:25 -0600716 SyncStageAccessFlags src_access_scope,
717 SyncBarrierStack *barrier_stack) const {
718 if (barrier_stack) {
719 return ApplyBarrierStack(*this, *barrier_stack).DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
720 }
721 return DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
722}
723
724HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700725 SyncStageAccessFlags src_access_scope) const {
John Zulauf0cb5be22020-01-23 12:18:22 -0700726 // Only supporting image layout transitions for now
727 assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
728 HazardResult hazard;
729 if (last_write) {
730 // If the previous write is *not* in the 1st access scope
731 // *AND* the current barrier is not in the dependency chain
732        // *AND* there is no prior memory barrier for the previous write in the dependency chain
733 // then the barrier access is unsafe (R/W after W)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700734 if (((last_write & src_access_scope) == 0) && ((src_exec_scope & write_dependency_chain) == 0) && (write_barriers == 0)) {
John Zulauf0cb5be22020-01-23 12:18:22 -0700735            // TODO: Do we need a different hazard name for this?
736 hazard.Set(WRITE_AFTER_WRITE, write_tag);
737 }
738 } else {
739 // Look at the reads
740 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
John Zulauf36bcf6a2020-02-03 15:12:52 -0700741 const auto &read_access = last_reads[read_index];
742            // If the read stage is not in the src sync scope
743            // *AND* not execution chained with an existing sync barrier (that's the | in the check below)
744 // then the barrier access is unsafe (R/W after R)
745 if ((src_exec_scope & (read_access.stage | read_access.barriers)) == 0) {
746 hazard.Set(WRITE_AFTER_READ, read_access.tag);
John Zulauf0cb5be22020-01-23 12:18:22 -0700747 break;
748 }
749 }
750 }
751 return hazard;
752}
753
John Zulauf5f13a792020-03-10 07:31:21 -0600754// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
755// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
756// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
757void ResourceAccessState::Resolve(const ResourceAccessState &other) {
758 if (write_tag.IsBefore(other.write_tag)) {
759        // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent operation
760 *this = other;
761 } else if (!other.write_tag.IsBefore(write_tag)) {
762        // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
763 // dependency chaining logic or any stage expansion)
764 write_barriers |= other.write_barriers;
765
766        // Merge the read states
767 for (uint32_t other_read_index = 0; other_read_index < other.last_read_count; other_read_index++) {
768 auto &other_read = other.last_reads[other_read_index];
769 if (last_read_stages & other_read.stage) {
770 // Merge in the barriers for read stages that exist in *both* this and other
771 // TODO: This is N^2 with stages... perhaps the ReadStates should be by stage index.
772 for (uint32_t my_read_index = 0; my_read_index < last_read_count; my_read_index++) {
773 auto &my_read = last_reads[my_read_index];
774 if (other_read.stage == my_read.stage) {
775 if (my_read.tag.IsBefore(other_read.tag)) {
776 my_read.tag = other_read.tag;
777 }
778 my_read.barriers |= other_read.barriers;
779 break;
780 }
781 }
782 } else {
783 // The other read stage doesn't exist in this, so add it.
784 last_reads[last_read_count] = other_read;
785 last_read_count++;
786 last_read_stages |= other_read.stage;
787 }
788 }
789    } // the else clause would be that the other write is before this write... in which case we supersede the other state and ignore
790 // it.
791}
792
John Zulauf9cb530d2019-09-30 14:14:10 -0600793void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
794    // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
795 const auto usage_bit = FlagBit(usage_index);
796 if (IsRead(usage_index)) {
797        // Multiple outstanding reads may be of interest and do dependency chains independently
798 // However, for purposes of barrier tracking, only one read per pipeline stage matters
799 const auto usage_stage = PipelineStageBit(usage_index);
800 if (usage_stage & last_read_stages) {
801 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
802 ReadState &access = last_reads[read_index];
803 if (access.stage == usage_stage) {
804 access.barriers = 0;
805 access.tag = tag;
806 break;
807 }
808 }
809 } else {
810 // We don't have this stage in the list yet...
811 assert(last_read_count < last_reads.size());
812 ReadState &access = last_reads[last_read_count++];
813 access.stage = usage_stage;
814 access.barriers = 0;
815 access.tag = tag;
816 last_read_stages |= usage_stage;
817 }
818 } else {
819 // Assume write
820 // TODO determine what to do with READ-WRITE operations if any
821 // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
822 // if the last_reads/last_write were unsafe, we've reported them,
823 // in either case the prior access is irrelevant, we can overwrite them as *this* write is now after them
824 last_read_count = 0;
825 last_read_stages = 0;
826
827 write_barriers = 0;
828 write_dependency_chain = 0;
829 write_tag = tag;
830 last_write = usage_bit;
831 }
832}
John Zulauf5f13a792020-03-10 07:31:21 -0600833
John Zulauf9cb530d2019-09-30 14:14:10 -0600834void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
835 // Execution Barriers only protect read operations
836 for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
837 ReadState &access = last_reads[read_index];
838 // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
839 if (srcStageMask & (access.stage | access.barriers)) {
840 access.barriers |= dstStageMask;
841 }
842 }
843 if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
844}
845
John Zulauf36bcf6a2020-02-03 15:12:52 -0700846void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
847 VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_access_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600848 // Assuming we've applied the execution side of this barrier, we update just the write
849 // The || implements the "dependency chain" logic for this barrier
John Zulauf36bcf6a2020-02-03 15:12:52 -0700850 if ((src_access_scope & last_write) || (write_dependency_chain & src_exec_scope)) {
851 write_barriers |= dst_access_scope;
852 write_dependency_chain |= dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600853 }
854}
855
856void SyncValidator::ResetCommandBuffer(VkCommandBuffer command_buffer) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600857 auto *access_context = GetAccessContextNoInsert(command_buffer);
858 if (access_context) {
859 access_context->Reset();
John Zulauf9cb530d2019-09-30 14:14:10 -0600860 }
861}
862
John Zulauf540266b2020-04-06 18:54:53 -0600863void SyncValidator::ApplyGlobalBarriers(AccessContext *context, VkPipelineStageFlags srcStageMask,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700864 VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_access_scope,
865 SyncStageAccessFlags dst_access_scope, uint32_t memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -0600866 const VkMemoryBarrier *pMemoryBarriers) {
867 // TODO: Implement this better (maybe some delayed/on-demand integration).
John Zulauf36bcf6a2020-02-03 15:12:52 -0700868 ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_access_scope, dst_access_scope, memoryBarrierCount,
John Zulauf9cb530d2019-09-30 14:14:10 -0600869 pMemoryBarriers);
John Zulauf540266b2020-04-06 18:54:53 -0600870 context->ApplyGlobalBarriers(barriers_functor);
John Zulauf9cb530d2019-09-30 14:14:10 -0600871}
872
John Zulauf540266b2020-04-06 18:54:53 -0600873void SyncValidator::ApplyBufferBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
John Zulauf36bcf6a2020-02-03 15:12:52 -0700874 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
875 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
John Zulauf9cb530d2019-09-30 14:14:10 -0600876 const VkBufferMemoryBarrier *barriers) {
877 // TODO Implement this at subresource/memory_range accuracy
878 for (uint32_t index = 0; index < barrier_count; index++) {
879 const auto &barrier = barriers[index];
880 const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
881 if (!buffer) continue;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700882 ResourceAccessRange range = MakeMemoryAccessRange(*buffer, barrier.offset, barrier.size);
John Zulauf540266b2020-04-06 18:54:53 -0600883 const auto src_access_scope = AccessScope(src_stage_accesses, barrier.srcAccessMask);
884 const auto dst_access_scope = AccessScope(dst_stage_accesses, barrier.dstAccessMask);
885 const ApplyMemoryAccessBarrierFunctor update_action(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
886 context->UpdateMemoryAccess(*buffer, range, update_action);
John Zulauf9cb530d2019-09-30 14:14:10 -0600887 }
888}
889
John Zulauf540266b2020-04-06 18:54:53 -0600890void SyncValidator::ApplyImageBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
891 SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
892 SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
893 const VkImageMemoryBarrier *barriers) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700894 for (uint32_t index = 0; index < barrier_count; index++) {
895 const auto &barrier = barriers[index];
896 const auto *image = Get<IMAGE_STATE>(barrier.image);
897 if (!image) continue;
locke-lunarg296a3c92020-03-25 01:04:29 -0600898 const ApplyMemoryAccessBarrierFunctor barrier_action(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask),
899 dst_exec_scope,
900 AccessScope(dst_stage_accesses, barrier.dstAccessMask));
John Zulauf540266b2020-04-06 18:54:53 -0600901
902 auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
903 context->UpdateMemoryAccess(*image, subresource_range, barrier_action);
John Zulauf9cb530d2019-09-30 14:14:10 -0600904 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600905}
906
907bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
908 uint32_t regionCount, const VkBufferCopy *pRegions) const {
909 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600910 const auto *cb_context = GetAccessContext(commandBuffer);
911 assert(cb_context);
912 if (!cb_context) return skip;
913 const auto *context = cb_context->GetCurrentAccessContext();
John Zulauf9cb530d2019-09-30 14:14:10 -0600914
John Zulauf3d84f1b2020-03-09 13:33:25 -0600915 // If we have no previous accesses, we have no hazards
916 // TODO: make this sub-resource capable
917 // TODO: make this general, and stuff it into templates/utility functions
918 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
919 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
920 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
921 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
922
923 for (uint32_t region = 0; region < regionCount; region++) {
924 const auto &copy_region = pRegions[region];
925 if (src_mem != VK_NULL_HANDLE) {
926 ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
927 auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
928 SYNC_TRANSFER_TRANSFER_READ, src_range);
929 if (hazard.hazard) {
930 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -0600931 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
932 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
933 report_data->FormatHandle(srcBuffer).c_str(), region);
John Zulauf9cb530d2019-09-30 14:14:10 -0600934 }
John Zulauf9cb530d2019-09-30 14:14:10 -0600935 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600936 if ((dst_mem != VK_NULL_HANDLE) && !skip) {
937 ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
938 auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
939 SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
940 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -0600941 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
942 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
943 report_data->FormatHandle(dstBuffer).c_str(), region);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600944 }
945 }
946 if (skip) break;
John Zulauf9cb530d2019-09-30 14:14:10 -0600947 }
948 return skip;
949}
950
951void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
952 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600953 auto *cb_context = GetAccessContext(commandBuffer);
954 assert(cb_context);
955 auto *context = cb_context->GetCurrentAccessContext();
956
John Zulauf9cb530d2019-09-30 14:14:10 -0600957 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600958 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
959 const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600960
John Zulauf9cb530d2019-09-30 14:14:10 -0600961 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600962 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
963 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf9cb530d2019-09-30 14:14:10 -0600964
965 for (uint32_t region = 0; region < regionCount; region++) {
966 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -0600967 if (src_mem) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700968 ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf5f13a792020-03-10 07:31:21 -0600969 context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -0600970 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600971 if (dst_mem) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700972 ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf5f13a792020-03-10 07:31:21 -0600973 context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700974 }
975 }
976}
977
978bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
979 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
980 const VkImageCopy *pRegions) const {
981 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600982 const auto *cb_access_context = GetAccessContext(commandBuffer);
983 assert(cb_access_context);
984 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700985
John Zulauf3d84f1b2020-03-09 13:33:25 -0600986 const auto *context = cb_access_context->GetCurrentAccessContext();
987 assert(context);
988 if (!context) return skip;
989
990 const auto *src_image = Get<IMAGE_STATE>(srcImage);
991 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600992 for (uint32_t region = 0; region < regionCount; region++) {
993 const auto &copy_region = pRegions[region];
994 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -0600995 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -0600996 copy_region.srcOffset, copy_region.extent);
997 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -0600998 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
999 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1000 report_data->FormatHandle(srcImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001001 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001002 }
1003
1004 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07001005 VkExtent3D dst_copy_extent =
1006 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001007 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07001008 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001009 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001010 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1011 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1012 report_data->FormatHandle(dstImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001013 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07001014 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07001015 }
1016 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001017
John Zulauf5c5e88d2019-12-26 11:22:02 -07001018 return skip;
1019}

void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource, copy_region.srcOffset,
                                       copy_region.extent, tag);
        }
        if (dst_image) {
            VkExtent3D dst_copy_extent =
                GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
            context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource, copy_region.dstOffset,
                                       dst_copy_extent, tag);
        }
    }
}
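
// Note: this record hook mirrors the validate loop above but applies no checks of its own; it unconditionally folds each
// region's read of srcImage and write of dstImage into the access context so that subsequent commands are evaluated
// against the state the application actually recorded.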

bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
    const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
    // Validate image layout transitions
    for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
        const auto &barrier = pImageMemoryBarriers[index];
        if (barrier.newLayout == barrier.oldLayout) continue;  // Only interested in layout transitions at this point.
        const auto *image_state = Get<IMAGE_STATE>(barrier.image);
        if (!image_state) continue;
        const auto hazard = DetectImageBarrierHazard(*context, *image_state, src_exec_scope, src_stage_accesses, barrier);
        if (hazard.hazard) {
            // TODO -- add tag information to log msg when useful.
            skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
                             "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s", string_SyncHazard(hazard.hazard),
                             index, report_data->FormatHandle(barrier.image).c_str());
        }
    }

    return skip;
}
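
// The loop above only looks at barriers that actually change layout. A sketch of such a barrier as an application would
// record it (illustrative only; image and cmd are hypothetical handles):
//
//     VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
//                          0, nullptr, 0, nullptr, 1, &barrier);
//
// Roughly, the check above reports a hazard when the transition conflicts with prior accesses to that subresource range
// that the barrier's source scope (TRANSFER here) does not cover.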

void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                    uint32_t bufferMemoryBarrierCount,
                                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                    uint32_t imageMemoryBarrierCount,
                                                    const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return;
    auto access_context = cb_access_context->GetCurrentAccessContext();
    assert(access_context);
    if (!access_context) return;

    const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
    auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
    const auto dst_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), dstStageMask);
    auto dst_stage_accesses = AccessScopeByStage(dst_stage_mask);
    const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    const auto dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
    ApplyBufferBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
                        bufferMemoryBarrierCount, pBufferMemoryBarriers);
    ApplyImageBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
                       imageMemoryBarrierCount, pImageMemoryBarriers);

    // Apply these last, in case their operation is a superset of the other two and would clean them up...
    ApplyGlobalBarriers(access_context, src_exec_scope, dst_exec_scope, src_stage_accesses, dst_stage_accesses, memoryBarrierCount,
                        pMemoryBarriers);
}
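
// Note: the record path derives the execution and access scopes once from the expanded stage masks, then applies buffer
// barriers, image barriers, and finally the global memory barriers; as the comment above says, the global pass goes last
// so a broader barrier can subsume the narrower per-resource ones.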

void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    // The state tracker sets up the device state
    StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);

    // Add the callback hooks for the functions that are used broadly or deeply enough that the ValidationStateTracker
    // refactor would be messier without them.
    // TODO: Find a good way to do this hooklessly.
    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
    SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);

    sync_device_state->SetCommandBufferResetCallback(
        [sync_device_state](VkCommandBuffer command_buffer) -> void { sync_device_state->ResetCommandBuffer(command_buffer); });
}
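
// The reset callback captures sync_device_state by value; the SyncValidator validation object persists for the lifetime
// of the device, so the lambda remains valid for every command buffer reset routed through this hook.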

void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
                                                     VkResult result) {
    // The state tracker sets up the command buffer state
    StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);

    // Create/initialize the structure that tracks accesses at the command buffer scope.
    auto cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    cb_access_context->Reset();
}

void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                             const VkSubpassBeginInfo *pSubpassBeginInfo) {
    const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
    auto cb_context = GetAccessContext(commandBuffer);
    if (rp_state && cb_context) {
        cb_context->BeginRenderPass(*rp_state);
    }
}

void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                      const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
}

void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                         const VkRenderPassBeginInfo *pRenderPassBegin,
                                                         const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
}
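
// The vkCmdBeginRenderPass / vkCmdBeginRenderPass2 / vkCmdBeginRenderPass2KHR entry points above all funnel into
// RecordCmdBeginRenderPass; the Vulkan 1.0 variant synthesizes a VkSubpassBeginInfo with lvl_init_struct so all three
// share one record routine. The NextSubpass and EndRenderPass wrappers below follow the same pattern.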

void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                         const VkSubpassEndInfo *pSubpassEndInfo) {
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    auto cb_state = cb_context->GetCommandBufferState();
    if (!cb_state) return;

    auto rp_state = cb_state->activeRenderPass;
    if (!rp_state) return;

    cb_context->NextRenderPass(*rp_state);
}

void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
    auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
    subpass_begin_info.contents = contents;
    RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr);
}

void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                  const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                     const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
}

void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    // Resolve all the subpass contexts to the command buffer context
    auto cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    auto cb_state = cb_context->GetCommandBufferState();
    if (!cb_state) return;

    const auto *rp_state = cb_state->activeRenderPass;
    if (!rp_state) return;

    cb_context->EndRenderPass(*rp_state);
}

void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
    RecordCmdEndRenderPass(commandBuffer, nullptr);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
}

void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
    RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
}

bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                        VkImageLayout dstImageLayout, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_mem) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(
                *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
            auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_READ, src_range);
            if (hazard.hazard) {
                // TODO -- add tag information to log msg when useful.
                skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBufferToImage: Hazard %s for srcBuffer %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
            }
        }
        if (dst_image) {
            auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
                                                copy_region.imageOffset, copy_region.imageExtent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBufferToImage: Hazard %s for dstImage %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
            }
            if (skip) break;
        }
        if (skip) break;
    }
    return skip;
}
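
// Rough worked example for the buffer-side range above (assuming GetBufferSizeFromCopyImage returns the byte span the
// region implies for the source buffer): a tightly packed upload of a 64x64 region of a VK_FORMAT_R8G8B8A8_UNORM image,
// i.e. bufferRowLength == bufferImageHeight == 0, covers
//
//     64 * 64 * 4 bytes = 16384 bytes
//
// starting at copy_region.bufferOffset; that is the span checked for transfer-read hazards against earlier writes to the
// same memory.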

void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                      VkImageLayout dstImageLayout, uint32_t regionCount,
                                                      const VkBufferImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);

    auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_buffer) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(
                *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
            context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
        }
        if (dst_image) {
            context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
                                       copy_region.imageOffset, copy_region.imageExtent, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                        VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
                                                        const VkBufferImageCopy *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
                                                copy_region.imageOffset, copy_region.imageExtent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImageToBuffer: Hazard %s for srcImage %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
            }
        }
        if (dst_mem) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(
                *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
            auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
            if (hazard.hazard) {
                skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImageToBuffer: Hazard %s for dstBuffer %s, region %" PRIu32,
                                 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
            }
        }
        if (skip) break;
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                      VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
                                       copy_region.imageOffset, copy_region.imageExtent, tag);
        }
        if (dst_buffer) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(
                *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
            context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageBlit *pRegions, VkFilter filter) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    const auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
            auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
                                                blit_region.srcOffsets[0], extent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdBlitImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(srcImage).c_str(), region);
            }
        }

        if (dst_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
            auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
                                                blit_region.dstOffsets[0], extent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdBlitImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(dstImage).c_str(), region);
            }
            if (skip) break;
        }
    }

    return skip;
}
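
// Note on the extent math above (mirrored in the record path below): VkImageBlit describes each side of the blit as a
// pair of opposite corners, so the extent handed to DetectHazard is offsets[1] - offsets[0] cast to unsigned. A mirrored
// blit may legally supply offsets[1] < offsets[0], in which case the cast wraps; the ranges tracked here therefore
// assume non-mirrored regions.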

void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageBlit *pRegions, VkFilter filter) {
    auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);

    auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto *dst_image = Get<IMAGE_STATE>(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &blit_region = pRegions[region];
        if (src_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
            context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
                                       blit_region.srcOffsets[0], extent, tag);
        }
        if (dst_image) {
            VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
                                 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
            context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
                                       blit_region.dstOffsets[0], extent, tag);
        }
    }
}