/* Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Valve Corporation
 * Copyright (c) 2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

// Expand the pipeline stage without regard to whether they are valid w.r.t. queue or extension
VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
    VkPipelineStageFlags expanded = stage_mask;
    if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
            if (all_commands.first & queue_flags) {
                expanded |= all_commands.second;
            }
        }
    }
    if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
    }
    return expanded;
}
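
// Illustrative sketch (not part of the validation path): on a queue created with graphics and compute
// capabilities, expanding a mask containing ALL_COMMANDS replaces the meta-stage with the union of the
// per-queue stage sets registered in syncAllCommandStagesByQueueFlags, e.g.
//   auto expanded = ExpandPipelineStages(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
//                                        VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
//   // 'expanded' no longer contains ALL_COMMANDS, only the individual graphics and compute stages.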

VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
                                           std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
    VkPipelineStageFlags unscanned = stage_mask;
    VkPipelineStageFlags related = 0;
    for (const auto entry : map) {
        const auto stage = entry.first;
        if (stage & unscanned) {
            related = related | entry.second;
            unscanned = unscanned & ~stage;
            if (!unscanned) break;
        }
    }
    return related;
}

VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
}

VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
}
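
// Illustrative sketch (assumes the stage orderings captured in syncLogicallyEarlierStages/LaterStages): a
// barrier's first synchronization scope is widened with all logically earlier stages and the second scope
// with all logically later ones, e.g.
//   auto src_exec = WithEarlierPipelineStages(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
//   // src_exec also covers the stages that logically precede fragment shading (vertex shader, etc.), so
//   // dependency chains through those stages are honored.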

static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
static ResourceAccessRange MakeMemoryAccessRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    assert(!buffer.sparse);
    const auto base = offset + buffer.binding.offset;
    return ResourceAccessRange(base, base + size);
}
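
// Illustrative sketch: buffer accesses are tracked against the bound VkDeviceMemory, so an access at 'offset'
// for 'size' bytes maps to the half-open range [binding.offset + offset, binding.offset + offset + size), e.g.
//   ResourceAccessRange range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);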

AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
                             const std::vector<SubpassDependencyGraphNode> &dependencies,
                             const std::vector<AccessContext> &contexts, AccessContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    for (const auto &prev_dep : subpass_dep.prev) {
        assert(prev_dep.dependency);
        const auto dep = *prev_dep.dependency;
        prev_.emplace_back(const_cast<AccessContext *>(&contexts[dep.srcSubpass]), queue_flags, dep);
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(const_cast<AccessContext *>(&contexts[async_subpass]));
    }
    if (subpass_dep.barrier_from_external) {
        src_external_ = TrackBack(external_context, queue_flags, *subpass_dep.barrier_from_external);
    } else {
        src_external_ = TrackBack();
    }
    if (subpass_dep.barrier_to_external) {
        dst_external_ = TrackBack(this, queue_flags, *subpass_dep.barrier_to_external);
    } else {
        dst_external_ = TrackBack();
    }
}

template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(const VulkanTypedHandle &handle, const Detector &detector,
                                                 const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResourceAccessState default_state;  // When present, PreviousAccess will "infill"
    ResolvePreviousAccess(handle, range, &descent_map, &default_state);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

// A recursive range walker for hazard detection, checking the current context first and then recursing (via
// DetectPreviousHazard) to walk the DAG of prior contexts (for example, subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(const VulkanTypedHandle &handle, const Detector &detector,
                                         const ResourceAccessRange &range) const {
    HazardResult hazard;

    // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
    // so we'll check these first
    for (const auto &async_context : async_) {
        hazard = async_context->DetectAsyncHazard(handle, detector, range);
        if (hazard.hazard) return hazard;
    }

    const auto access_tracker = GetAccessTracker(handle);
    if (access_tracker) {
        const auto &accesses = access_tracker->GetCurrentAccessMap();
        const auto from = accesses.lower_bound(range);
        if (from != accesses.end() && from->first.intersects(range)) {
            const auto to = accesses.upper_bound(range);
            ResourceAccessRange gap = {range.begin, range.begin};
            for (auto pos = from; pos != to; ++pos) {
                hazard = detector.Detect(pos);
                if (hazard.hazard) return hazard;

                // make sure we don't go past range
                auto upper_bound = std::min(range.end, pos->first.end);
                gap.end = upper_bound;

                // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
                if (!gap.empty()) {
                    // Must recur on all gaps
                    hazard = DetectPreviousHazard(handle, detector, gap);
                    if (hazard.hazard) return hazard;
                }
                gap.begin = upper_bound;
            }
            gap.end = range.end;
            if (gap.non_empty()) {
                hazard = DetectPreviousHazard(handle, detector, gap);
                if (hazard.hazard) return hazard;
            }
        } else {
            hazard = DetectPreviousHazard(handle, detector, range);
        }
    } else {
        hazard = DetectPreviousHazard(handle, detector, range);
    }

    return hazard;
}
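
// Illustrative walk of the detection above (a sketch, not normative): with tracked accesses covering [0,16)
// and [32,48) and a query range of [0,64), Detect() runs against the two stored segments, while the gaps
// [16,32) and [48,64) are handed to DetectPreviousHazard so prior contexts (earlier subpasses or the external
// context) are consulted only where this context has no information of its own.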

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(const VulkanTypedHandle &handle, const Detector &detector,
                                              const ResourceAccessRange &range) const {
    const auto access_tracker = GetAccessTracker(handle);
    HazardResult hazard;
    if (access_tracker) {
        auto accesses = access_tracker->GetCurrentAccessMap();
        const auto from = accesses.lower_bound(range);
        const auto to = accesses.upper_bound(range);
        for (auto pos = from; pos != to; ++pos) {
            hazard = detector.DetectAsync(pos);
            if (hazard.hazard) break;
        }
    }
    return hazard;
}

void AccessContext::ResolveTrackBack(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
                                     const AccessContext::TrackBack &track_back, ResourceAccessRangeMap *descent_map,
                                     const ResourceAccessState *infill_state, bool recur_to_infill) const {
    const auto *access_tracker = GetAccessTracker(handle);
    if (access_tracker) {
        sparse_container::parallel_iterator<ResourceAccessRangeMap, const ResourceAccessRangeMap> current(
            *descent_map, access_tracker->GetCurrentAccessMap(), range.begin);
        while (current->range.non_empty()) {
            if (current->pos_B->valid) {
                const auto &src_pos = current->pos_B->lower_bound;
                auto access_with_barrier = src_pos->second;
                access_with_barrier.ApplyBarrier(track_back.barrier);
                if (current->pos_A->valid) {
                    current.trim_A();
                    current->pos_A->lower_bound->second.Resolve(access_with_barrier);
                } else {
                    descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, access_with_barrier));
                    current.invalidate_A();  // Update the parallel iterator to point at the correct segment after split(s)
                }
            } else {
                // we have to descend to fill this gap
                if (recur_to_infill) {
                    track_back.context->ResolvePreviousAccess(handle, current->range, descent_map, infill_state);
                    current.invalidate_A();  // Update the parallel iterator to point at the correct segment after recursion.
                }
                if (!current->pos_A->valid && infill_state) {
                    // If we didn't find anything in the previous range, we infill with default to prevent repeating
                    // a fruitless search
                    descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                    current.invalidate_A();  // Update the parallel iterator to point at the correct segment after insert
                }
            }
            ++current;
        }
    } else if (recur_to_infill) {
        track_back.context->ResolvePreviousAccess(handle, range, descent_map, infill_state);
    }
}

void AccessContext::ResolvePreviousAccess(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
                                          ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            ResolveTrackBack(handle, range, prev_dep, descent_map, infill_state);
        }

        if (src_external_.context) {
            ResolveTrackBack(handle, range, src_external_, descent_map, infill_state);
        }
    }
}

void AccessContext::ResolvePreviousAccess(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
                                          ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
    const VulkanTypedHandle image_handle(image_state.image, kVulkanObjectTypeImage);
    auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
    subresource_adapter::ImageRangeEncoder encoder(image_state.store_device_as_workaround, image_state);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image_state.createInfo.extent);
    for (; range_gen->non_empty(); ++range_gen) {
        ResolvePreviousAccess(image_handle, *range_gen, descent_map, infill_state);
    }
}

class HazardDetector {
    SyncStageAccessIndex usage_index_;

  public:
    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectAsyncHazard(usage_index_);
    }
    HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
};

HazardResult AccessContext::DetectHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex usage_index,
                                         const ResourceAccessRange &range) const {
    HazardDetector detector(usage_index);
    return DetectHazard(handle, detector, range);
}

void CommandBufferAccessContext::BeginRenderPass(const RENDER_PASS_STATE &rp_state) {
    // Create an access context for the first subpass and add it to the command buffers collection
    render_pass_contexts_.emplace_back(queue_flags_, &rp_state.subpass_dependencies, &cb_tracker_context_);
    current_renderpass_context_ = &render_pass_contexts_.back();
    current_context_ = &current_renderpass_context_->CurrentContext();

    // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
}

void CommandBufferAccessContext::NextRenderPass(const RENDER_PASS_STATE &rp_state) {
    assert(current_renderpass_context_);
    current_renderpass_context_->NextSubpass(queue_flags_, &cb_tracker_context_);
    // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
    current_context_ = &current_renderpass_context_->CurrentContext();
}

void CommandBufferAccessContext::EndRenderPass(const RENDER_PASS_STATE &render_pass) {
    // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return;

    const auto &contexts = current_renderpass_context_->subpass_contexts_;
    cb_tracker_context_.ResolveChildContexts(contexts);

    current_context_ = &cb_tracker_context_;
    current_renderpass_context_ = nullptr;
}

HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
                                         const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
                                         const VkExtent3D &extent) const {
    // TODO: replace the encoder/generator with offset3D/extent3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, offset, extent);
    VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = DetectHazard(image_handle, current_usage, *range_gen);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

class BarrierHazardDetector {
  public:
    BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                          SyncStageAccessFlags src_access_scope)
        : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}

    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
    }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
        // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
        return pos->second.DetectAsyncHazard(usage_index_);
    }

  private:
    SyncStageAccessIndex usage_index_;
    VkPipelineStageFlags src_exec_scope_;
    SyncStageAccessFlags src_access_scope_;
};

HazardResult AccessContext::DetectBarrierHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
                                                VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
                                                const ResourceAccessRange &range) const {
    BarrierHazardDetector detector(current_usage, src_exec_scope, src_access_scope);
    return DetectHazard(handle, detector, range);
}

HazardResult DetectImageBarrierHazard(const AccessContext &context, const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
                                      SyncStageAccessFlags src_stage_accesses, const VkImageMemoryBarrier &barrier) {
    auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
    const VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
    const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
    subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image.createInfo.extent);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = context.DetectBarrierHazard(image_handle, SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION,
                                                          src_exec_scope, src_access_scope, *range_gen);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

template <typename Flags, typename Map>
SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
    SyncStageAccessFlags scope = 0;
    for (const auto &bit_scope : map) {
        if (flag_mask < bit_scope.first) break;

        if (flag_mask & bit_scope.first) {
            scope |= bit_scope.second;
        }
    }
    return scope;
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
    return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
    return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
}

// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
    // accesses (after factoring out common terms, the union of the per-stage/access intersections is the intersection
    // of the union of all stage/access types for all the stages with the same union for the access mask)...
    return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
}
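
// Illustrative sketch: the scope is the bitwise AND of the stage expansion and the access expansion, e.g.
//   auto scope = SyncStageAccess::AccessScope(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
//   // keeps only the fragment-shader shader-read bits; pairing a transfer-only access with a shader stage
//   // yields an empty scope, since no stage/access table entry covers that combination.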

template <typename Action>
void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
    // TODO -- region/mem-range accurate update
    auto pos = accesses->lower_bound(range);
    if (pos == accesses->end() || !pos->first.intersects(range)) {
        // The range is empty, fill it with a default value.
        pos = action.Infill(accesses, pos, range);
    } else if (range.begin < pos->first.begin) {
        // Leading empty space, infill
        pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
    } else if (pos->first.begin < range.begin) {
        // Trim the beginning if needed
        pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
        ++pos;
    }

    const auto the_end = accesses->end();
    while ((pos != the_end) && pos->first.intersects(range)) {
        if (pos->first.end > range.end) {
            pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
        }

        pos = action(accesses, pos);
        if (pos == the_end) break;

        auto next = pos;
        ++next;
        if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
            // Need to infill if next is disjoint
            VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
            ResourceAccessRange new_range(pos->first.end, limit);
            next = action.Infill(accesses, next, new_range);
        }
        pos = next;
    }
}
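
// Illustrative sketch of the walk above: with existing entries [0,16) and [32,48) and an update range of
// [8,40), the first entry is split at 8 and the action applied to [8,16), the uncovered middle [16,32) is
// handed to action.Infill(), and the second entry is split at 40 before the action is applied to [32,40).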

struct UpdateMemoryAccessStateFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
        // this is only called on gaps, and never returns a gap.
        ResourceAccessState default_state;
        context.ResolvePreviousAccess(handle, range, accesses, &default_state);
        return accesses->lower_bound(range);
    }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.Update(usage, tag);
        return pos;
    }

    UpdateMemoryAccessStateFunctor(const VulkanTypedHandle &handle_, const AccessContext &context_, SyncStageAccessIndex usage_,
                                   const ResourceUsageTag &tag_)
        : handle(handle_), context(context_), usage(usage_), tag(tag_) {}
    const VulkanTypedHandle handle;
    const AccessContext &context;
    SyncStageAccessIndex usage;
    const ResourceUsageTag &tag;
};

struct ApplyMemoryAccessBarrierFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.ApplyMemoryAccessBarrier(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
        return pos;
    }

    ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_exec_scope_, SyncStageAccessFlags src_access_scope_,
                                    VkPipelineStageFlags dst_exec_scope_, SyncStageAccessFlags dst_access_scope_)
        : src_exec_scope(src_exec_scope_),
          src_access_scope(src_access_scope_),
          dst_exec_scope(dst_exec_scope_),
          dst_access_scope(dst_access_scope_) {}

    VkPipelineStageFlags src_exec_scope;
    SyncStageAccessFlags src_access_scope;
    VkPipelineStageFlags dst_exec_scope;
    SyncStageAccessFlags dst_access_scope;
};

struct ApplyGlobalBarrierFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.ApplyExecutionBarrier(src_exec_scope, dst_exec_scope);

        for (const auto &functor : barrier_functor) {
            functor(accesses, pos);
        }
        return pos;
    }

    ApplyGlobalBarrierFunctor(VkPipelineStageFlags src_exec_scope, VkPipelineStageFlags dst_exec_scope,
                              SyncStageAccessFlags src_stage_accesses, SyncStageAccessFlags dst_stage_accesses,
                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
        : src_exec_scope(src_exec_scope), dst_exec_scope(dst_exec_scope) {
        // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
        barrier_functor.reserve(memoryBarrierCount);
        for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
            const auto &barrier = pMemoryBarriers[barrier_index];
            barrier_functor.emplace_back(src_exec_scope, SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask),
                                         dst_exec_scope, SyncStageAccess::AccessScope(dst_stage_accesses, barrier.dstAccessMask));
        }
    }

    const VkPipelineStageFlags src_exec_scope;
    const VkPipelineStageFlags dst_exec_scope;
    std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
};

void AccessContext::UpdateAccessState(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
                                      const ResourceAccessRange &range, const ResourceUsageTag &tag) {
    UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
    auto *tracker = GetAccessTracker(handle);
    assert(tracker);
    UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), range, action);
}

void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
                                      const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
                                      const VkExtent3D &extent, const ResourceUsageTag &tag) {
    // TODO: replace the encoder/generator with offset3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, offset, extent);
    const VulkanTypedHandle handle(image.image, kVulkanObjectTypeImage);
    auto *tracker = GetAccessTracker(handle);
    assert(tracker);

    UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
    for (; range_gen->non_empty(); ++range_gen) {
        UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), *range_gen, action);
    }
}

template <typename Action>
void AccessContext::UpdateMemoryAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
    auto *tracker = GetAccessTracker(VulkanTypedHandle(buffer.binding.mem_state->mem, kVulkanObjectTypeDeviceMemory));
    if (!tracker) return;
    UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), range, action);
}

template <typename Action>
void AccessContext::UpdateMemoryAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
                                       const Action action) {
    auto tracker = GetAccessTrackerNoInsert(VulkanTypedHandle(image.image, kVulkanObjectTypeImage));
    if (!tracker) return;
    auto *accesses = &tracker->GetCurrentAccessMap();

    subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image.createInfo.extent);
    // TODO: Enable using encoder from image
    // subresource_adapter::ImageRangeGenerator range_gen(image.fragment_encoder, subresource_range, {0, 0, 0},
    // image.createInfo.extent);

    for (; range_gen->non_empty(); ++range_gen) {
        UpdateMemoryAccessState(accesses, *range_gen, action);
    }
}

template <typename Action>
void AccessContext::ApplyGlobalBarriers(const Action &barrier_action) {
    // Note: Barriers do *not* cross context boundaries, applying to accesses within.... (at least for renderpass subpasses)
    for (auto &handle_tracker_pair : GetAccessTrackerMap()) {
        UpdateMemoryAccessState(&handle_tracker_pair.second.GetCurrentAccessMap(), full_range, barrier_action);
    }
}

void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
    std::unordered_set<VulkanTypedHandle> resolved;
    for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
        auto &context = contexts[subpass_index];
        for (const auto &tracker_pair : context.GetAccessTrackerMap()) {
            if (tracker_pair.second.GetCurrentAccessMap().size() == 0) continue;
            auto insert_pair = resolved.insert(tracker_pair.first);
            if (insert_pair.second) {  // only create the resolve map for this handle if we haven't seen it before
                // This is the first time we've seen this handle accessed, resolve this for all subsequent subpasses
                ResourceAccessRangeMap resolve_map;
                auto resolve_index = static_cast<uint32_t>(contexts.size());
                while (resolve_index > subpass_index) {
                    resolve_index--;
                    const auto &from_context = contexts[resolve_index];
                    from_context.ResolveTrackBack(tracker_pair.first, full_range, from_context.GetDstExternalTrackBack(),
                                                  &resolve_map, nullptr, false);
                }
                // Given that all DAG paths lead back to the src_external_ (if only a default one) we can just overwrite.
                sparse_container::splice(&GetAccessTracker(tracker_pair.first)->GetCurrentAccessMap(), resolve_map,
                                         sparse_container::value_precedence::prefer_source);
                // TODO: This might be a place to consolidate the map
            }
        }
    }
}

SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &barrier) {
    const auto src_stage_mask = ExpandPipelineStages(queue_flags, barrier.srcStageMask);
    src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    src_access_scope = SyncStageAccess::AccessScope(src_stage_mask, barrier.srcAccessMask);
    const auto dst_stage_mask = ExpandPipelineStages(queue_flags, barrier.dstStageMask);
    dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
    dst_access_scope = SyncStageAccess::AccessScope(dst_stage_mask, barrier.dstAccessMask);
}
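
// Illustrative sketch: for a VkSubpassDependency2 with srcStageMask = COLOR_ATTACHMENT_OUTPUT and
// dstStageMask = FRAGMENT_SHADER, the constructor above expands the stage masks for the queue, widens the
// source execution scope with logically earlier stages and the destination scope with logically later ones,
// and narrows each access mask to the accesses actually possible at those stages via SyncStageAccess::AccessScope.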

void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier) {
    ApplyExecutionBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
    ApplyMemoryAccessBarrier(barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope);
}

ResourceAccessState ResourceAccessState::ApplyBarrierStack(const ResourceAccessState &that, const SyncBarrierStack &barrier_stack) {
    ResourceAccessState copy = that;
    for (auto barrier = barrier_stack.begin(); barrier != barrier_stack.end(); ++barrier) {
        assert(*barrier);
        copy.ApplyBarrier(*(*barrier));
    }
    return copy;
}

HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, SyncBarrierStack *barrier_stack) const {
    if (barrier_stack) {
        return ApplyBarrierStack(*this, *barrier_stack).DetectHazard(usage_index);
    }
    return DetectHazard(usage_index);
}

HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (IsWriteHazard(usage)) {
            hazard.Set(READ_AFTER_WRITE, write_tag);
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE usage states if any
        // Write-After-Write check -- if we have a previous write to test against
        if (last_write && IsWriteHazard(usage)) {
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        } else {
            // Only look for casus belli for WAR
            const auto usage_stage = PipelineStageBit(usage_index);
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                if (IsReadHazard(usage_stage, last_reads[read_index])) {
                    hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
                    break;
                }
            }
        }
    }
    return hazard;
}
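
// Illustrative sketch of the classification above: a read that follows an unsynchronized write reports
// READ_AFTER_WRITE; a second write with no effective barrier against the previous write reports
// WRITE_AFTER_WRITE; and a write that follows a read whose stage is not protected by an execution dependency
// reports WRITE_AFTER_READ.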

// Asynchronous Hazards occur between subpasses with no connection through the DAG
HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (last_write != 0) {
            hazard.Set(READ_RACING_WRITE, write_tag);
        }
    } else {
        if (last_write != 0) {
            hazard.Set(WRITE_RACING_WRITE, write_tag);
        } else if (last_read_count > 0) {
            hazard.Set(WRITE_RACING_READ, last_reads[0].tag);
        }
    }
    return hazard;
}

HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                                                      SyncStageAccessFlags src_access_scope,
                                                      SyncBarrierStack *barrier_stack) const {
    if (barrier_stack) {
        return ApplyBarrierStack(*this, *barrier_stack).DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
    }
    return DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
}

HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                                                      SyncStageAccessFlags src_access_scope) const {
    // Only supporting image layout transitions for now
    assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
    HazardResult hazard;
    if (last_write) {
        // If the previous write is *not* in the 1st access scope
        // *AND* the current barrier is not in the dependency chain
        // *AND* there is no prior memory barrier for the previous write in the dependency chain
        // then the barrier access is unsafe (R/W after W)
        if (((last_write & src_access_scope) == 0) && ((src_exec_scope & write_dependency_chain) == 0) && (write_barriers == 0)) {
            // TODO: Do we need a different hazard name for this?
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        }
    } else {
        // Look at the reads
        for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
            const auto &read_access = last_reads[read_index];
            // If the read stage is not in the src sync scope
            // *AND* not execution chained with an existing sync barrier (that's the or)
            // then the barrier access is unsafe (R/W after R)
            if ((src_exec_scope & (read_access.stage | read_access.barriers)) == 0) {
                hazard.Set(WRITE_AFTER_READ, read_access.tag);
                break;
            }
        }
    }
    return hazard;
}
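
// Illustrative sketch: for an image layout transition, the check above flags WRITE_AFTER_WRITE when the prior
// write is outside the barrier's source access scope and not covered by its dependency chain, and
// WRITE_AFTER_READ when some prior read's stage is neither in the source execution scope nor chained to it.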

// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
void ResourceAccessState::Resolve(const ResourceAccessState &other) {
    if (write_tag.IsBefore(other.write_tag)) {
        // If the other write is later, we've reported any existing hazard, and we can just overwrite as the more recent operation
        *this = other;
    } else if (!other.write_tag.IsBefore(write_tag)) {
        // This is the *equals* case for write operations; we merge the write barriers and the read state (but without the
        // dependency chaining logic or any stage expansion)
        write_barriers |= other.write_barriers;

        // Merge the read states
        for (uint32_t other_read_index = 0; other_read_index < other.last_read_count; other_read_index++) {
            auto &other_read = other.last_reads[other_read_index];
            if (last_read_stages & other_read.stage) {
                // Merge in the barriers for read stages that exist in *both* this and other
                // TODO: This is N^2 with stages... perhaps the ReadStates should be by stage index.
                for (uint32_t my_read_index = 0; my_read_index < last_read_count; my_read_index++) {
                    auto &my_read = last_reads[my_read_index];
                    if (other_read.stage == my_read.stage) {
                        if (my_read.tag.IsBefore(other_read.tag)) {
                            my_read.tag = other_read.tag;
                        }
                        my_read.barriers |= other_read.barriers;
                        break;
                    }
                }
            } else {
                // The other read stage doesn't exist in this, so add it.
                last_reads[last_read_count] = other_read;
                last_read_count++;
                last_read_stages |= other_read.stage;
            }
        }
    }  // the else clause would be that the other write is before this write... in which case we supersede the other state and
       // ignore it.
}

void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
    // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource)...
    const auto usage_bit = FlagBit(usage_index);
    if (IsRead(usage_index)) {
        // Multiple outstanding reads may be of interest and do dependency chains independently
        // However, for purposes of barrier tracking, only one read per pipeline stage matters
        const auto usage_stage = PipelineStageBit(usage_index);
        if (usage_stage & last_read_stages) {
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                ReadState &access = last_reads[read_index];
                if (access.stage == usage_stage) {
                    access.barriers = 0;
                    access.tag = tag;
                    break;
                }
            }
        } else {
            // We don't have this stage in the list yet...
            assert(last_read_count < last_reads.size());
            ReadState &access = last_reads[last_read_count++];
            access.stage = usage_stage;
            access.barriers = 0;
            access.tag = tag;
            last_read_stages |= usage_stage;
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE operations if any
        // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
        // if the last_reads/last_write were unsafe, we've reported them,
        // in either case the prior access is irrelevant, we can overwrite them as *this* write is now after them
        last_read_count = 0;
        last_read_stages = 0;

        write_barriers = 0;
        write_dependency_chain = 0;
        write_tag = tag;
        last_write = usage_bit;
    }
}

void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
    // Execution Barriers only protect read operations
    for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
        ReadState &access = last_reads[read_index];
        // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
        if (srcStageMask & (access.stage | access.barriers)) {
            access.barriers |= dstStageMask;
        }
    }
    if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
}

void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
                                                   VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_access_scope) {
    // Assuming we've applied the execution side of this barrier, we update just the write
    // The || implements the "dependency chain" logic for this barrier
    if ((src_access_scope & last_write) || (write_dependency_chain & src_exec_scope)) {
        write_barriers |= dst_access_scope;
        write_dependency_chain |= dst_exec_scope;
    }
}
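
// Illustrative sketch: recording a pipeline barrier with srcStageMask = COLOR_ATTACHMENT_OUTPUT and
// dstStageMask = FRAGMENT_SHADER after a color attachment write reaches this state as ApplyExecutionBarrier()
// (extending each protected read's 'barriers' and the write_dependency_chain) followed by
// ApplyMemoryAccessBarrier() (adding the destination access scope to write_barriers when the write is in the
// source scope or already chained), so a subsequent fragment-shader read of the same range is no longer a hazard.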

void SyncValidator::ResetCommandBuffer(VkCommandBuffer command_buffer) {
    auto *access_context = GetAccessContextNoInsert(command_buffer);
    if (access_context) {
        access_context->Reset();
    }
}

void SyncValidator::ApplyGlobalBarriers(AccessContext *context, VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_access_scope,
                                        SyncStageAccessFlags dst_access_scope, uint32_t memoryBarrierCount,
                                        const VkMemoryBarrier *pMemoryBarriers) {
    // TODO: Implement this better (maybe some delayed/on-demand integration).
    ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_access_scope, dst_access_scope, memoryBarrierCount,
                                               pMemoryBarriers);
    context->ApplyGlobalBarriers(barriers_functor);
}

void SyncValidator::ApplyBufferBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
                                        SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
                                        SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
                                        const VkBufferMemoryBarrier *barriers) {
    // TODO Implement this at subresource/memory_range accuracy
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
        if (!buffer) continue;
        ResourceAccessRange range = MakeMemoryAccessRange(*buffer, barrier.offset, barrier.size);
        const auto src_access_scope = AccessScope(src_stage_accesses, barrier.srcAccessMask);
        const auto dst_access_scope = AccessScope(dst_stage_accesses, barrier.dstAccessMask);
        const ApplyMemoryAccessBarrierFunctor update_action(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
        context->UpdateMemoryAccess(*buffer, range, update_action);
    }
}

void SyncValidator::ApplyImageBarriers(AccessContext *context, VkPipelineStageFlags src_exec_scope,
                                       SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
                                       SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
                                       const VkImageMemoryBarrier *barriers) {
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto *image = Get<IMAGE_STATE>(barrier.image);
        if (!image) continue;
        const ApplyMemoryAccessBarrierFunctor barrier_action(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask),
                                                             dst_exec_scope,
                                                             AccessScope(dst_stage_accesses, barrier.dstAccessMask));

        auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
        context->UpdateMemoryAccess(*image, subresource_range, barrier_action);
    }
}

bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                 uint32_t regionCount, const VkBufferCopy *pRegions) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    const auto *context = cb_context->GetCurrentAccessContext();

    // If we have no previous accesses, we have no hazards
    // TODO: make this sub-resource capable
    // TODO: make this general, and stuff it into templates/utility functions
    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_mem != VK_NULL_HANDLE) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_READ, src_range);
            if (hazard.hazard) {
                // TODO -- add tag information to log msg when useful.
                skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(srcBuffer).c_str(), region);
            }
        }
        if ((dst_mem != VK_NULL_HANDLE) && !skip) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
            if (hazard.hazard) {
                skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(dstBuffer).c_str(), region);
            }
        }
        if (skip) break;
    }
    return skip;
}
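
// Illustrative scenario (assumed API usage, not part of the layer): recording two vkCmdCopyBuffer calls whose
// destination ranges overlap with no intervening barrier makes the second call's SYNC_TRANSFER_TRANSFER_WRITE
// probe above find the first call's write still unguarded, reporting SYNC-HAZARD-WRITE_AFTER_WRITE for dstBuffer.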
955
956void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
957 uint32_t regionCount, const VkBufferCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -0600958 auto *cb_context = GetAccessContext(commandBuffer);
959 assert(cb_context);
960 auto *context = cb_context->GetCurrentAccessContext();
961
John Zulauf9cb530d2019-09-30 14:14:10 -0600962 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600963 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
964 const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600965
John Zulauf9cb530d2019-09-30 14:14:10 -0600966 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600967 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
968 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf9cb530d2019-09-30 14:14:10 -0600969
970 for (uint32_t region = 0; region < regionCount; region++) {
971 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -0600972 if (src_mem) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700973 ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
John Zulauf5f13a792020-03-10 07:31:21 -0600974 context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -0600975 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600976 if (dst_mem) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700977 ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
John Zulauf5f13a792020-03-10 07:31:21 -0600978 context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700979 }
980 }
981}
982
983bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
984 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
985 const VkImageCopy *pRegions) const {
986 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -0600987 const auto *cb_access_context = GetAccessContext(commandBuffer);
988 assert(cb_access_context);
989 if (!cb_access_context) return skip;
John Zulauf5c5e88d2019-12-26 11:22:02 -0700990
John Zulauf3d84f1b2020-03-09 13:33:25 -0600991 const auto *context = cb_access_context->GetCurrentAccessContext();
992 assert(context);
993 if (!context) return skip;
994
995 const auto *src_image = Get<IMAGE_STATE>(srcImage);
996 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600997 for (uint32_t region = 0; region < regionCount; region++) {
998 const auto &copy_region = pRegions[region];
999 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001000 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001001 copy_region.srcOffset, copy_region.extent);
1002 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001003 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1004 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1005 report_data->FormatHandle(srcImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001006 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001007 }
1008
1009 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07001010 VkExtent3D dst_copy_extent =
1011 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001012 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
locke-lunarg1df1f882020-03-02 16:42:08 -07001013 copy_region.dstOffset, dst_copy_extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001014 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001015 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1016 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1017 report_data->FormatHandle(dstImage).c_str(), region);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001018 }
locke-lunarg1dbbb9e2020-02-28 22:43:53 -07001019 if (skip) break;
John Zulauf5c5e88d2019-12-26 11:22:02 -07001020 }
1021 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001022
John Zulauf5c5e88d2019-12-26 11:22:02 -07001023 return skip;
1024}
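// Illustrative sketch (not part of the validator): a minimal vkCmdCopyImage sequence the check above
// would report. The handles (cmd, image_a, image_b) and the copy geometry are hypothetical.
//
//   VkImageCopy region = {};
//   region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   region.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   region.extent = {64, 64, 1};
//   vkCmdCopyImage(cmd, image_a, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                  image_b, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
//   vkCmdCopyImage(cmd, image_a, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                  image_b, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
//   // The second copy writes the same dstSubresource/extent with no barrier in between, so the
//   // dstImage check reports a WRITE_AFTER_WRITE hazard for region 0; the repeated src reads do not.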
1025
1026void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1027 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1028 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001029 auto *cb_access_context = GetAccessContext(commandBuffer);
1030 assert(cb_access_context);
1031 auto *context = cb_access_context->GetCurrentAccessContext();
1032 assert(context);
1033
John Zulauf5c5e88d2019-12-26 11:22:02 -07001034 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001035 auto *dst_image = Get<IMAGE_STATE>(dstImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001036
1037 for (uint32_t region = 0; region < regionCount; region++) {
1038 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06001039 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001040 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource, copy_region.srcOffset,
1041 copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001042 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001043 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07001044 VkExtent3D dst_copy_extent =
1045 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
John Zulauf540266b2020-04-06 18:54:53 -06001046 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource, copy_region.dstOffset,
1047 dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001048 }
1049 }
1050}
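// Illustrative sketch (not part of the validator): the barrier an application would insert between the
// two overlapping copies from the example above so that the recorded accesses no longer conflict.
// The handles (cmd, image_b) are hypothetical.
//
//   VkImageMemoryBarrier waw_barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   waw_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//   waw_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//   waw_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   waw_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   waw_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   waw_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   waw_barrier.image = image_b;
//   waw_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                        0, 0, nullptr, 0, nullptr, 1, &waw_barrier);
//   // ApplyImageBarriers (see PreCallRecordCmdPipelineBarrier below) updates the per-subresource
//   // access state so the second vkCmdCopyImage write no longer hazards against the first.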
1051
1052bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1053 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1054 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1055 uint32_t bufferMemoryBarrierCount,
1056 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1057 uint32_t imageMemoryBarrierCount,
1058 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
1059 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001060 const auto *cb_access_context = GetAccessContext(commandBuffer);
1061 assert(cb_access_context);
1062 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001063
John Zulauf3d84f1b2020-03-09 13:33:25 -06001064 const auto *context = cb_access_context->GetCurrentAccessContext();
1065 assert(context);
1066 if (!context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001067
John Zulauf3d84f1b2020-03-09 13:33:25 -06001068 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001069 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1070 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf0cb5be22020-01-23 12:18:22 -07001071 // Validate Image Layout transitions
1072 for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
1073 const auto &barrier = pImageMemoryBarriers[index];
1074 if (barrier.newLayout == barrier.oldLayout) continue; // Only interested in layout transitions at this point.
1075 const auto *image_state = Get<IMAGE_STATE>(barrier.image);
1076 if (!image_state) continue;
John Zulauf540266b2020-04-06 18:54:53 -06001077 const auto hazard = DetectImageBarrierHazard(*context, *image_state, src_exec_scope, src_stage_accesses, barrier);
John Zulauf0cb5be22020-01-23 12:18:22 -07001078 if (hazard.hazard) {
1079 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001080 skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
1081 "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s", string_SyncHazard(hazard.hazard),
1082 index, report_data->FormatHandle(barrier.image).c_str());
John Zulauf0cb5be22020-01-23 12:18:22 -07001083 }
1084 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001085
1086 return skip;
1087}
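// Illustrative sketch (not part of the validator): a layout transition the check above treats as a
// write to the image, so an insufficient source scope produces a hazard. The handles (cmd, image) and
// the chosen masks are hypothetical.
//
//   // ... earlier: vkCmdCopyBufferToImage(...) writes `image` in TRANSFER_DST_OPTIMAL ...
//   VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   barrier.srcAccessMask = 0;                        // the transfer write is NOT in the source access scope
//   barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//   barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//   barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.image = image;
//   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                        0, 0, nullptr, 0, nullptr, 1, &barrier);
//   // The transition acts as a write, but the earlier transfer write lies outside the
//   // srcStageMask/srcAccessMask scope, so DetectImageBarrierHazard reports a hazard for barrier 0.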
1088
1089void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1090 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1091 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1092 uint32_t bufferMemoryBarrierCount,
1093 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1094 uint32_t imageMemoryBarrierCount,
1095 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001096 auto *cb_access_context = GetAccessContext(commandBuffer);
1097 assert(cb_access_context);
1098 if (!cb_access_context) return;
1099 auto access_context = cb_access_context->GetCurrentAccessContext();
1100 assert(access_context);
1101 if (!access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06001102
John Zulauf3d84f1b2020-03-09 13:33:25 -06001103 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001104 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001105 const auto dst_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), dstStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001106 auto dst_stage_accesses = AccessScopeByStage(dst_stage_mask);
1107 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1108 const auto dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001109 ApplyBufferBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
1110 bufferMemoryBarrierCount, pBufferMemoryBarriers);
John Zulauf540266b2020-04-06 18:54:53 -06001111 ApplyImageBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001112 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001113
1114 // Apply these last in case their operation is a superset of the other two and would clean them up...

John Zulauf3d84f1b2020-03-09 13:33:25 -06001115 ApplyGlobalBarriers(access_context, src_exec_scope, dst_exec_scope, src_stage_accesses, dst_stage_accesses, memoryBarrierCount,
John Zulauf0cb5be22020-01-23 12:18:22 -07001116 pMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001117}
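// Illustrative sketch (not part of the validator): a correctly scoped global barrier whose effect the
// record path above applies to the access context. The handle (cmd) is hypothetical.
//
//   VkMemoryBarrier mem_barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER};
//   mem_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//   mem_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//   vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                        0, 1, &mem_barrier, 0, nullptr, 0, nullptr);
//   // ApplyGlobalBarriers records the source/destination execution and access scopes, so a later
//   // transfer read of a range written before this barrier no longer reports a hazard.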
1118
1119void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
1120 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
1121 // The state tracker sets up the device state
1122 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
1123
John Zulauf5f13a792020-03-10 07:31:21 -06001124 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
1125 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06001126 // TODO: Find a good way to do this hooklessly.
1127 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
1128 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
1129 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
1130
1131 sync_device_state->SetCommandBufferResetCallback(
1132 [sync_device_state](VkCommandBuffer command_buffer) -> void { sync_device_state->ResetCommandBuffer(command_buffer); });
1133}
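// Sketch (assumption, not the actual ValidationStateTracker code): the reset callback registered above
// presumably resembles a stored std::function that the state tracker invokes whenever it resets a
// command buffer, roughly:
//
//   // hypothetical shape of the hook on the state tracker side:
//   std::function<void(VkCommandBuffer)> command_buffer_reset_callback;
//   void SetCommandBufferResetCallback(std::function<void(VkCommandBuffer)> cb) {
//       command_buffer_reset_callback = std::move(cb);
//   }
//   // ...later, when a command buffer (or its pool) is reset:
//   if (command_buffer_reset_callback) command_buffer_reset_callback(command_buffer);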
John Zulauf3d84f1b2020-03-09 13:33:25 -06001134
1135void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
1136 VkResult result) {
1137 // The state tracker sets up the command buffer state
1138 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
1139
1140 // Create/initialize the structure that tracks accesses at the command buffer scope.
1141 auto cb_access_context = GetAccessContext(commandBuffer);
1142 assert(cb_access_context);
1143 cb_access_context->Reset();
1144}
1145
1146void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1147 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1148 const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
1149 auto cb_context = GetAccessContext(commandBuffer);
1150 if (rp_state && cb_context) {
1151 cb_context->BeginRenderPass(*rp_state);
1152 }
1153}
1154
1155void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1156 VkSubpassContents contents) {
1157 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
1158 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1159 subpass_begin_info.contents = contents;
1160 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info);
1161}
1162
1163void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1164 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1165 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1166 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1167}
1168
1169void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
1170 const VkRenderPassBeginInfo *pRenderPassBegin,
1171 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1172 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1173 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1174}
1175
1176void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1177 const VkSubpassEndInfo *pSubpassEndInfo) {
1178 auto cb_context = GetAccessContext(commandBuffer);
1179 assert(cb_context);
1180 auto cb_state = cb_context->GetCommandBufferState();
1181 if (!cb_state) return;
1182
1183 auto rp_state = cb_state->activeRenderPass;
1184 if (!rp_state) return;
1185
1186 cb_context->NextRenderPass(*rp_state);
1187}
1188
1189void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
1190 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
1191 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1192 subpass_begin_info.contents = contents;
1193 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr);
1194}
1195
1196void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1197 const VkSubpassEndInfo *pSubpassEndInfo) {
1198 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1199 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1200}
1201
1202void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1203 const VkSubpassEndInfo *pSubpassEndInfo) {
1204 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1205 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1206}
1207
John Zulaufe5da6e52020-03-18 15:32:18 -06001208void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1209 // Resolve all of the subpass contexts to the command buffer context
1210 auto cb_context = GetAccessContext(commandBuffer);
1211 assert(cb_context);
1212 auto cb_state = cb_context->GetCommandBufferState();
1213 if (!cb_state) return;
1214
1215 const auto *rp_state = cb_state->activeRenderPass;
1216 if (!rp_state) return;
1217
1218 cb_context->EndRenderPass(*rp_state);
1219}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001220
1221void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
1222 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
1223 RecordCmdEndRenderPass(commandBuffer, nullptr);
1224}
1225
1226void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1227 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
1228 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
1229}
1230
1231void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1232 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
1233 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
1234}
locke-lunarga19c71d2020-03-02 18:17:04 -07001235
1236bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1237 VkImageLayout dstImageLayout, uint32_t regionCount,
1238 const VkBufferImageCopy *pRegions) const {
1239 bool skip = false;
1240 const auto *cb_access_context = GetAccessContext(commandBuffer);
1241 assert(cb_access_context);
1242 if (!cb_access_context) return skip;
1243
1244 const auto *context = cb_access_context->GetCurrentAccessContext();
1245 assert(context);
1246 if (!context) return skip;
1247
1248 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
1249 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1250 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
1251
1252 for (uint32_t region = 0; region < regionCount; region++) {
1253 const auto &copy_region = pRegions[region];
1254 if (src_mem) {
1255 ResourceAccessRange src_range = MakeMemoryAccessRange(
1256 *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
1257 auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
1258 SYNC_TRANSFER_TRANSFER_READ, src_range);
1259 if (hazard.hazard) {
1260 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001261 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
1262 "vkCmdCopyBufferToImage: Hazard %s for srcBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001263 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
1264 }
1265 }
1266 if (dst_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001267 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001268 copy_region.imageOffset, copy_region.imageExtent);
1269 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001270 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1271 "vkCmdCopyBufferToImage: Hazard %s for dstImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001272 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
1273 }
1274 if (skip) break;
1275 }
1276 if (skip) break;
1277 }
1278 return skip;
1279}
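// Illustrative sketch (not part of the validator): a staging upload followed by an overlapping upload
// that the check above would report. The handles (cmd, staging, texture) are hypothetical.
//
//   VkBufferImageCopy rgn = {};
//   rgn.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   rgn.imageExtent = {128, 128, 1};
//   vkCmdCopyBufferToImage(cmd, staging, texture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &rgn);
//   vkCmdCopyBufferToImage(cmd, staging, texture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &rgn);
//   // The second copy writes the same image subresource region with no barrier in between, so the
//   // dstImage check reports a WRITE_AFTER_WRITE hazard; the srcBuffer reads alone do not hazard.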
1280
1281void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1282 VkImageLayout dstImageLayout, uint32_t regionCount,
1283 const VkBufferImageCopy *pRegions) {
1284 auto *cb_access_context = GetAccessContext(commandBuffer);
1285 assert(cb_access_context);
1286 auto *context = cb_access_context->GetCurrentAccessContext();
1287 assert(context);
1288
1289 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
1290 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1291 const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf5f13a792020-03-10 07:31:21 -06001292
locke-lunarga19c71d2020-03-02 18:17:04 -07001293 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001294
1295 for (uint32_t region = 0; region < regionCount; region++) {
1296 const auto &copy_region = pRegions[region];
1297 if (src_buffer) {
1298 ResourceAccessRange src_range = MakeMemoryAccessRange(
1299 *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
John Zulauf5f13a792020-03-10 07:31:21 -06001300 context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001301 }
1302 if (dst_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001303 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001304 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001305 }
1306 }
1307}
1308
1309bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
1310 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
1311 const VkBufferImageCopy *pRegions) const {
1312 bool skip = false;
1313 const auto *cb_access_context = GetAccessContext(commandBuffer);
1314 assert(cb_access_context);
1315 if (!cb_access_context) return skip;
1316
1317 const auto *context = cb_access_context->GetCurrentAccessContext();
1318 assert(context);
1319 if (!context) return skip;
1320
1321 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1322 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1323 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1324 for (uint32_t region = 0; region < regionCount; region++) {
1325 const auto &copy_region = pRegions[region];
1326 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001327 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001328 copy_region.imageOffset, copy_region.imageExtent);
1329 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001330 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1331 "vkCmdCopyImageToBuffer: Hazard %s for srcImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001332 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
1333 }
1334 }
1335 if (dst_mem) {
1336 ResourceAccessRange dst_range = MakeMemoryAccessRange(
1337 *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
1338 auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
1339 SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
1340 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001341 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
1342 "vkCmdCopyImageToBuffer: Hazard %s for dstBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001343 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
1344 }
1345 }
1346 if (skip) break;
1347 }
1348 return skip;
1349}
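// Illustrative sketch (not part of the validator): a readback issued while the source image is still
// being written by a previous transfer. The handles (cmd, staging, texture, readback) are hypothetical.
//
//   VkBufferImageCopy rgn = {};
//   rgn.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   rgn.imageExtent = {128, 128, 1};
//   vkCmdCopyBufferToImage(cmd, staging, texture, VK_IMAGE_LAYOUT_GENERAL, 1, &rgn);
//   vkCmdCopyImageToBuffer(cmd, texture, VK_IMAGE_LAYOUT_GENERAL, readback, 1, &rgn);
//   // Without a transfer-to-transfer barrier, the image read in the second copy hazards against the
//   // transfer write from the first, so the srcImage check reports a READ_AFTER_WRITE hazard.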
1350
1351void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1352 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
1353 auto *cb_access_context = GetAccessContext(commandBuffer);
1354 assert(cb_access_context);
1355 auto *context = cb_access_context->GetCurrentAccessContext();
1356 assert(context);
1357
1358 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001359 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1360 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06001361 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07001362
1363 for (uint32_t region = 0; region < regionCount; region++) {
1364 const auto &copy_region = pRegions[region];
1365 if (src_image) {
John Zulauf540266b2020-04-06 18:54:53 -06001366 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001367 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001368 }
1369 if (dst_buffer) {
1370 ResourceAccessRange dst_range = MakeMemoryAccessRange(
1371 *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
John Zulauf5f13a792020-03-10 07:31:21 -06001372 context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001373 }
1374 }
1375}
1376
1377bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1378 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1379 const VkImageBlit *pRegions, VkFilter filter) const {
1380 bool skip = false;
1381 const auto *cb_access_context = GetAccessContext(commandBuffer);
1382 assert(cb_access_context);
1383 if (!cb_access_context) return skip;
1384
1385 const auto *context = cb_access_context->GetCurrentAccessContext();
1386 assert(context);
1387 if (!context) return skip;
1388
1389 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1390 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
1391
1392 for (uint32_t region = 0; region < regionCount; region++) {
1393 const auto &blit_region = pRegions[region];
1394 if (src_image) {
1395 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1396 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1397 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001398 auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001399 blit_region.srcOffsets[0], extent);
1400 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001401 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1402 "vkCmdBlitImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1403 report_data->FormatHandle(srcImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001404 }
1405 }
1406
1407 if (dst_image) {
1408 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1409 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1410 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001411 auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001412 blit_region.dstOffsets[0], extent);
1413 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001414 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1415 "vkCmdBlitImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1416 report_data->FormatHandle(dstImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001417 }
1418 if (skip) break;
1419 }
1420 }
1421
1422 return skip;
1423}
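// Illustrative sketch (not part of the validator): overlapping blits to the same destination region
// that the check above would report. The handles (cmd, src, dst) are hypothetical.
//
//   VkImageBlit blit = {};
//   blit.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   blit.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   blit.srcOffsets[1] = {64, 64, 1};
//   blit.dstOffsets[1] = {128, 128, 1};
//   vkCmdBlitImage(cmd, src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                  dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_LINEAR);
//   vkCmdBlitImage(cmd, src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                  dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_LINEAR);
//   // The dstOffsets of the second blit cover the same extent as the first with no barrier in
//   // between, so the dstImage check reports a WRITE_AFTER_WRITE hazard for region 0.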
1424
1425void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1426 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1427 const VkImageBlit *pRegions, VkFilter filter) {
1428 auto *cb_access_context = GetAccessContext(commandBuffer);
1429 assert(cb_access_context);
1430 auto *context = cb_access_context->GetCurrentAccessContext();
1431 assert(context);
1432
1433 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001434 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001435
1436 for (uint32_t region = 0; region < regionCount; region++) {
1437 const auto &blit_region = pRegions[region];
1438 if (src_image) {
1439 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1440 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1441 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001442 context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001443 blit_region.srcOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001444 }
1445 if (dst_image) {
1446 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1447 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1448 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
John Zulauf540266b2020-04-06 18:54:53 -06001449 context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001450 blit_region.dstOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001451 }
1452 }
1453}