/* Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Valve Corporation
 * Copyright (c) 2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 */

#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
            break;
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

// Expand the pipeline stage mask without regard to whether the stages are valid w.r.t. queue or extension
VkPipelineStageFlags ExpandPipelineStages(VkQueueFlags queue_flags, VkPipelineStageFlags stage_mask) {
    VkPipelineStageFlags expanded = stage_mask;
    if (VK_PIPELINE_STAGE_ALL_COMMANDS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        for (const auto &all_commands : syncAllCommandStagesByQueueFlags) {
            if (all_commands.first & queue_flags) {
                expanded |= all_commands.second;
            }
        }
    }
    if (VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT & stage_mask) {
        expanded = expanded & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        expanded |= syncAllCommandStagesByQueueFlags.at(VK_QUEUE_GRAPHICS_BIT) & ~VK_PIPELINE_STAGE_HOST_BIT;
    }
    return expanded;
}

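// Union the related-stage map entries for every stage bit present in stage_mask; used below to compute the sets of
// logically earlier/later pipeline stages.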
VkPipelineStageFlags RelatedPipelineStages(VkPipelineStageFlags stage_mask,
                                           std::map<VkPipelineStageFlagBits, VkPipelineStageFlags> &map) {
    VkPipelineStageFlags unscanned = stage_mask;
    VkPipelineStageFlags related = 0;
    for (const auto entry : map) {
        const auto stage = entry.first;
        if (stage & unscanned) {
            related = related | entry.second;
            unscanned = unscanned & ~stage;
            if (!unscanned) break;
        }
    }
    return related;
}

VkPipelineStageFlags WithEarlierPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyEarlierStages);
}

VkPipelineStageFlags WithLaterPipelineStages(VkPipelineStageFlags stage_mask) {
    return stage_mask | RelatedPipelineStages(stage_mask, syncLogicallyLaterStages);
}

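// full_range spans all offsets of a memory object; buffer ranges below are offset by the buffer's binding offset so
// accesses are tracked against the backing VkDeviceMemory.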
static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
static ResourceAccessRange MakeMemoryAccessRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    assert(!buffer.sparse);
    const auto base = offset + buffer.binding.offset;
    return ResourceAccessRange(base, base + size);
}

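// Build the access context for a subpass from the dependency graph: record barriered links to previous subpasses
// (prev_), unsynchronized subpasses (async_), and the external context (src_external_/dst_external_) when barriers
// from/to VK_SUBPASS_EXTERNAL are present.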
AccessTrackerContext::AccessTrackerContext(uint32_t subpass, VkQueueFlags queue_flags,
                                           const std::vector<SubpassDependencyGraphNode> &dependencies,
                                           const std::vector<AccessTrackerContext> &contexts,
                                           AccessTrackerContext *external_context) {
    Reset();
    const auto &subpass_dep = dependencies[subpass];
    prev_.reserve(subpass_dep.prev.size());
    for (const auto &prev_dep : subpass_dep.prev) {
        assert(prev_dep.dependency);
        const auto dep = *prev_dep.dependency;
        prev_.emplace_back(const_cast<AccessTrackerContext *>(&contexts[dep.srcSubpass]), queue_flags, dep);
    }

    async_.reserve(subpass_dep.async.size());
    for (const auto async_subpass : subpass_dep.async) {
        async_.emplace_back(const_cast<AccessTrackerContext *>(&contexts[async_subpass]));
    }
    if (subpass_dep.barrier_from_external) {
        src_external_ = TrackBack(external_context, queue_flags, *subpass_dep.barrier_from_external);
    } else {
        src_external_ = TrackBack();
    }
    if (subpass_dep.barrier_to_external) {
        dst_external_ = TrackBack(this, queue_flags, *subpass_dep.barrier_to_external);
    } else {
        dst_external_ = TrackBack();
    }
}

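// Detect hazards against the accesses resolved from previous (barriered) contexts for the given range; gaps in the
// resolved map are infilled with a default state.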
template <typename Detector>
HazardResult AccessTrackerContext::DetectPreviousHazard(const VulkanTypedHandle &handle, const Detector &detector,
                                                        const ResourceAccessRange &range) const {
    ResourceAccessRangeMap descent_map;
    ResourceAccessState default_state;  // When present, PreviousAccess will "infill"
    ResolvePreviousAccess(handle, range, &descent_map, &default_state);

    HazardResult hazard;
    for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
        hazard = detector.Detect(prev);
    }
    return hazard;
}

// A recursive range walker for hazard detection: check the current context first, then recur through the DAG of prior
// contexts (for example subpasses) via DetectPreviousHazard for any gaps.
template <typename Detector>
HazardResult AccessTrackerContext::DetectHazard(const VulkanTypedHandle &handle, const Detector &detector,
                                                const ResourceAccessRange &range) const {
    HazardResult hazard;

    // Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
    // so we'll check these first
    for (const auto &async_context : async_) {
        hazard = async_context->DetectAsyncHazard(handle, detector, range);
        if (hazard.hazard) return hazard;
    }

    const auto access_tracker = GetAccessTracker(handle);
    if (access_tracker) {
        const auto &accesses = access_tracker->GetCurrentAccessMap();
        const auto from = accesses.lower_bound(range);
        if (from != accesses.end() && from->first.intersects(range)) {
            const auto to = accesses.upper_bound(range);
            ResourceAccessRange gap = {range.begin, range.begin};
            for (auto pos = from; pos != to; ++pos) {
                hazard = detector.Detect(pos);
                if (hazard.hazard) return hazard;

                // make sure we don't go past range
                auto upper_bound = std::min(range.end, pos->first.end);
                gap.end = upper_bound;

                // TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
                if (!gap.empty()) {
                    // Must recur on all gaps
                    hazard = DetectPreviousHazard(handle, detector, gap);
                    if (hazard.hazard) return hazard;
                }
                gap.begin = upper_bound;
            }
            gap.end = range.end;
            if (gap.non_empty()) {
                hazard = DetectPreviousHazard(handle, detector, gap);
                if (hazard.hazard) return hazard;
            }
        } else {
            hazard = DetectPreviousHazard(handle, detector, range);
        }
    } else {
        hazard = DetectPreviousHazard(handle, detector, range);
    }

    return hazard;
}

// A non recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessTrackerContext::DetectAsyncHazard(const VulkanTypedHandle &handle, const Detector &detector,
                                                     const ResourceAccessRange &range) const {
    const auto access_tracker = GetAccessTracker(handle);
    HazardResult hazard;
    if (access_tracker) {
        const auto &accesses = access_tracker->GetCurrentAccessMap();
        const auto from = accesses.lower_bound(range);
        const auto to = accesses.upper_bound(range);
        for (auto pos = from; pos != to; ++pos) {
            hazard = detector.DetectAsync(pos);
            if (hazard.hazard) break;
        }
    }
    return hazard;
}

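// Resolve this context's accesses for 'range' into *descent_map with track_back.barrier applied, walking the source
// and destination maps in parallel; gaps may recur into the previous context and/or be infilled with *infill_state.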
void AccessTrackerContext::ResolveTrackBack(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
                                            const AccessTrackerContext::TrackBack &track_back, ResourceAccessRangeMap *descent_map,
                                            const ResourceAccessState *infill_state, bool recur_to_infill) const {
    const auto *access_tracker = GetAccessTracker(handle);
    if (access_tracker) {
        sparse_container::parallel_iterator<ResourceAccessRangeMap, const ResourceAccessRangeMap> current(
            *descent_map, access_tracker->GetCurrentAccessMap(), range.begin);
        while (current->range.non_empty()) {
            if (current->pos_B->valid) {
                auto access_with_barrier = current->pos_B->lower_bound->second;
                access_with_barrier.ApplyBarrier(track_back.barrier);
                if (current->pos_A->valid) {
                    // split A to match B's range
                    const auto &dst_range = current->pos_A->lower_bound->first;
                    const auto split_range = current->range & dst_range;
                    auto dst_pos = current->pos_A->lower_bound;

                    if (split_range.begin != dst_range.begin) {
                        dst_pos = descent_map->split(dst_pos, split_range.begin, sparse_container::split_op_keep_both());
                        ++dst_pos;
                    }
                    if (split_range.end != dst_range.end) {
                        dst_pos = descent_map->split(dst_pos, split_range.end, sparse_container::split_op_keep_both());
                    }
                    if (split_range != dst_range) {
                        current.invalidate_A();  // Update the parallel iterator to point at the correct segment after split(s)
                    }
                    current->pos_A->lower_bound->second.Resolve(access_with_barrier);
                } else {
                    descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, access_with_barrier));
                    current.invalidate_A();  // Update the parallel iterator to point at the correct segment after split(s)
                }
            } else {
                // we have to descend to fill this gap
                if (recur_to_infill) {
                    track_back.context->ResolvePreviousAccess(handle, range, descent_map, infill_state);
                    current.invalidate_A();  // Update the parallel iterator to point at the correct segment after recursion.
                }
                if (!current->pos_A->valid && infill_state) {
                    // If we didn't find anything in the previous range, we infill with default to prevent repeating
                    // a fruitless search
                    descent_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
                    current.invalidate_A();  // Update the parallel iterator to point at the correct segment after insert
                }
            }
            ++current;
        }
    }
}

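// Resolve accesses for 'range' from all previous contexts (and the external context, if any) into *descent_map; with
// no predecessors the range is simply infilled with *infill_state.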
void AccessTrackerContext::ResolvePreviousAccess(const VulkanTypedHandle &handle, const ResourceAccessRange &range,
                                                 ResourceAccessRangeMap *descent_map,
                                                 const ResourceAccessState *infill_state) const {
    if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
        if (range.non_empty() && infill_state) {
            descent_map->insert(std::make_pair(range, *infill_state));
        }
    } else {
        // Look for something to fill the gap further along.
        for (const auto &prev_dep : prev_) {
            ResolveTrackBack(handle, range, prev_dep, descent_map, infill_state);
        }

        if (src_external_.context) {
            ResolveTrackBack(handle, range, src_external_, descent_map, infill_state);
        }
    }
}

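// Detector functor for the templated walkers above: checks a single access state for ordinary (non-barrier) hazards.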
class HazardDetector {
    SyncStageAccessIndex usage_index_;

  public:
    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectAsyncHazard(usage_index_);
    }
    HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
};

HazardResult AccessTrackerContext::DetectHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex usage_index,
                                                const ResourceAccessRange &range) const {
    HazardDetector detector(usage_index);
    return DetectHazard(handle, detector, range);
}

void CommandBufferAccessContext::BeginRenderPass(const RENDER_PASS_STATE &rp_state) {
    // Create an access context for the first subpass and add it to the command buffer's collection
    render_pass_contexts_.emplace_back(queue_flags_, &rp_state.subpass_dependencies, &cb_tracker_context_);
    current_renderpass_context_ = &render_pass_contexts_.back();
    current_context_ = &current_renderpass_context_->CurrentContext();

    // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
}

void CommandBufferAccessContext::NextRenderPass(const RENDER_PASS_STATE &rp_state) {
    assert(current_renderpass_context_);
    current_renderpass_context_->NextSubpass(queue_flags_, &cb_tracker_context_);
    // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)
    current_context_ = &current_renderpass_context_->CurrentContext();
}

void CommandBufferAccessContext::EndRenderPass(const RENDER_PASS_STATE &render_pass) {
    std::unordered_set<VulkanTypedHandle> resolved;
    assert(current_renderpass_context_);
    if (!current_renderpass_context_) return;

    const auto &contexts = current_renderpass_context_->subpass_contexts_;

    // TODO: Add layout load/store/transition/resolve access (here or in RenderPassContext)

    for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
        auto &context = contexts[subpass_index];
        for (const auto &tracker_pair : context.GetAccessTrackerMap()) {
            if (tracker_pair.second.GetCurrentAccessMap().size() == 0) continue;
            auto insert_pair = resolved.insert(tracker_pair.first);
            if (insert_pair.second) {  // only create the resolve map for this handle if we haven't seen it before
                // This is the first time we've seen this handle accessed, resolve this for all subsequent subpasses
                ResourceAccessRangeMap resolve_map;
                auto resolve_index = static_cast<uint32_t>(contexts.size());
                while (resolve_index > subpass_index) {
                    resolve_index--;
                    const auto &from_context = contexts[resolve_index];
                    from_context.ResolveTrackBack(tracker_pair.first, full_range, from_context.GetDstExternalTrackBack(),
                                                  &resolve_map, nullptr, false);
                }
                // Given that all DAG paths lead back to the src_external_ (if only a default one) we can just overwrite.
                sparse_container::splice(&cb_tracker_context_.GetAccessTracker(tracker_pair.first)->GetCurrentAccessMap(),
                                         resolve_map, sparse_container::value_precedence::prefer_source);
                // TODO: This might be a place to consolidate the map
            }
        }
    }
    current_context_ = &cb_tracker_context_;
    current_renderpass_context_ = nullptr;
}

HazardResult AccessTrackerContext::DetectHazard(const CMD_BUFFER_STATE &cmd, const IMAGE_STATE &image,
                                                SyncStageAccessIndex current_usage, const VkImageSubresourceLayers &subresource,
                                                const VkOffset3D &offset, const VkExtent3D &extent) const {
    // TODO: replace the encoder/generator with offset3D/extent3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, offset, extent);
    VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = DetectHazard(image_handle, current_usage, *range_gen);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

class BarrierHazardDetector {
  public:
    BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                          SyncStageAccessFlags src_access_scope)
        : usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}

    HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
        return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
    }
    HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos) const {
        // Async barrier hazard detection can use the same path as the usage index is not IsRead, but is IsWrite
        return pos->second.DetectAsyncHazard(usage_index_);
    }

  private:
    SyncStageAccessIndex usage_index_;
    VkPipelineStageFlags src_exec_scope_;
    SyncStageAccessFlags src_access_scope_;
};

HazardResult AccessTrackerContext::DetectBarrierHazard(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
                                                       VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
                                                       const ResourceAccessRange &range) const {
    BarrierHazardDetector detector(current_usage, src_exec_scope, src_access_scope);
    return DetectHazard(handle, detector, range);
}

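// Check an image memory barrier's layout transition (treated as a write) against the recorded accesses over the
// barrier's subresource range.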
HazardResult DetectImageBarrierHazard(const CMD_BUFFER_STATE &cmd, const AccessTrackerContext &context, const IMAGE_STATE &image,
                                      VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_stage_accesses,
                                      const VkImageMemoryBarrier &barrier) {
    auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
    const VulkanTypedHandle image_handle(image.image, kVulkanObjectTypeImage);
    const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
    subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
    subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image.createInfo.extent);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = context.DetectBarrierHazard(image_handle, SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION,
                                                          src_exec_scope, src_access_scope, *range_gen);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

template <typename Flags, typename Map>
SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
    SyncStageAccessFlags scope = 0;
    for (const auto &bit_scope : map) {
        if (flag_mask < bit_scope.first) break;

        if (flag_mask & bit_scope.first) {
            scope |= bit_scope.second;
        }
    }
    return scope;
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
    return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
    return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
}

// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
    // accesses. After factoring out common terms, the union of the per-stage/per-access intersections equals the
    // intersection of the union of all stage/access types for the stages with the same union for the access mask.
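    // For example, a stage mask of only VK_PIPELINE_STAGE_VERTEX_SHADER_BIT combined with an access mask of only
    // VK_ACCESS_SHADER_READ_BIT yields just the vertex-shader/shader-read stage-access bit.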
    return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
}

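// Apply 'action' to every access state within 'range': existing entries are split at the range boundaries, and gaps
// are filled via action.Infill() before the action is applied to them.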
template <typename Action>
void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
    // TODO -- region/mem-range accurate update
473 auto pos = accesses->lower_bound(range);
474 if (pos == accesses->end() || !pos->first.intersects(range)) {
475 // The range is empty, fill it with a default value.
476 pos = action.Infill(accesses, pos, range);
477 } else if (range.begin < pos->first.begin) {
478 // Leading empty space, infill
John Zulauf5c5e88d2019-12-26 11:22:02 -0700479 pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
John Zulauf9cb530d2019-09-30 14:14:10 -0600480 } else if (pos->first.begin < range.begin) {
481 // Trim the beginning if needed
482 pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
483 ++pos;
484 }
485
486 const auto the_end = accesses->end();
487 while ((pos != the_end) && pos->first.intersects(range)) {
488 if (pos->first.end > range.end) {
489 pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
490 }
491
492 pos = action(accesses, pos);
493 if (pos == the_end) break;
494
495 auto next = pos;
496 ++next;
497 if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
498 // Need to infill if next is disjoint
499 VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
John Zulauf5c5e88d2019-12-26 11:22:02 -0700500 ResourceAccessRange new_range(pos->first.end, limit);
John Zulauf9cb530d2019-09-30 14:14:10 -0600501 next = action.Infill(accesses, next, new_range);
502 }
503 pos = next;
504 }
505}
506
507struct UpdateMemoryAccessStateFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700508 using Iterator = ResourceAccessRangeMap::iterator;
509 Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
John Zulauf5f13a792020-03-10 07:31:21 -0600510 // this is only called on gaps, and never returns a gap.
511 ResourceAccessState default_state;
512 context.ResolvePreviousAccess(handle, range, accesses, &default_state);
513 return accesses->lower_bound(range);
John Zulauf9cb530d2019-09-30 14:14:10 -0600514 }
John Zulauf5f13a792020-03-10 07:31:21 -0600515
John Zulauf5c5e88d2019-12-26 11:22:02 -0700516 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600517 auto &access_state = pos->second;
518 access_state.Update(usage, tag);
519 return pos;
520 }
521
John Zulauf5f13a792020-03-10 07:31:21 -0600522 UpdateMemoryAccessStateFunctor(const VulkanTypedHandle &handle_, const AccessTrackerContext &context_,
523 SyncStageAccessIndex usage_, const ResourceUsageTag &tag_)
524 : handle(handle_), context(context_), usage(usage_), tag(tag_) {}
525 const VulkanTypedHandle handle;
526 const AccessTrackerContext &context;
John Zulauf9cb530d2019-09-30 14:14:10 -0600527 SyncStageAccessIndex usage;
528 const ResourceUsageTag &tag;
529};
530
531struct ApplyMemoryAccessBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700532 using Iterator = ResourceAccessRangeMap::iterator;
533 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600534
John Zulauf5c5e88d2019-12-26 11:22:02 -0700535 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600536 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700537 access_state.ApplyMemoryAccessBarrier(src_exec_scope, src_access_scope, dst_exec_scope, dst_access_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600538 return pos;
539 }
540
John Zulauf36bcf6a2020-02-03 15:12:52 -0700541 ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_exec_scope_, SyncStageAccessFlags src_access_scope_,
542 VkPipelineStageFlags dst_exec_scope_, SyncStageAccessFlags dst_access_scope_)
543 : src_exec_scope(src_exec_scope_),
544 src_access_scope(src_access_scope_),
545 dst_exec_scope(dst_exec_scope_),
546 dst_access_scope(dst_access_scope_) {}
John Zulauf9cb530d2019-09-30 14:14:10 -0600547
John Zulauf36bcf6a2020-02-03 15:12:52 -0700548 VkPipelineStageFlags src_exec_scope;
549 SyncStageAccessFlags src_access_scope;
550 VkPipelineStageFlags dst_exec_scope;
551 SyncStageAccessFlags dst_access_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600552};
553
554struct ApplyGlobalBarrierFunctor {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700555 using Iterator = ResourceAccessRangeMap::iterator;
556 inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
John Zulauf9cb530d2019-09-30 14:14:10 -0600557
John Zulauf5c5e88d2019-12-26 11:22:02 -0700558 Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
John Zulauf9cb530d2019-09-30 14:14:10 -0600559 auto &access_state = pos->second;
John Zulauf36bcf6a2020-02-03 15:12:52 -0700560 access_state.ApplyExecutionBarrier(src_exec_scope, dst_exec_scope);
John Zulauf9cb530d2019-09-30 14:14:10 -0600561
562 for (const auto &functor : barrier_functor) {
563 functor(accesses, pos);
564 }
565 return pos;
566 }
567
John Zulauf36bcf6a2020-02-03 15:12:52 -0700568 ApplyGlobalBarrierFunctor(VkPipelineStageFlags src_exec_scope, VkPipelineStageFlags dst_exec_scope,
569 SyncStageAccessFlags src_stage_accesses, SyncStageAccessFlags dst_stage_accesses,
John Zulauf9cb530d2019-09-30 14:14:10 -0600570 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
John Zulauf36bcf6a2020-02-03 15:12:52 -0700571 : src_exec_scope(src_exec_scope), dst_exec_scope(dst_exec_scope) {
John Zulauf9cb530d2019-09-30 14:14:10 -0600572 // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
573 barrier_functor.reserve(memoryBarrierCount);
574 for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
575 const auto &barrier = pMemoryBarriers[barrier_index];
John Zulauf36bcf6a2020-02-03 15:12:52 -0700576 barrier_functor.emplace_back(src_exec_scope, SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask),
577 dst_exec_scope, SyncStageAccess::AccessScope(dst_stage_accesses, barrier.dstAccessMask));
John Zulauf9cb530d2019-09-30 14:14:10 -0600578 }
579 }
580
John Zulauf36bcf6a2020-02-03 15:12:52 -0700581 const VkPipelineStageFlags src_exec_scope;
582 const VkPipelineStageFlags dst_exec_scope;
John Zulauf9cb530d2019-09-30 14:14:10 -0600583 std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
584};
585
John Zulauf3d84f1b2020-03-09 13:33:25 -0600586void AccessTrackerContext::UpdateAccessState(const VulkanTypedHandle &handle, SyncStageAccessIndex current_usage,
587 const ResourceAccessRange &range, const ResourceUsageTag &tag) {
John Zulauf5f13a792020-03-10 07:31:21 -0600588 UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600589 auto *tracker = GetAccessTracker(handle);
590 assert(tracker);
John Zulauf5f13a792020-03-10 07:31:21 -0600591 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), range, action);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600592}
593
locke-lunarg296a3c92020-03-25 01:04:29 -0600594void AccessTrackerContext::UpdateAccessState(const CMD_BUFFER_STATE &cmd, const IMAGE_STATE &image,
595 SyncStageAccessIndex current_usage, const VkImageSubresourceLayers &subresource,
596 const VkOffset3D &offset, const VkExtent3D &extent, const ResourceUsageTag &tag) {
John Zulauf5c5e88d2019-12-26 11:22:02 -0700597 // TODO: replace the encoder/generator with offset3D aware versions
598 VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
599 subresource.layerCount};
locke-lunarg296a3c92020-03-25 01:04:29 -0600600 subresource_adapter::ImageRangeEncoder encoder(image.store_device_as_workaround, image);
601 subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, offset, extent);
John Zulauf3d84f1b2020-03-09 13:33:25 -0600602 const VulkanTypedHandle handle(image.image, kVulkanObjectTypeImage);
603 auto *tracker = GetAccessTracker(handle);
604 assert(tracker);
John Zulauf5f13a792020-03-10 07:31:21 -0600605
606 UpdateMemoryAccessStateFunctor action(handle, *this, current_usage, tag);
607 for (; range_gen->non_empty(); ++range_gen) {
608 UpdateMemoryAccessState(&tracker->GetCurrentAccessMap(), *range_gen, action);
609 }
John Zulauf3d84f1b2020-03-09 13:33:25 -0600610}
611
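// Convert a subpass dependency into a SyncBarrier: expand meta-stages for the queue, widen the execution scopes with
// logically earlier/later stages, and derive the matching access scopes.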
SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &barrier) {
    const auto src_stage_mask = ExpandPipelineStages(queue_flags, barrier.srcStageMask);
    src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
    src_access_scope = SyncStageAccess::AccessScope(src_stage_mask, barrier.srcAccessMask);
    const auto dst_stage_mask = ExpandPipelineStages(queue_flags, barrier.dstStageMask);
    dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
    dst_access_scope = SyncStageAccess::AccessScope(dst_stage_mask, barrier.dstAccessMask);
}

void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier) {
    ApplyExecutionBarrier(barrier.src_exec_scope, barrier.dst_exec_scope);
    ApplyMemoryAccessBarrier(barrier.src_exec_scope, barrier.src_access_scope, barrier.dst_exec_scope, barrier.dst_access_scope);
}

ResourceAccessState ResourceAccessState::ApplyBarrierStack(const ResourceAccessState &that, const SyncBarrierStack &barrier_stack) {
    ResourceAccessState copy = that;
    for (auto barrier = barrier_stack.begin(); barrier != barrier_stack.end(); ++barrier) {
        assert(*barrier);
        copy.ApplyBarrier(*(*barrier));
    }
    return copy;
}

HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, SyncBarrierStack *barrier_stack) const {
    if (barrier_stack) {
        return ApplyBarrierStack(*this, *barrier_stack).DetectHazard(usage_index);
    }
    return DetectHazard(usage_index);
}

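// Stateless hazard check of a new usage against the most recent write and the per-stage read history (RAW/WAW/WAR).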
HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (IsWriteHazard(usage)) {
            hazard.Set(READ_AFTER_WRITE, write_tag);
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE usage states if any
        // Write-After-Write check -- if we have a previous write to test against
        if (last_write && IsWriteHazard(usage)) {
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        } else {
            // Only look for casus belli for WAR
            const auto usage_stage = PipelineStageBit(usage_index);
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                if (IsReadHazard(usage_stage, last_reads[read_index])) {
                    hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
                    break;
                }
            }
        }
    }
    return hazard;
}

// Asynchronous Hazards occur between subpasses with no connection through the DAG
HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (last_write != 0) {
            hazard.Set(READ_RACING_WRITE, write_tag);
        }
    } else {
        if (last_write != 0) {
            hazard.Set(WRITE_RACING_WRITE, write_tag);
        } else if (last_read_count > 0) {
            hazard.Set(WRITE_RACING_READ, last_reads[0].tag);
        }
    }
    return hazard;
}

HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                                                      SyncStageAccessFlags src_access_scope,
                                                      SyncBarrierStack *barrier_stack) const {
    if (barrier_stack) {
        return ApplyBarrierStack(*this, *barrier_stack).DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
    }
    return DetectBarrierHazard(usage_index, src_exec_scope, src_access_scope);
}

HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
                                                      SyncStageAccessFlags src_access_scope) const {
    // Only supporting image layout transitions for now
    assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
    HazardResult hazard;
    if (last_write) {
        // If the previous write is *not* in the 1st access scope
        // *AND* the current barrier is not in the dependency chain
        // *AND* there is no prior memory barrier for the previous write in the dependency chain
        // then the barrier access is unsafe (R/W after W)
        if (((last_write & src_access_scope) == 0) && ((src_exec_scope & write_dependency_chain) == 0) && (write_barriers == 0)) {
            // TODO: Do we need a different hazard name for this?
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        }
    } else {
        // Look at the reads
        for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
            const auto &read_access = last_reads[read_index];
            // If the read stage is not in the src sync scope
            // *AND* not execution chained with an existing sync barrier (that's the or)
            // then the barrier access is unsafe (R/W after R)
            if ((src_exec_scope & (read_access.stage | read_access.barriers)) == 0) {
                hazard.Set(WRITE_AFTER_READ, read_access.tag);
                break;
            }
        }
    }
    return hazard;
}

// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
void ResourceAccessState::Resolve(const ResourceAccessState &other) {
    if (write_tag.IsBefore(other.write_tag)) {
        // If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent operation
        *this = other;
    } else if (!other.write_tag.IsBefore(write_tag)) {
        // This is the *equals* case for write operations: we merge the write barriers and the read state (but without the
        // dependency chaining logic or any stage expansion)
        write_barriers |= other.write_barriers;

        // Merge the read states
        for (uint32_t other_read_index = 0; other_read_index < other.last_read_count; other_read_index++) {
            auto &other_read = other.last_reads[other_read_index];
            if (last_read_stages & other_read.stage) {
                // Merge in the barriers for read stages that exist in *both* this and other
                // TODO: This is N^2 with stages... perhaps the ReadStates should be by stage index.
                for (uint32_t my_read_index = 0; my_read_index < last_read_count; my_read_index++) {
                    auto &my_read = last_reads[my_read_index];
                    if (other_read.stage == my_read.stage) {
                        if (my_read.tag.IsBefore(other_read.tag)) {
                            my_read.tag = other_read.tag;
                        }
                        my_read.barriers |= other_read.barriers;
                        break;
                    }
                }
            } else {
                // The other read stage doesn't exist in this, so add it.
                last_reads[last_read_count] = other_read;
                last_read_count++;
                last_read_stages |= other_read.stage;
            }
        }
    }  // the else clause would be that the other write is before this write... in which case we supersede the other state
       // and ignore it.
}

void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
    // Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource)...
    const auto usage_bit = FlagBit(usage_index);
    if (IsRead(usage_index)) {
        // Multiple outstanding reads may be of interest and do dependency chains independently
        // However, for purposes of barrier tracking, only one read per pipeline stage matters
        const auto usage_stage = PipelineStageBit(usage_index);
        if (usage_stage & last_read_stages) {
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                ReadState &access = last_reads[read_index];
                if (access.stage == usage_stage) {
                    access.barriers = 0;
                    access.tag = tag;
                    break;
                }
            }
        } else {
            // We don't have this stage in the list yet...
            assert(last_read_count < last_reads.size());
            ReadState &access = last_reads[last_read_count++];
            access.stage = usage_stage;
            access.barriers = 0;
            access.tag = tag;
            last_read_stages |= usage_stage;
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE operations if any
        // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
        // if the last_reads/last_write were unsafe, we've reported them,
        // in either case the prior access is irrelevant, we can overwrite them as *this* write is now after them
        last_read_count = 0;
        last_read_stages = 0;

        write_barriers = 0;
        write_dependency_chain = 0;
        write_tag = tag;
        last_write = usage_bit;
    }
}

void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
    // Execution Barriers only protect read operations
    for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
        ReadState &access = last_reads[read_index];
        // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
        if (srcStageMask & (access.stage | access.barriers)) {
            access.barriers |= dstStageMask;
        }
    }
    if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
}

void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_access_scope,
                                                   VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_access_scope) {
    // Assuming we've applied the execution side of this barrier, we update just the write
    // The || implements the "dependency chain" logic for this barrier
    if ((src_access_scope & last_write) || (write_dependency_chain & src_exec_scope)) {
        write_barriers |= dst_access_scope;
        write_dependency_chain |= dst_exec_scope;
    }
}

void SyncValidator::ResetCommandBuffer(VkCommandBuffer command_buffer) {
    auto *access_context = GetAccessContextNoInsert(command_buffer);
    if (access_context) {
        access_context->Reset();
    }
}

void SyncValidator::ApplyGlobalBarriers(AccessTrackerContext *context, VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_access_scope,
                                        SyncStageAccessFlags dst_access_scope, uint32_t memoryBarrierCount,
                                        const VkMemoryBarrier *pMemoryBarriers) {
    // TODO: Implement this better (maybe some delayed/on-demand integration).
    ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_access_scope, dst_access_scope, memoryBarrierCount,
                                               pMemoryBarriers);
    // Note: Barriers do *not* cross context boundaries, applying only to accesses within... (at least for renderpass subpasses)
    for (auto &handle_tracker_pair : context->GetAccessTrackerMap()) {
        UpdateMemoryAccessState(&handle_tracker_pair.second.GetCurrentAccessMap(), full_range, barriers_functor);
    }
}

void SyncValidator::ApplyBufferBarriers(AccessTrackerContext *context, VkPipelineStageFlags src_exec_scope,
                                        SyncStageAccessFlags src_stage_accesses, VkPipelineStageFlags dst_exec_scope,
                                        SyncStageAccessFlags dst_stage_accesses, uint32_t barrier_count,
                                        const VkBufferMemoryBarrier *barriers) {
    // TODO Implement this at subresource/memory_range accuracy
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
        if (!buffer) continue;
        auto *tracker = context->GetAccessTracker(VulkanTypedHandle(buffer->binding.mem_state->mem, kVulkanObjectTypeDeviceMemory));
        if (!tracker) continue;
        ResourceAccessRange range = MakeMemoryAccessRange(*buffer, barrier.offset, barrier.size);
        UpdateMemoryAccessState(
            &tracker->GetCurrentAccessMap(), range,
            ApplyMemoryAccessBarrierFunctor(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask), dst_exec_scope,
                                            AccessScope(dst_stage_accesses, barrier.dstAccessMask)));
    }
}

void SyncValidator::ApplyImageBarriers(const CMD_BUFFER_STATE &cmd, AccessTrackerContext *context,
                                       VkPipelineStageFlags src_exec_scope, SyncStageAccessFlags src_stage_accesses,
                                       VkPipelineStageFlags dst_exec_scope, SyncStageAccessFlags dst_stage_accesses,
                                       uint32_t barrier_count, const VkImageMemoryBarrier *barriers) {
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto *image = Get<IMAGE_STATE>(barrier.image);
        if (!image) continue;
        auto tracker = context->GetAccessTrackerNoInsert(VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage));
        if (!tracker) continue;
        auto *accesses = &tracker->GetCurrentAccessMap();

        auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
        subresource_adapter::ImageRangeEncoder encoder(image->store_device_as_workaround, *image);
        subresource_adapter::ImageRangeGenerator range_gen(encoder, subresource_range, {0, 0, 0}, image->createInfo.extent);

        const ApplyMemoryAccessBarrierFunctor barrier_action(src_exec_scope, AccessScope(src_stage_accesses, barrier.srcAccessMask),
                                                             dst_exec_scope,
                                                             AccessScope(dst_stage_accesses, barrier.dstAccessMask));
        for (; range_gen->non_empty(); ++range_gen) {
            UpdateMemoryAccessState(accesses, *range_gen, barrier_action);
        }
    }
}

bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                 uint32_t regionCount, const VkBufferCopy *pRegions) const {
    bool skip = false;
    const auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    if (!cb_context) return skip;
    const auto *context = cb_context->GetCurrentAccessContext();

    // If we have no previous accesses, we have no hazards
    // TODO: make this sub-resource capable
    // TODO: make this general, and stuff it into templates/utility functions
    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_mem != VK_NULL_HANDLE) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_READ, src_range);
            if (hazard.hazard) {
                // TODO -- add tag information to log msg when useful.
                skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(srcBuffer).c_str(), region);
            }
        }
        if ((dst_mem != VK_NULL_HANDLE) && !skip) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
                                                SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
            if (hazard.hazard) {
                skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(dstBuffer).c_str(), region);
            }
        }
        if (skip) break;
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                               uint32_t regionCount, const VkBufferCopy *pRegions) {
    auto *cb_context = GetAccessContext(commandBuffer);
    assert(cb_context);
    auto *context = cb_context->GetCurrentAccessContext();

    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);

    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
    const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_mem) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
        }
        if (dst_mem) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageCopy *pRegions) const {
    bool skip = false;
    const auto *cb_access_context = GetAccessContext(commandBuffer);
    assert(cb_access_context);
    if (!cb_access_context) return skip;

    const auto *context = cb_access_context->GetCurrentAccessContext();
    assert(context);
    if (!context) return skip;

    const auto *src_image = Get<IMAGE_STATE>(srcImage);
    const auto *dst_image = Get<IMAGE_STATE>(dstImage);
    const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_image) {
            auto hazard = context->DetectHazard(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
                                                copy_region.srcOffset, copy_region.extent);
            if (hazard.hazard) {
                skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(srcImage).c_str(), region);
            }
        }

        if (dst_image) {
            VkExtent3D dst_copy_extent =
                GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
            auto hazard = context->DetectHazard(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
                                                copy_region.dstOffset, dst_copy_extent);
            if (hazard.hazard) {
                skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
                                 "vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
                                 report_data->FormatHandle(dstImage).c_str(), region);
            }
            if (skip) break;
        }
    }

    return skip;
}

1007void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1008 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1009 const VkImageCopy *pRegions) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001010 auto *cb_access_context = GetAccessContext(commandBuffer);
1011 assert(cb_access_context);
1012 auto *context = cb_access_context->GetCurrentAccessContext();
1013 assert(context);
1014
John Zulauf5c5e88d2019-12-26 11:22:02 -07001015 auto *src_image = Get<IMAGE_STATE>(srcImage);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001016 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarg296a3c92020-03-25 01:04:29 -06001017 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001018
1019 for (uint32_t region = 0; region < regionCount; region++) {
1020 const auto &copy_region = pRegions[region];
John Zulauf3d84f1b2020-03-09 13:33:25 -06001021 if (src_image) {
locke-lunarg296a3c92020-03-25 01:04:29 -06001022 context->UpdateAccessState(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
1023 copy_region.srcOffset, copy_region.extent, tag);
John Zulauf5c5e88d2019-12-26 11:22:02 -07001024 }
John Zulauf3d84f1b2020-03-09 13:33:25 -06001025 if (dst_image) {
locke-lunarg1df1f882020-03-02 16:42:08 -07001026 VkExtent3D dst_copy_extent =
1027 GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
locke-lunarg296a3c92020-03-25 01:04:29 -06001028 context->UpdateAccessState(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
1029 copy_region.dstOffset, dst_copy_extent, tag);
John Zulauf9cb530d2019-09-30 14:14:10 -06001030 }
1031 }
1032}
1033
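// Pipeline barrier handling: the validation below only inspects image barriers that perform a
// layout transition (oldLayout != newLayout), since the transition itself behaves like a write
// to the affected subresources and must not race accesses outside the barrier's source scope.
// Plain memory barriers and buffer barriers are not hazard-checked here; the Record counterpart
// applies the buffer, image, and then global barriers to the access context.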
1034bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1035 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1036 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1037 uint32_t bufferMemoryBarrierCount,
1038 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1039 uint32_t imageMemoryBarrierCount,
1040 const VkImageMemoryBarrier *pImageMemoryBarriers) const {
1041 bool skip = false;
John Zulauf3d84f1b2020-03-09 13:33:25 -06001042 const auto *cb_access_context = GetAccessContext(commandBuffer);
1043 assert(cb_access_context);
1044 if (!cb_access_context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001045
John Zulauf3d84f1b2020-03-09 13:33:25 -06001046 const auto *context = cb_access_context->GetCurrentAccessContext();
1047 assert(context);
1048 if (!context) return skip;
John Zulauf0cb5be22020-01-23 12:18:22 -07001049
locke-lunarg296a3c92020-03-25 01:04:29 -06001050 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001051 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001052 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1053 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
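// The source execution scope above is widened to include all logically earlier pipeline stages
// (WithEarlierPipelineStages), roughly mirroring how the spec folds earlier stages into a
// barrier's first synchronization scope; ExpandPipelineStages has already resolved the
// queue-dependent ALL_* stage aliases.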
John Zulauf0cb5be22020-01-23 12:18:22 -07001054 // Validate Image Layout transitions
1055 for (uint32_t index = 0; index < imageMemoryBarrierCount; index++) {
1056 const auto &barrier = pImageMemoryBarriers[index];
1057 if (barrier.newLayout == barrier.oldLayout) continue; // Only interested in layout transitions at this point.
1058 const auto *image_state = Get<IMAGE_STATE>(barrier.image);
1059 if (!image_state) continue;
locke-lunarg296a3c92020-03-25 01:04:29 -06001060 const auto hazard = DetectImageBarrierHazard(*cmd, *context, *image_state, src_exec_scope, src_stage_accesses, barrier);
John Zulauf0cb5be22020-01-23 12:18:22 -07001061 if (hazard.hazard) {
1062 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001063 skip |= LogError(barrier.image, string_SyncHazardVUID(hazard.hazard),
1064 "vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s", string_SyncHazard(hazard.hazard),
1065 index, report_data->FormatHandle(barrier.image).c_str());
John Zulauf0cb5be22020-01-23 12:18:22 -07001066 }
1067 }
John Zulauf9cb530d2019-09-30 14:14:10 -06001068
1069 return skip;
1070}
1071
1072void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
1073 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
1074 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
1075 uint32_t bufferMemoryBarrierCount,
1076 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1077 uint32_t imageMemoryBarrierCount,
1078 const VkImageMemoryBarrier *pImageMemoryBarriers) {
John Zulauf3d84f1b2020-03-09 13:33:25 -06001079 auto *cb_access_context = GetAccessContext(commandBuffer);
1080 assert(cb_access_context);
1081 if (!cb_access_context) return;
1082 auto access_context = cb_access_context->GetCurrentAccessContext();
1083 assert(access_context);
1084 if (!access_context) return;
John Zulauf9cb530d2019-09-30 14:14:10 -06001085
John Zulauf3d84f1b2020-03-09 13:33:25 -06001086 const auto src_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), srcStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001087 auto src_stage_accesses = AccessScopeByStage(src_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001088 const auto dst_stage_mask = ExpandPipelineStages(cb_access_context->GetQueueFlags(), dstStageMask);
John Zulauf36bcf6a2020-02-03 15:12:52 -07001089 auto dst_stage_accesses = AccessScopeByStage(dst_stage_mask);
1090 const auto src_exec_scope = WithEarlierPipelineStages(src_stage_mask);
1091 const auto dst_exec_scope = WithLaterPipelineStages(dst_stage_mask);
John Zulauf3d84f1b2020-03-09 13:33:25 -06001092 ApplyBufferBarriers(access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
1093 bufferMemoryBarrierCount, pBufferMemoryBarriers);
locke-lunarg296a3c92020-03-25 01:04:29 -06001094 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
1095 ApplyImageBarriers(*cmd, access_context, src_exec_scope, src_stage_accesses, dst_exec_scope, dst_stage_accesses,
John Zulauf3d84f1b2020-03-09 13:33:25 -06001096 imageMemoryBarrierCount, pImageMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001097
1098 // Apply these last, in case their operation is a superset of the other two and would clean them up...
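// (e.g. a VkMemoryBarrier whose access masks cover everything the preceding buffer/image
//  barriers named; applying the global barriers last lets them subsume those updates.)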
John Zulauf3d84f1b2020-03-09 13:33:25 -06001099 ApplyGlobalBarriers(access_context, src_exec_scope, dst_exec_scope, src_stage_accesses, dst_stage_accesses, memoryBarrierCount,
John Zulauf0cb5be22020-01-23 12:18:22 -07001100 pMemoryBarriers);
John Zulauf9cb530d2019-09-30 14:14:10 -06001101}
1102
1103void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
1104 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
1105 // The state tracker sets up the device state
1106 StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
1107
John Zulauf5f13a792020-03-10 07:31:21 -06001108 // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
1109 // refactor would be messier without.
John Zulauf9cb530d2019-09-30 14:14:10 -06001110 // TODO: Find a good way to do this hooklessly.
1111 ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
1112 ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
1113 SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
1114
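// Hook the state tracker's command-buffer-reset notification so the SyncValidator can drop its
// per-command-buffer access state (ResetCommandBuffer) whenever the tracked buffer is reset.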
1115 sync_device_state->SetCommandBufferResetCallback(
1116 [sync_device_state](VkCommandBuffer command_buffer) -> void { sync_device_state->ResetCommandBuffer(command_buffer); });
1117}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001118
1119void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
1120 VkResult result) {
1121 // The state tracker sets up the command buffer state
1122 StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
1123
1124 // Create/initialize the structure that tracks accesses at the command buffer scope.
1125 auto cb_access_context = GetAccessContext(commandBuffer);
1126 assert(cb_access_context);
1127 cb_access_context->Reset();
1128}
1129
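// Render pass tracking: the helpers below keep the command buffer's access context in step with
// the render pass. BeginRenderPass/NextRenderPass/EndRenderPass manage the per-subpass contexts,
// and the core, '2', and '2KHR' entry points all funnel into the shared Record* helpers (the
// non-'2' variants synthesize a VkSubpassBeginInfo from the VkSubpassContents argument).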
1130void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1131 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1132 const auto rp_state = Get<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
1133 auto cb_context = GetAccessContext(commandBuffer);
1134 if (rp_state && cb_context) {
1135 cb_context->BeginRenderPass(*rp_state);
1136 }
1137}
1138
1139void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1140 VkSubpassContents contents) {
1141 StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
1142 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1143 subpass_begin_info.contents = contents;
1144 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info);
1145}
1146
1147void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
1148 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1149 StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1150 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1151}
1152
1153void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
1154 const VkRenderPassBeginInfo *pRenderPassBegin,
1155 const VkSubpassBeginInfo *pSubpassBeginInfo) {
1156 StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1157 RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
1158}
1159
1160void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1161 const VkSubpassEndInfo *pSubpassEndInfo) {
1162 auto cb_context = GetAccessContext(commandBuffer);
1163 assert(cb_context);
1164 auto cb_state = cb_context->GetCommandBufferState();
1165 if (!cb_state) return;
1166
1167 auto rp_state = cb_state->activeRenderPass;
1168 if (!rp_state) return;
1169
1170 cb_context->NextRenderPass(*rp_state);
1171}
1172
1173void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
1174 StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
1175 auto subpass_begin_info = lvl_init_struct<VkSubpassBeginInfo>();
1176 subpass_begin_info.contents = contents;
1177 RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr);
1178}
1179
1180void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1181 const VkSubpassEndInfo *pSubpassEndInfo) {
1182 StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1183 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1184}
1185
1186void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
1187 const VkSubpassEndInfo *pSubpassEndInfo) {
1188 StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1189 RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
1190}
1191
John Zulaufe5da6e52020-03-18 15:32:18 -06001192void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1193 // Resolve all of the subpass contexts to the command buffer context
1194 auto cb_context = GetAccessContext(commandBuffer);
1195 assert(cb_context);
1196 auto cb_state = cb_context->GetCommandBufferState();
1197 if (!cb_state) return;
1198
1199 const auto *rp_state = cb_state->activeRenderPass;
1200 if (!rp_state) return;
1201
1202 cb_context->EndRenderPass(*rp_state);
1203}
John Zulauf3d84f1b2020-03-09 13:33:25 -06001204
1205void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
1206 StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
1207 RecordCmdEndRenderPass(commandBuffer, nullptr);
1208}
1209
1210void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1211 StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
1212 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
1213}
1214
1215void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
1216 StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
1217 RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo);
1218}
locke-lunarga19c71d2020-03-02 18:17:04 -07001219
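// For buffer<->image copies below, buffer accesses are tracked as a byte range on the buffer's
// bound VkDeviceMemory (MakeMemoryAccessRange / GetBufferSizeFromCopyImage), while image accesses
// are tracked per subresource region as in vkCmdCopyImage above. Sparsely bound buffers are
// skipped (their memory handle resolves to VK_NULL_HANDLE here).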
1220bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1221 VkImageLayout dstImageLayout, uint32_t regionCount,
1222 const VkBufferImageCopy *pRegions) const {
1223 bool skip = false;
1224 const auto *cb_access_context = GetAccessContext(commandBuffer);
1225 assert(cb_access_context);
1226 if (!cb_access_context) return skip;
1227
1228 const auto *context = cb_access_context->GetCurrentAccessContext();
1229 assert(context);
1230 if (!context) return skip;
1231
1232 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
1233 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1234 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarg296a3c92020-03-25 01:04:29 -06001235 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001236
1237 for (uint32_t region = 0; region < regionCount; region++) {
1238 const auto &copy_region = pRegions[region];
1239 if (src_mem) {
1240 ResourceAccessRange src_range = MakeMemoryAccessRange(
1241 *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
1242 auto hazard = context->DetectHazard(VulkanTypedHandle(src_mem, kVulkanObjectTypeDeviceMemory),
1243 SYNC_TRANSFER_TRANSFER_READ, src_range);
1244 if (hazard.hazard) {
1245 // TODO -- add tag information to log msg when useful.
locke-lunarga0003652020-03-10 11:38:51 -06001246 skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
1247 "vkCmdCopyBufferToImage: Hazard %s for srcBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001248 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
1249 }
1250 }
1251 if (dst_image) {
locke-lunarg296a3c92020-03-25 01:04:29 -06001252 auto hazard = context->DetectHazard(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001253 copy_region.imageOffset, copy_region.imageExtent);
1254 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001255 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1256 "vkCmdCopyBufferToImage: Hazard %s for dstImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001257 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
1258 }
1259 if (skip) break;
1260 }
1261 if (skip) break;
1262 }
1263 return skip;
1264}
1265
1266void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
1267 VkImageLayout dstImageLayout, uint32_t regionCount,
1268 const VkBufferImageCopy *pRegions) {
1269 auto *cb_access_context = GetAccessContext(commandBuffer);
1270 assert(cb_access_context);
1271 auto *context = cb_access_context->GetCurrentAccessContext();
1272 assert(context);
1273
1274 const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
1275 const auto src_mem = (src_buffer && !src_buffer->sparse) ? src_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1276 const VulkanTypedHandle src_handle(src_mem, kVulkanObjectTypeDeviceMemory);
John Zulauf5f13a792020-03-10 07:31:21 -06001277
locke-lunarga19c71d2020-03-02 18:17:04 -07001278 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarg296a3c92020-03-25 01:04:29 -06001279 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001280
1281 for (uint32_t region = 0; region < regionCount; region++) {
1282 const auto &copy_region = pRegions[region];
1283 if (src_buffer) {
1284 ResourceAccessRange src_range = MakeMemoryAccessRange(
1285 *src_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
John Zulauf5f13a792020-03-10 07:31:21 -06001286 context->UpdateAccessState(src_handle, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001287 }
1288 if (dst_image) {
locke-lunarg296a3c92020-03-25 01:04:29 -06001289 context->UpdateAccessState(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001290 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001291 }
1292 }
1293}
1294
1295bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
1296 VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
1297 const VkBufferImageCopy *pRegions) const {
1298 bool skip = false;
1299 const auto *cb_access_context = GetAccessContext(commandBuffer);
1300 assert(cb_access_context);
1301 if (!cb_access_context) return skip;
1302
1303 const auto *context = cb_access_context->GetCurrentAccessContext();
1304 assert(context);
1305 if (!context) return skip;
1306
locke-lunarg296a3c92020-03-25 01:04:29 -06001307 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001308 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1309 const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1310 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
1311 for (uint32_t region = 0; region < regionCount; region++) {
1312 const auto &copy_region = pRegions[region];
1313 if (src_image) {
locke-lunarg296a3c92020-03-25 01:04:29 -06001314 auto hazard = context->DetectHazard(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001315 copy_region.imageOffset, copy_region.imageExtent);
1316 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001317 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1318 "vkCmdCopyImageToBuffer: Hazard %s for srcImage %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001319 string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
1320 }
1321 }
1322 if (dst_mem) {
1323 ResourceAccessRange dst_range = MakeMemoryAccessRange(
1324 *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
1325 auto hazard = context->DetectHazard(VulkanTypedHandle(dst_mem, kVulkanObjectTypeDeviceMemory),
1326 SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
1327 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001328 skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
1329 "vkCmdCopyImageToBuffer: Hazard %s for dstBuffer %s, region %" PRIu32,
locke-lunarga19c71d2020-03-02 18:17:04 -07001330 string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
1331 }
1332 }
1333 if (skip) break;
1334 }
1335 return skip;
1336}
1337
1338void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1339 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
1340 auto *cb_access_context = GetAccessContext(commandBuffer);
1341 assert(cb_access_context);
1342 auto *context = cb_access_context->GetCurrentAccessContext();
1343 assert(context);
1344
locke-lunarg296a3c92020-03-25 01:04:29 -06001345 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001346 const auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001347 auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
1348 const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
John Zulauf5f13a792020-03-10 07:31:21 -06001349 const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
locke-lunarga19c71d2020-03-02 18:17:04 -07001350
1351 for (uint32_t region = 0; region < regionCount; region++) {
1352 const auto &copy_region = pRegions[region];
1353 if (src_image) {
locke-lunarg296a3c92020-03-25 01:04:29 -06001354 context->UpdateAccessState(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001355 copy_region.imageOffset, copy_region.imageExtent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001356 }
1357 if (dst_buffer) {
1358 ResourceAccessRange dst_range = MakeMemoryAccessRange(
1359 *dst_buffer, copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
John Zulauf5f13a792020-03-10 07:31:21 -06001360 context->UpdateAccessState(dst_handle, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001361 }
1362 }
1363}
1364
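// vkCmdBlitImage regions are given as two corner offsets rather than offset+extent, so the code
// below reconstructs an extent from srcOffsets[1] - srcOffsets[0] (and likewise dstOffsets) before
// hazard detection; mirrored blits, where the second offset is smaller on an axis, get no special
// handling here.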
1365bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1366 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1367 const VkImageBlit *pRegions, VkFilter filter) const {
1368 bool skip = false;
1369 const auto *cb_access_context = GetAccessContext(commandBuffer);
1370 assert(cb_access_context);
1371 if (!cb_access_context) return skip;
1372
1373 const auto *context = cb_access_context->GetCurrentAccessContext();
1374 assert(context);
1375 if (!context) return skip;
1376
1377 const auto *src_image = Get<IMAGE_STATE>(srcImage);
1378 const auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarg296a3c92020-03-25 01:04:29 -06001379 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001380
1381 for (uint32_t region = 0; region < regionCount; region++) {
1382 const auto &blit_region = pRegions[region];
1383 if (src_image) {
1384 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1385 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1386 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
locke-lunarg296a3c92020-03-25 01:04:29 -06001387 auto hazard = context->DetectHazard(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001388 blit_region.srcOffsets[0], extent);
1389 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001390 skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
1391 "vkCmdBlitImage: Hazard %s for srcImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1392 report_data->FormatHandle(srcImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001393 }
1394 }
1395
1396 if (dst_image) {
1397 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1398 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1399 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
locke-lunarg296a3c92020-03-25 01:04:29 -06001400 auto hazard = context->DetectHazard(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
locke-lunarga19c71d2020-03-02 18:17:04 -07001401 blit_region.dstOffsets[0], extent);
1402 if (hazard.hazard) {
locke-lunarga0003652020-03-10 11:38:51 -06001403 skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
1404 "vkCmdBlitImage: Hazard %s for dstImage %s, region %" PRIu32, string_SyncHazard(hazard.hazard),
1405 report_data->FormatHandle(dstImage).c_str(), region);
locke-lunarga19c71d2020-03-02 18:17:04 -07001406 }
1407 if (skip) break;
1408 }
1409 }
1410
1411 return skip;
1412}
1413
1414void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
1415 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
1416 const VkImageBlit *pRegions, VkFilter filter) {
1417 auto *cb_access_context = GetAccessContext(commandBuffer);
1418 assert(cb_access_context);
1419 auto *context = cb_access_context->GetCurrentAccessContext();
1420 assert(context);
1421
1422 auto *src_image = Get<IMAGE_STATE>(srcImage);
locke-lunarga19c71d2020-03-02 18:17:04 -07001423 auto *dst_image = Get<IMAGE_STATE>(dstImage);
locke-lunarg296a3c92020-03-25 01:04:29 -06001424 const auto *cmd = Get<CMD_BUFFER_STATE>(commandBuffer);
locke-lunarga19c71d2020-03-02 18:17:04 -07001425
1426 for (uint32_t region = 0; region < regionCount; region++) {
1427 const auto &blit_region = pRegions[region];
1428 if (src_image) {
1429 VkExtent3D extent = {static_cast<uint32_t>(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x),
1430 static_cast<uint32_t>(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y),
1431 static_cast<uint32_t>(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z)};
locke-lunarg296a3c92020-03-25 01:04:29 -06001432 context->UpdateAccessState(*cmd, *src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001433 blit_region.srcOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001434 }
1435 if (dst_image) {
1436 VkExtent3D extent = {static_cast<uint32_t>(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x),
1437 static_cast<uint32_t>(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y),
1438 static_cast<uint32_t>(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z)};
locke-lunarg296a3c92020-03-25 01:04:29 -06001439 context->UpdateAccessState(*cmd, *dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource,
John Zulauf5f13a792020-03-10 07:31:21 -06001440 blit_region.dstOffsets[0], extent, tag);
locke-lunarga19c71d2020-03-02 18:17:04 -07001441 }
1442 }
1443}