/* Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Valve Corporation
 * Copyright (c) 2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 */

#include <limits>
#include <vector>
#include "synchronization_validation.h"

static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
            break;
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}

static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
            break;
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
            break;
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
            break;
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
            break;
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}

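// full_range covers the entire addressable VkDeviceSize space; it is used below to apply global barriers to every tracked
// access. MakeMemoryAccessRange converts a buffer-relative offset/size to a range within the buffer's bound device memory
// (non-sparse bindings only).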
static const ResourceAccessRange full_range(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
static ResourceAccessRange MakeMemoryAccessRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
    assert(!buffer.sparse);
    const auto base = offset + buffer.binding.offset;
    return ResourceAccessRange(base, base + size);
}

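// Scan every recorded access that overlaps the requested range and report the first hazard found (if any).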
HazardResult DetectHazard(const ResourceAccessRangeMap &accesses, SyncStageAccessIndex current_usage,
                          const ResourceAccessRange &range) {
    const auto from = accesses.lower_bound(range);
    const auto to = accesses.upper_bound(range);
    for (auto pos = from; pos != to; ++pos) {
        const auto &access_state = pos->second;
        HazardResult hazard = access_state.DetectHazard(current_usage);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

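// Image overload: expand the subresource layers into the image's encoded index ranges and check each with the map-based
// overload above. The offset/extent parameters are not yet honored (see TODO below).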
HazardResult DetectHazard(const IMAGE_STATE &image, const ResourceAccessRangeMap &accesses, SyncStageAccessIndex current_usage,
                          const VkImageSubresourceLayers &subresource, const VkOffset3D &offset, const VkExtent3D &extent) {
    // TODO: replace the encoder/generator with offset3D/extent3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    subresource_adapter::RangeGenerator range_gen(image.range_encoder, subresource_range);
    for (; range_gen->non_empty(); ++range_gen) {
        HazardResult hazard = DetectHazard(accesses, current_usage, *range_gen);
        if (hazard.hazard) return hazard;
    }
    return HazardResult();
}

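// Accumulate the stage/access scope bits for every flag set in flag_mask, using the supplied bit-to-scope lookup map.
// The map is expected to be ordered by bit value, which is what makes the early break below valid.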
template <typename Flags, typename Map>
SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
    SyncStageAccessFlags scope = 0;
    for (const auto &bit_scope : map) {
        if (flag_mask < bit_scope.first) break;

        if (flag_mask & bit_scope.first) {
            scope |= bit_scope.second;
        }
    }
    return scope;
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
    return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
}

SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
    return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
}

// Getting from a stage mask and an access mask to combined stage/access masks is something we need to be good at...
SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
    // The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled accesses.
    // After factoring out common terms, the union of the per-stage/per-access intersections equals the intersection of the
    // union of all stage/access types for the stage mask with the same union for the access mask.
    return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
}

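// Walk the portion of *accesses overlapping range, infilling gaps with action.Infill() and splitting existing entries at the
// range boundaries, so that action() is applied to exactly the requested range. For example, the UpdateAccessState() helpers
// below invoke this as:
//     UpdateMemoryAccessState(accesses, range, UpdateMemoryAccessStateFunctor(current_usage, tag));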
template <typename Action>
void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
    // TODO -- region/mem-range accurate update
    auto pos = accesses->lower_bound(range);
    if (pos == accesses->end() || !pos->first.intersects(range)) {
        // No existing access overlaps the range; fill the whole range with a default value.
        pos = action.Infill(accesses, pos, range);
    } else if (range.begin < pos->first.begin) {
        // Leading empty space, infill
        pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
    } else if (pos->first.begin < range.begin) {
        // Trim the beginning if needed
        pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
        ++pos;
    }

    const auto the_end = accesses->end();
    while ((pos != the_end) && pos->first.intersects(range)) {
        if (pos->first.end > range.end) {
            pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
        }

        pos = action(accesses, pos);
        if (pos == the_end) break;

        auto next = pos;
        ++next;
        if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
            // Need to infill if next is disjoint
            VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
            ResourceAccessRange new_range(pos->first.end, limit);
            next = action.Infill(accesses, next, new_range);
        }
        pos = next;
    }
}

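// Action functor for UpdateMemoryAccessState: records the given usage and tag on each access state in the range,
// default-constructing a ResourceAccessState for any gap that needs infill.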
struct UpdateMemoryAccessStateFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
        return accesses->insert(pos, std::make_pair(range, ResourceAccessState()));
    }
    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.Update(usage, tag);
        return pos;
    }

    UpdateMemoryAccessStateFunctor(SyncStageAccessIndex usage_, const ResourceUsageTag &tag_) : usage(usage_), tag(tag_) {}
    SyncStageAccessIndex usage;
    const ResourceUsageTag &tag;
};

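// Action functor for UpdateMemoryAccessState: applies a memory access barrier to each existing access state in the range.
// Infill is a no-op, since a barrier has no effect where no access has been recorded.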
struct ApplyMemoryAccessBarrierFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.ApplyMemoryAccessBarrier(src_stage_mask, src_scope, dst_stage_mask, dst_scope);
        return pos;
    }

    ApplyMemoryAccessBarrierFunctor(VkPipelineStageFlags src_stage_mask_, SyncStageAccessFlags src_scope_,
                                    VkPipelineStageFlags dst_stage_mask_, SyncStageAccessFlags dst_scope_)
        : src_stage_mask(src_stage_mask_), src_scope(src_scope_), dst_stage_mask(dst_stage_mask_), dst_scope(dst_scope_) {}

    VkPipelineStageFlags src_stage_mask;
    SyncStageAccessFlags src_scope;
    VkPipelineStageFlags dst_stage_mask;
    SyncStageAccessFlags dst_scope;
};

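// Action functor for UpdateMemoryAccessState: applies the execution barrier and then each per-VkMemoryBarrier access barrier
// to every existing access state in the range. As with the access barrier functor, gaps are not infilled.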
struct ApplyGlobalBarrierFunctor {
    using Iterator = ResourceAccessRangeMap::iterator;
    inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }

    Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
        auto &access_state = pos->second;
        access_state.ApplyExecutionBarrier(src_stage_mask, dst_stage_mask);

        for (const auto &functor : barrier_functor) {
            functor(accesses, pos);
        }
        return pos;
    }

    ApplyGlobalBarrierFunctor(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                              SyncStageAccessFlags src_stage_scope, SyncStageAccessFlags dst_stage_scope,
                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers)
        : src_stage_mask(srcStageMask), dst_stage_mask(dstStageMask) {
        // Don't want to create this per tracked item, but don't want to loop through all tracked items per barrier...
        barrier_functor.reserve(memoryBarrierCount);
        for (uint32_t barrier_index = 0; barrier_index < memoryBarrierCount; barrier_index++) {
            const auto &barrier = pMemoryBarriers[barrier_index];
            barrier_functor.emplace_back(srcStageMask, SyncStageAccess::AccessScope(src_stage_scope, barrier.srcAccessMask),
                                         dstStageMask, SyncStageAccess::AccessScope(dst_stage_scope, barrier.dstAccessMask));
        }
    }

    const VkPipelineStageFlags src_stage_mask;
    const VkPipelineStageFlags dst_stage_mask;
    std::vector<ApplyMemoryAccessBarrierFunctor> barrier_functor;
};

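// Convenience wrappers that record a usage over a raw memory range, or over each encoded range of an image subresource.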
void UpdateAccessState(ResourceAccessRangeMap *accesses, SyncStageAccessIndex current_usage, const ResourceAccessRange &range,
                       const ResourceUsageTag &tag) {
    UpdateMemoryAccessStateFunctor action(current_usage, tag);
    UpdateMemoryAccessState(accesses, range, action);
}

void UpdateAccessState(const IMAGE_STATE &image, ResourceAccessRangeMap *accesses, SyncStageAccessIndex current_usage,
                       const VkImageSubresourceLayers &subresource, const VkOffset3D &offset, const VkExtent3D &extent,
                       const ResourceUsageTag &tag) {
    // TODO: replace the encoder/generator with offset3D aware versions
    VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
                                                 subresource.layerCount};
    subresource_adapter::RangeGenerator range_gen(image.range_encoder, subresource_range);
    for (; range_gen->non_empty(); ++range_gen) {
        UpdateAccessState(accesses, current_usage, *range_gen, tag);
    }
}

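// Check a proposed usage against this access state: reads are tested for READ_AFTER_WRITE against the last write, writes for
// WRITE_AFTER_WRITE against an unsynchronized last write and for WRITE_AFTER_READ against each per-stage last read.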
HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
    HazardResult hazard;
    auto usage = FlagBit(usage_index);
    if (IsRead(usage)) {
        if (IsWriteHazard(usage)) {
            hazard.Set(READ_AFTER_WRITE, write_tag);
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE usage states if any
        // Write-After-Write check -- if we have a previous write to test against
        if (last_write && IsWriteHazard(usage)) {
            hazard.Set(WRITE_AFTER_WRITE, write_tag);
        } else {
            // Only look for casus belli for WAR
            const auto usage_stage = PipelineStageBit(usage_index);
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                if (IsReadHazard(usage_stage, last_reads[read_index])) {
                    hazard.Set(WRITE_AFTER_READ, last_reads[read_index].tag);
                    break;
                }
            }
        }
    }
    return hazard;
}

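// Record a new usage on this access state. Reads are tracked per pipeline stage (one slot per stage); a write clobbers the
// prior reads, barriers, and write information, since any earlier hazard has already been reported and this write is now the
// most recent access.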
void ResourceAccessState::Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag) {
    // TODO: Move this logic into the ResourceStateTracker as methods, or we'll repeat it for every flavor of resource...
    const auto usage_bit = FlagBit(usage_index);
    if (IsRead(usage_index)) {
        // Multiple outstanding reads may be of interest and do dependency chains independently
        // However, for purposes of barrier tracking, only one read per pipeline stage matters
        const auto usage_stage = PipelineStageBit(usage_index);
        if (usage_stage & last_read_stages) {
            for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
                ReadState &access = last_reads[read_index];
                if (access.stage == usage_stage) {
                    access.barriers = 0;
                    access.tag = tag;
                    break;
                }
            }
        } else {
            // We don't have this stage in the list yet...
            assert(last_read_count < last_reads.size());
            ReadState &access = last_reads[last_read_count++];
            access.stage = usage_stage;
            access.barriers = 0;
            access.tag = tag;
            last_read_stages |= usage_stage;
        }
    } else {
        // Assume write
        // TODO determine what to do with READ-WRITE operations if any
        // Clobber last read and both sets of barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
        // If the last_reads/last_write were unsafe, we've already reported them; either way the prior access is irrelevant,
        // and we can overwrite it because *this* write is now after it.
        last_read_count = 0;
        last_read_stages = 0;

        write_barriers = 0;
        write_dependency_chain = 0;
        write_tag = tag;
        last_write = usage_bit;
    }
}

void ResourceAccessState::ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask) {
    // Execution barriers only protect read operations
    for (uint32_t read_index = 0; read_index < last_read_count; read_index++) {
        ReadState &access = last_reads[read_index];
        // The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
        if (srcStageMask & (access.stage | access.barriers)) {
            access.barriers |= dstStageMask;
        }
    }
    if (write_dependency_chain & srcStageMask) write_dependency_chain |= dstStageMask;
}

void ResourceAccessState::ApplyMemoryAccessBarrier(VkPipelineStageFlags src_stage_mask, SyncStageAccessFlags src_scope,
                                                   VkPipelineStageFlags dst_stage_mask, SyncStageAccessFlags dst_scope) {
    // Assuming we've applied the execution side of this barrier, we update just the write
    // The || implements the "dependency chain" logic for this barrier
    if ((src_scope & last_write) || (write_dependency_chain & src_stage_mask)) {
        write_barriers |= dst_scope;
        write_dependency_chain |= dst_stage_mask;
    }
}

void SyncValidator::ResetCommandBuffer(VkCommandBuffer command_buffer) {
    auto *tracker = GetAccessTrackerNoInsert(command_buffer);
    if (tracker) {
        tracker->Reset();
    }
}

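// Apply the global (VkMemoryBarrier) barriers across the full range of every memory and image access map tracked for this
// command buffer.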
void SyncValidator::ApplyGlobalBarriers(ResourceAccessTracker *tracker, VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask, SyncStageAccessFlags src_stage_scope,
                                        SyncStageAccessFlags dst_stage_scope, uint32_t memoryBarrierCount,
                                        const VkMemoryBarrier *pMemoryBarriers) {
    // TODO: Implement this better (maybe some delayed/on-demand integration).
    ApplyGlobalBarrierFunctor barriers_functor(srcStageMask, dstStageMask, src_stage_scope, dst_stage_scope, memoryBarrierCount,
                                               pMemoryBarriers);
    for (auto &mem_access_pair : tracker->GetMemoryAccessMap()) {
        UpdateMemoryAccessState(&mem_access_pair.second, full_range, barriers_functor);
    }
    for (auto &image_access_pair : tracker->GetImageAccessMap()) {
        UpdateMemoryAccessState(&image_access_pair.second, full_range, barriers_functor);
    }
}

void SyncValidator::ApplyBufferBarriers(ResourceAccessTracker *tracker, VkPipelineStageFlags src_stage_mask,
                                        SyncStageAccessFlags src_stage_scope, VkPipelineStageFlags dst_stage_mask,
                                        SyncStageAccessFlags dst_stage_scope, uint32_t barrier_count,
                                        const VkBufferMemoryBarrier *barriers) {
    // TODO: Implement this at subresource/memory_range accuracy
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto *buffer = Get<BUFFER_STATE>(barrier.buffer);
        if (!buffer) continue;
        auto *accesses = tracker->GetMemoryAccessesNoInsert(buffer->binding.mem_state->mem);
        if (!accesses) continue;
        ResourceAccessRange range = MakeMemoryAccessRange(*buffer, barrier.offset, barrier.size);
        UpdateMemoryAccessState(
            accesses, range,
            ApplyMemoryAccessBarrierFunctor(src_stage_mask, AccessScope(src_stage_scope, barrier.srcAccessMask), dst_stage_mask,
                                            AccessScope(dst_stage_scope, barrier.dstAccessMask)));
    }
}

void SyncValidator::ApplyImageBarriers(ResourceAccessTracker *tracker, VkPipelineStageFlags src_stage_mask,
                                       SyncStageAccessFlags src_stage_scope, VkPipelineStageFlags dst_stage_mask,
                                       SyncStageAccessFlags dst_stage_scope, uint32_t barrier_count,
                                       const VkImageMemoryBarrier *barriers) {
    for (uint32_t index = 0; index < barrier_count; index++) {
        const auto &barrier = barriers[index];
        const auto *image = Get<IMAGE_STATE>(barrier.image);
        if (!image) continue;
        auto *accesses = tracker->GetImageAccessesNoInsert(image->image);
        if (!accesses) continue;
        auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
        subresource_adapter::RangeGenerator range_gen(image->range_encoder, subresource_range);
        const ApplyMemoryAccessBarrierFunctor barrier_action(src_stage_mask, AccessScope(src_stage_scope, barrier.srcAccessMask),
                                                             dst_stage_mask, AccessScope(dst_stage_scope, barrier.dstAccessMask));
        for (; range_gen->non_empty(); ++range_gen) {
            UpdateMemoryAccessState(accesses, *range_gen, barrier_action);
        }
    }
}

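// For the copy commands below, the PreCallValidate entry points only detect and report hazards against previously recorded
// accesses, while the PreCallRecord entry points record the copy's own reads and writes into the access tracker.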
bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                 uint32_t regionCount, const VkBufferCopy *pRegions) const {
    bool skip = false;
    const auto *const const_this = this;
    const auto *tracker = const_this->GetAccessTracker(commandBuffer);
    if (tracker) {
        // If we have no previous accesses, we have no hazards
        // TODO: make this sub-resource capable
        // TODO: make this general, and stuff it into templates/utility functions
        const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
        const auto src_access =
            (src_buffer && !src_buffer->sparse) ? tracker->GetMemoryAccesses(src_buffer->binding.mem_state->mem) : nullptr;
        const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
        const auto dst_access =
            (dst_buffer && !dst_buffer->sparse) ? tracker->GetMemoryAccesses(dst_buffer->binding.mem_state->mem) : nullptr;

        for (uint32_t region = 0; region < regionCount; region++) {
            const auto &copy_region = pRegions[region];
            if (src_access) {
                ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
                auto hazard = DetectHazard(*src_access, SYNC_TRANSFER_TRANSFER_READ, src_range);
                if (hazard.hazard) {
                    // TODO -- add tag information to log msg when useful.
                    skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard), "Hazard %s for srcBuffer %s, region %" PRIu32,
                                     string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region);
                }
            }
            if (dst_access && !skip) {
                ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
                auto hazard = DetectHazard(*dst_access, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
                if (hazard.hazard) {
                    skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard), "Hazard %s for dstBuffer %s, region %" PRIu32,
                                     string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region);
                }
            }
            if (skip) break;
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                               uint32_t regionCount, const VkBufferCopy *pRegions) {
    auto *tracker = GetAccessTracker(commandBuffer);
    assert(tracker);
    const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
    const auto src_access =
        (src_buffer && !src_buffer->sparse) ? tracker->GetMemoryAccesses(src_buffer->binding.mem_state->mem) : nullptr;
    const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
    const auto dst_access =
        (dst_buffer && !dst_buffer->sparse) ? tracker->GetMemoryAccesses(dst_buffer->binding.mem_state->mem) : nullptr;

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_access) {
            ResourceAccessRange src_range = MakeMemoryAccessRange(*src_buffer, copy_region.srcOffset, copy_region.size);
            UpdateAccessState(src_access, SYNC_TRANSFER_TRANSFER_READ, src_range, tag);
        }
        if (dst_access) {
            ResourceAccessRange dst_range = MakeMemoryAccessRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
            UpdateAccessState(dst_access, SYNC_TRANSFER_TRANSFER_WRITE, dst_range, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkImageCopy *pRegions) const {
    bool skip = false;
    auto *tracker = GetAccessTracker(commandBuffer);
    if (tracker) {
        const auto *src_image = Get<IMAGE_STATE>(srcImage);
        const auto src_access = tracker->GetImageAccesses(srcImage);
        const auto *dst_image = Get<IMAGE_STATE>(dstImage);
        const auto dst_access = tracker->GetImageAccesses(dstImage);

        for (uint32_t region = 0; region < regionCount; region++) {
            const auto &copy_region = pRegions[region];
            if (src_access) {
                auto hazard = DetectHazard(*src_image, *src_access, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
                                           copy_region.srcOffset, copy_region.extent);
                if (hazard.hazard) {
                    skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard), "Hazard %s for srcImage %s, region %" PRIu32,
                                     string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region);
                }
            }
            if (dst_access) {
                auto hazard = DetectHazard(*dst_image, *dst_access, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
                                           copy_region.dstOffset, copy_region.extent);
                if (hazard.hazard) {
                    skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard), "Hazard %s for dstImage %s, region %" PRIu32,
                                     string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region);
                }
            }
        }
    }
    return skip;
}

void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                              VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                              const VkImageCopy *pRegions) {
    auto *tracker = GetAccessTracker(commandBuffer);
    assert(tracker);
    auto *src_image = Get<IMAGE_STATE>(srcImage);
    auto src_access = tracker->GetImageAccesses(srcImage);
    auto *dst_image = Get<IMAGE_STATE>(dstImage);
    auto dst_access = tracker->GetImageAccesses(dstImage);

    for (uint32_t region = 0; region < regionCount; region++) {
        const auto &copy_region = pRegions[region];
        if (src_access) {
            UpdateAccessState(*src_image, src_access, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
                              copy_region.srcOffset, copy_region.extent, tag);
        }
        if (dst_access) {
            UpdateAccessState(*dst_image, dst_access, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
                              copy_region.dstOffset, copy_region.extent, tag);
        }
    }
}

bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    bool skip = false;

    return skip;
}

void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                    uint32_t bufferMemoryBarrierCount,
                                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                    uint32_t imageMemoryBarrierCount,
                                                    const VkImageMemoryBarrier *pImageMemoryBarriers) {
    // Apply the buffer, image, and global memory barriers to the tracked access state
    auto *tracker = GetAccessTracker(commandBuffer);
    assert(tracker);
    auto src_stage_scope = AccessScopeByStage(srcStageMask);
    auto dst_stage_scope = AccessScopeByStage(dstStageMask);

    ApplyBufferBarriers(tracker, srcStageMask, src_stage_scope, dstStageMask, dst_stage_scope, bufferMemoryBarrierCount,
                        pBufferMemoryBarriers);
    ApplyImageBarriers(tracker, srcStageMask, src_stage_scope, dstStageMask, dst_stage_scope, imageMemoryBarrierCount,
                       pImageMemoryBarriers);

    // Apply the global barriers last, in case their scope is a superset of the other two and would subsume them
    ApplyGlobalBarriers(tracker, srcStageMask, dstStageMask, src_stage_scope, dst_stage_scope, memoryBarrierCount, pMemoryBarriers);
}

void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    // The state tracker sets up the device state
    StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);

    // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
    // refactor would be messier without.
    // TODO: Find a good way to do this hooklessly.
    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
    SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);

    sync_device_state->SetCommandBufferResetCallback(
        [sync_device_state](VkCommandBuffer command_buffer) -> void { sync_device_state->ResetCommandBuffer(command_buffer); });
}