/* Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Valve Corporation
 * Copyright (c) 2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: John Zulauf <jzulauf@lunarg.com>
 */

#pragma once

#include <array>
#include <map>
#include <memory>
#include <unordered_map>
#include <vulkan/vulkan.h>

#include "synchronization_validation_types.h"
#include "state_tracker.h"

enum SyncHazard {
    NONE = 0,
    READ_AFTER_WRITE,
    WRITE_AFTER_READ,
    WRITE_AFTER_WRITE,
    READ_RACING_WRITE,
    WRITE_RACING_WRITE,
    WRITE_RACING_READ,
};
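
// The *_AFTER_* values are the classic ordered-access hazards (an access that is not synchronized
// with respect to a conflicting earlier access). The *_RACING_* values appear intended for the
// asynchronous checks (see ResourceAccessState::DetectAsynchronousHazard below), i.e. conflicting
// accesses with no execution ordering between them at all.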

// Useful utilities for manipulating StageAccess parameters, suitable as a base class to save typing
struct SyncStageAccess {
    static SyncStageAccessFlagBits FlagBit(SyncStageAccessIndex stage_access) {
        return syncStageAccessInfoByStageAccessIndex[stage_access].stage_access_bit;
    }

    static bool IsRead(SyncStageAccessFlagBits stage_access_bit) { return 0 != (stage_access_bit & syncStageAccessReadMask); }
    static bool IsRead(SyncStageAccessIndex stage_access_index) { return IsRead(FlagBit(stage_access_index)); }

    static bool IsWrite(SyncStageAccessFlagBits stage_access_bit) { return 0 != (stage_access_bit & syncStageAccessWriteMask); }
    static bool IsWrite(SyncStageAccessIndex stage_access_index) { return IsWrite(FlagBit(stage_access_index)); }
    static VkPipelineStageFlagBits PipelineStageBit(SyncStageAccessIndex stage_access_index) {
        return syncStageAccessInfoByStageAccessIndex[stage_access_index].stage_mask;
    }
    static SyncStageAccessFlags AccessScopeByStage(VkPipelineStageFlags stages);
    static SyncStageAccessFlags AccessScopeByAccess(VkAccessFlags access);
    static SyncStageAccessFlags AccessScope(VkPipelineStageFlags stages, VkAccessFlags access);
    static SyncStageAccessFlags AccessScope(SyncStageAccessFlags stage_scope, VkAccessFlags accesses) {
        return stage_scope & AccessScopeByAccess(accesses);
    }
};
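
// Example (sketch): the AccessScope helpers convert a barrier's stage/access masks into a
// SyncStageAccessFlags scope that can be compared against recorded accesses. The values below are
// illustrative only:
//
//     VkPipelineStageFlags src_stages = VK_PIPELINE_STAGE_TRANSFER_BIT;
//     VkAccessFlags src_access = VK_ACCESS_TRANSFER_WRITE_BIT;
//     SyncStageAccessFlags src_scope = SyncStageAccess::AccessScope(src_stages, src_access);
//
// i.e. the intersection of the stage-derived and access-derived masks, per the inline two-argument
// overload above.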

using ResourceUsageTag = uint64_t;  // TODO -- identify a better DWORD or QWORD size UID/Tag for usages causing hazards
struct HazardResult {
    SyncHazard hazard = NONE;
    ResourceUsageTag tag = ResourceUsageTag();
    void Set(SyncHazard hazard_, const ResourceUsageTag &tag_) {
        hazard = hazard_;
        tag = tag_;
    }
};

class ResourceAccessState : public SyncStageAccess {
  protected:
    // Multiple read operations can be synchronized simultaneously (and independently).
    // Given that only the second execution scope creates a dependency chain, we have to track each read,
    // but only up to one per pipeline stage: a later read from the *same* stage becomes the more recent,
    // and thus the applicable, one for hazard detection.
    struct ReadState {
        VkPipelineStageFlagBits stage;  // The stage of this read
        VkPipelineStageFlags barriers;  // all applicable barriered stages
        ResourceUsageTag tag;
    };

  public:
    HazardResult DetectHazard(SyncStageAccessIndex usage_index) const;
    HazardResult DetectAsynchronousHazard(SyncStageAccessIndex usage_index) const;
    HazardResult DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_stage_mask,
                                     SyncStageAccessFlags source_scope) const;
    void Update(SyncStageAccessIndex usage_index, const ResourceUsageTag &tag);
    void ApplyExecutionBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask);
    void ApplyMemoryAccessBarrier(VkPipelineStageFlags src_stage_mask, SyncStageAccessFlags src_scope,
                                  VkPipelineStageFlags dst_stage_mask, SyncStageAccessFlags dst_scope);

    ResourceAccessState()
        : write_barriers(~SyncStageAccessFlags(0)), write_dependency_chain(0), last_read_count(0), last_read_stages(0) {}

  private:
    bool IsWriteHazard(SyncStageAccessFlagBits usage) const { return 0 != (usage & ~write_barriers); }
    bool IsReadHazard(VkPipelineStageFlagBits stage, const ReadState &read_access) const {
        return 0 != (stage & ~read_access.barriers);
    }
    bool IsReadHazard(VkPipelineStageFlags stage_mask, const ReadState &read_access) const {
        return stage_mask != (stage_mask & read_access.barriers);
    }
    // With reads, each must be "safe" relative to its prior write, so we need only
    // save the most recent write operation (as anything *transitively* unsafe would already
    // be included).
    SyncStageAccessFlags write_barriers;          // union of applicable barrier masks since last write
    VkPipelineStageFlags write_dependency_chain;  // initially zero, but accumulates the dstStages of barriers if they chain
    uint32_t last_read_count;
    VkPipelineStageFlags last_read_stages;

    ResourceUsageTag write_tag;

    std::array<ReadState, 8 * sizeof(VkPipelineStageFlags)> last_reads;
    SyncStageAccessFlagBits last_write;  // only the most recent write
};
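
// Example (sketch) of the detect-then-record pattern used per resource range. The usage indices are
// illustrative and assumed to come from the generated SyncStageAccessIndex values in
// synchronization_validation_types.h; write_tag/read_tag are placeholder tags:
//
//     ResourceAccessState access;
//     access.Update(SYNC_TRANSFER_TRANSFER_WRITE, write_tag);           // record a transfer write
//     HazardResult hazard = access.DetectHazard(SYNC_TRANSFER_TRANSFER_READ);
//     if (hazard.hazard != NONE) { /* report, citing hazard.tag */ }
//     access.Update(SYNC_TRANSFER_TRANSFER_READ, read_tag);             // then record the read
//
// Barriers applied via ApplyExecutionBarrier / ApplyMemoryAccessBarrier widen write_barriers and the
// per-stage ReadState::barriers, which is what makes a later access "safe" with respect to this state.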

using ResourceAccessRangeMap = sparse_container::range_map<uint64_t, ResourceAccessState>;
using ResourceAccessRange = typename ResourceAccessRangeMap::key_type;

class ResourceAccessTracker : public SyncStageAccess {
  public:
    using MemoryAccessMap = std::map<VkDeviceMemory, ResourceAccessRangeMap>;
    using ImageAccessMap = std::map<VkImage, ResourceAccessRangeMap>;

  protected:
    // TODO -- hide the details of the implementation.
    template <typename Map, typename Key>
    static typename Map::mapped_type *GetImpl(Map *map, Key key, bool do_insert) {
        auto find_it = map->find(key);
        if (find_it == map->end()) {
            if (!do_insert) return nullptr;
            auto insert_pair = map->insert(std::make_pair(key, typename Map::mapped_type()));
            find_it = insert_pair.first;
        }
        return &find_it->second;
    }

    template <typename Map, typename Key>
    static const typename Map::mapped_type *GetConstImpl(const Map *map, Key key) {
        auto find_it = map->find(key);
        if (find_it == map->cend()) {
            return nullptr;
        }
        return &find_it->second;
    }

  private:
    MemoryAccessMap memory_access_map_;
    ImageAccessMap image_access_map_;

  public:
    ResourceAccessRangeMap *GetMemoryAccesses(VkDeviceMemory memory) { return GetImpl(&memory_access_map_, memory, true); }
    ResourceAccessRangeMap *GetMemoryAccessesNoInsert(VkDeviceMemory memory) { return GetImpl(&memory_access_map_, memory, false); }
    const ResourceAccessRangeMap *GetMemoryAccesses(VkDeviceMemory memory) const {
        return GetConstImpl(&memory_access_map_, memory);
    }
    ResourceAccessRangeMap *GetImageAccesses(VkImage image) { return GetImpl(&image_access_map_, image, true); }
    ResourceAccessRangeMap *GetImageAccessesNoInsert(VkImage image) { return GetImpl(&image_access_map_, image, false); }
    const ResourceAccessRangeMap *GetImageAccesses(VkImage image) const { return GetConstImpl(&image_access_map_, image); }

    MemoryAccessMap &GetMemoryAccessMap() { return memory_access_map_; }
    ImageAccessMap &GetImageAccessMap() { return image_access_map_; }
    const MemoryAccessMap &GetMemoryAccessMap() const { return memory_access_map_; }
    const ImageAccessMap &GetImageAccessMap() const { return image_access_map_; }

    void Reset() {
        memory_access_map_.clear();
        image_access_map_.clear();
    }

    ResourceAccessTracker() : memory_access_map_(), image_access_map_() {}
};
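
// Example (sketch): the per-resource range maps allow lookups over a byte range of a bound resource.
// The names mem, binding_offset, and copy_size below are illustrative:
//
//     ResourceAccessRangeMap *accesses = tracker->GetMemoryAccesses(mem);     // creates the map if absent
//     ResourceAccessRange range(binding_offset, binding_offset + copy_size);  // key type of the range_map
//     // walk/update the entries of *accesses overlapping `range` via the range_map interface
//
// The *NoInsert variants and the const overloads are for lookup paths that must not create an entry
// as a side effect.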

class SyncValidator : public ValidationStateTracker, public SyncStageAccess {
  public:
    SyncValidator() { container_type = LayerObjectTypeSyncValidation; }
    using StateTracker = ValidationStateTracker;

    using StateTracker::AccessorTraitsTypes;
    ResourceUsageTag tag = 0;  // Find a better tagging scheme...
    std::map<VkCommandBuffer, std::unique_ptr<ResourceAccessTracker>> cb_access_state;
    ResourceAccessTracker *GetAccessTrackerImpl(VkCommandBuffer command_buffer, bool do_insert) {
        auto found_it = cb_access_state.find(command_buffer);
        if (found_it == cb_access_state.end()) {
            if (!do_insert) return nullptr;
            // If we don't have one, make it.
            std::unique_ptr<ResourceAccessTracker> tracker(new ResourceAccessTracker);
            auto insert_pair = cb_access_state.insert(std::make_pair(command_buffer, std::move(tracker)));
            found_it = insert_pair.first;
        }
        return found_it->second.get();
    }
    ResourceAccessTracker *GetAccessTracker(VkCommandBuffer command_buffer) {
        return GetAccessTrackerImpl(command_buffer, true);  // true -> do_insert on not found
    }
    ResourceAccessTracker *GetAccessTrackerNoInsert(VkCommandBuffer command_buffer) {
        return GetAccessTrackerImpl(command_buffer, false);  // false -> don't do_insert on not found
    }
    const ResourceAccessTracker *GetAccessTracker(VkCommandBuffer command_buffer) const {
        const auto found_it = cb_access_state.find(command_buffer);
        if (found_it == cb_access_state.end()) {
            return nullptr;
        }
        return found_it->second.get();
    }

    void ApplyGlobalBarriers(ResourceAccessTracker *tracker, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                             SyncStageAccessFlags src_stage_scope, SyncStageAccessFlags dst_stage_scope,
                             uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers);
    void ApplyBufferBarriers(ResourceAccessTracker *tracker, VkPipelineStageFlags src_stage_mask,
                             SyncStageAccessFlags src_stage_scope, VkPipelineStageFlags dst_stage_mask,
                             SyncStageAccessFlags dst_stage_scope, uint32_t barrier_count, const VkBufferMemoryBarrier *barriers);
    void ApplyImageBarriers(ResourceAccessTracker *tracker, VkPipelineStageFlags src_stage_mask,
                            SyncStageAccessFlags src_stage_scope, VkPipelineStageFlags dst_stage_mask,
                            SyncStageAccessFlags dst_stage_scope, uint32_t barrier_count, const VkImageMemoryBarrier *barriers);

    void ResetCommandBuffer(VkCommandBuffer command_buffer);

    void PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                    const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result);

    bool PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
                                      const VkBufferCopy *pRegions) const;

    void PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
                                    const VkBufferCopy *pRegions);

    bool PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                     VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                     const VkImageCopy *pRegions) const;

    void PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                                   VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions);

    bool PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                           VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                           uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                           uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                           uint32_t imageMemoryBarrierCount,
                                           const VkImageMemoryBarrier *pImageMemoryBarriers) const;

    void PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                         VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers);
};