/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#define VALIDATION_ERROR_MAP_IMPL

#include "object_tracker.h"

namespace object_tracker {

std::unordered_map<void *, layer_data *> layer_data_map;
std::mutex global_lock;
uint64_t object_track_index = 0;
uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

void InitObjectTracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_report_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
    layer_debug_messenger_actions(my_data->report_data, my_data->logging_messenger, pAllocator, "lunarg_object_tracker");
}

// Add new queue to head of global queue list
void AddQueueInfo(VkDevice device, uint32_t queue_node_index, VkQueue queue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto queueItem = device_data->queue_info_map.find(queue);
    if (queueItem == device_data->queue_info_map.end()) {
        ObjTrackQueueInfo *p_queue_info = new ObjTrackQueueInfo;
        if (p_queue_info != NULL) {
            memset(p_queue_info, 0, sizeof(ObjTrackQueueInfo));
            p_queue_info->queue = queue;
            p_queue_info->queue_node_index = queue_node_index;
            device_data->queue_info_map[queue] = p_queue_info;
        } else {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    HandleToUint64(queue), OBJTRACK_INTERNAL_ERROR,
                    "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
        }
    }
}

// Destroy memRef lists and free all memory
void DestroyQueueDataStructures(VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    for (auto queue_item : device_data->queue_info_map) {
        delete queue_item.second;
    }
    device_data->queue_info_map.clear();

    // Destroy the items in the queue map
    auto queue = device_data->object_map[kVulkanObjectTypeQueue].begin();
    while (queue != device_data->object_map[kVulkanObjectTypeQueue].end()) {
        uint32_t obj_index = queue->second->object_type;
        assert(device_data->num_total_objects > 0);
        device_data->num_total_objects--;
        assert(device_data->num_objects[obj_index] > 0);
        device_data->num_objects[obj_index]--;
        log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                queue->second->handle, OBJTRACK_NONE,
                "OBJ_STAT Destroy Queue obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " Queue objs).",
                queue->second->handle, device_data->num_total_objects, device_data->num_objects[obj_index]);
        delete queue->second;
        queue = device_data->object_map[kVulkanObjectTypeQueue].erase(queue);
    }
}

// Check Queue type flags for selected queue operations
void ValidateQueueFlags(VkQueue queue, const char *function) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    auto queue_item = device_data->queue_info_map.find(queue);
    if (queue_item != device_data->queue_info_map.end()) {
        ObjTrackQueueInfo *pQueueInfo = queue_item->second;
        if (pQueueInfo != NULL) {
            layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(device_data->physical_device), layer_data_map);
            if ((instance_data->queue_family_properties[pQueueInfo->queue_node_index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) ==
                0) {
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                        HandleToUint64(queue), "VUID-vkQueueBindSparse-queuetype",
                        "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.", function);
            }
        }
    }
}

// Look for this device object in any of the instance child devices lists.
// NOTE: This is of dubious value. In most circumstances Vulkan will die a flaming death if a dispatchable object is invalid.
// However, if this layer is loaded first and GetProcAddress is used to make API calls, it will detect bad DOs.
bool ValidateDeviceObject(uint64_t device_handle, const std::string &invalid_handle_code, const std::string &wrong_device_code) {
    VkInstance last_instance = nullptr;
    for (auto layer_data : layer_data_map) {
        for (auto object : layer_data.second->object_map[kVulkanObjectTypeDevice]) {
            // Grab last instance to use for possible error message
            last_instance = layer_data.second->instance;
            if (object.second->handle == device_handle) return false;
        }
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(last_instance), layer_data_map);
    return log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device_handle,
                   invalid_handle_code, "Invalid Device Object 0x%" PRIxLEAST64 ".", device_handle);
}

void AllocateCommandBuffer(VkDevice device, const VkCommandPool command_pool, const VkCommandBuffer command_buffer,
                           VkCommandBufferLevel level) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            HandleToUint64(command_buffer), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT", HandleToUint64(command_buffer));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeCommandBuffer;
    pNewObjNode->handle = HandleToUint64(command_buffer);
    pNewObjNode->parent_object = HandleToUint64(command_pool);
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)] = pNewObjNode;
    device_data->num_objects[kVulkanObjectTypeCommandBuffer]++;
    device_data->num_total_objects++;
}

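// Verify that a command buffer being freed is tracked and actually belongs to the specified command pool.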
bool ValidateCommandBuffer(VkDevice device, VkCommandPool command_pool, VkCommandBuffer command_buffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    uint64_t object_handle = HandleToUint64(command_buffer);
    if (device_data->object_map[kVulkanObjectTypeCommandBuffer].find(object_handle) !=
        device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
        ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];

        if (pNode->parent_object != HandleToUint64(command_pool)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-parent",
                            "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
                            " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
                            HandleToUint64(command_buffer), pNode->parent_object, HandleToUint64(command_pool));
        }
    } else {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-00048", "Invalid %s Object 0x%" PRIxLEAST64 ".",
                        object_string[kVulkanObjectTypeCommandBuffer], object_handle);
    }
    return skip;
}

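// Track a newly allocated descriptor set, recording its parent descriptor pool.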
void AllocateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
            HandleToUint64(descriptor_set), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT", HandleToUint64(descriptor_set));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeDescriptorSet;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->handle = HandleToUint64(descriptor_set);
    pNewObjNode->parent_object = HandleToUint64(descriptor_pool);
    device_data->object_map[kVulkanObjectTypeDescriptorSet][HandleToUint64(descriptor_set)] = pNewObjNode;
    device_data->num_objects[kVulkanObjectTypeDescriptorSet]++;
    device_data->num_total_objects++;
}

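// Verify that a descriptor set being freed is tracked and was allocated from the specified descriptor pool.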
bool ValidateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    uint64_t object_handle = HandleToUint64(descriptor_set);
    auto dsItem = device_data->object_map[kVulkanObjectTypeDescriptorSet].find(object_handle);
    if (dsItem != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = dsItem->second;

        if (pNode->parent_object != HandleToUint64(descriptor_pool)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-parent",
                            "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                            " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
                            HandleToUint64(descriptor_set), pNode->parent_object, HandleToUint64(descriptor_pool));
        }
    } else {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-00310", "Invalid %s Object 0x%" PRIxLEAST64 ".",
                        object_string[kVulkanObjectTypeDescriptorSet], object_handle);
    }
    return skip;
}

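// Validate every handle referenced by a VkWriteDescriptorSet: the destination set (unless this is a push descriptor),
// plus any buffer views, image views, or buffers selected by the write's descriptorType.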
template <typename DispObj>
static bool ValidateDescriptorWrite(DispObj disp, VkWriteDescriptorSet const *desc, bool isPush) {
    bool skip = false;

    if (!isPush && desc->dstSet) {
        skip |= ValidateObject(disp, desc->dstSet, kVulkanObjectTypeDescriptorSet, false, "VUID-VkWriteDescriptorSet-dstSet-00320",
                               "VUID-VkWriteDescriptorSet-commonparent");
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)) {
        for (uint32_t idx2 = 0; idx2 < desc->descriptorCount; ++idx2) {
            skip |= ValidateObject(disp, desc->pTexelBufferView[idx2], kVulkanObjectTypeBufferView, false,
                                   "VUID-VkWriteDescriptorSet-descriptorType-00323", "VUID-VkWriteDescriptorSet-commonparent");
        }
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) {
        for (uint32_t idx3 = 0; idx3 < desc->descriptorCount; ++idx3) {
            skip |= ValidateObject(disp, desc->pImageInfo[idx3].imageView, kVulkanObjectTypeImageView, false,
                                   "VUID-VkWriteDescriptorSet-descriptorType-00326", "VUID-VkDescriptorImageInfo-commonparent");
        }
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
        for (uint32_t idx4 = 0; idx4 < desc->descriptorCount; ++idx4) {
            if (desc->pBufferInfo[idx4].buffer) {
                skip |= ValidateObject(disp, desc->pBufferInfo[idx4].buffer, kVulkanObjectTypeBuffer, false,
                                       "VUID-VkDescriptorBufferInfo-buffer-parameter", kVUIDUndefined);
            }
        }
    }

    return skip;
}

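// Validate the command buffer, pipeline layout, and each descriptor write before passing the push down the dispatch chain.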
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                   VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false,
                           "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
        skip |= ValidateObject(commandBuffer, layout, kVulkanObjectTypePipelineLayout, false,
                               "VUID-vkCmdPushDescriptorSetKHR-layout-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
        if (pDescriptorWrites) {
            for (uint32_t index0 = 0; index0 < descriptorWriteCount; ++index0) {
                skip |= ValidateDescriptorWrite(commandBuffer, &pDescriptorWrites[index0], true);
            }
        }
    }
    if (skip) return;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->device_dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
                                                               pDescriptorWrites);
}

void CreateQueue(VkDevice device, VkQueue vkObj) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
            HandleToUint64(vkObj), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT", HandleToUint64(vkObj));

    ObjTrackState *p_obj_node = NULL;
    auto queue_item = device_data->object_map[kVulkanObjectTypeQueue].find(HandleToUint64(vkObj));
    if (queue_item == device_data->object_map[kVulkanObjectTypeQueue].end()) {
        p_obj_node = new ObjTrackState;
        device_data->object_map[kVulkanObjectTypeQueue][HandleToUint64(vkObj)] = p_obj_node;
        device_data->num_objects[kVulkanObjectTypeQueue]++;
        device_data->num_total_objects++;
    } else {
        p_obj_node = queue_item->second;
    }
    p_obj_node->object_type = kVulkanObjectTypeQueue;
    p_obj_node->status = OBJSTATUS_NONE;
    p_obj_node->handle = HandleToUint64(vkObj);
}

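// Track a swapchain image in the separate swapchainImageMap, recording its parent swapchain.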
void CreateSwapchainImageObject(VkDevice dispatchable_object, VkImage swapchain_image, VkSwapchainKHR swapchain) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(dispatchable_object), layer_data_map);
    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
            HandleToUint64(swapchain_image), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "SwapchainImage", HandleToUint64(swapchain_image));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeImage;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->handle = HandleToUint64(swapchain_image);
    pNewObjNode->parent_object = HandleToUint64(swapchain);
    device_data->swapchainImageMap[HandleToUint64(swapchain_image)] = pNewObjNode;
}

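// Report, and below silently destroy, any tracked objects of the given type that outlive their parent device.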
void DeviceReportUndestroyedObjects(VkDevice device, VulkanObjectType object_type, const std::string &error_code) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    for (const auto &item : device_data->object_map[object_type]) {
        const ObjTrackState *object_info = item.second;
        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], object_info->handle,
                error_code, "OBJ ERROR : For device 0x%" PRIxLEAST64 ", %s object 0x%" PRIxLEAST64 " has not been destroyed.",
                HandleToUint64(device), object_string[object_type], object_info->handle);
    }
}

void DeviceDestroyUndestroyedObjects(VkDevice device, VulkanObjectType object_type) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    while (!device_data->object_map[object_type].empty()) {
        auto item = device_data->object_map[object_type].begin();

        ObjTrackState *object_info = item->second;
        DestroyObjectSilently(device, object_info->handle, object_type);
    }
}

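// Instance teardown: report and clean up leaked child devices and their objects, then release the layer's callbacks and
// per-instance state.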
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);

    dispatch_key key = get_dispatch_key(instance);
    layer_data *instance_data = GetLayerDataPtr(key, layer_data_map);

    // Enable the temporary callback(s) here to catch cleanup issues:
    if (instance_data->num_tmp_debug_messengers > 0) {
        layer_enable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
                                          instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
    }
    if (instance_data->num_tmp_report_callbacks > 0) {
        layer_enable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
                                          instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
    }

    // TODO: The instance handle can not be validated here. The loader will likely have to validate it.
    ValidateObject(instance, instance, kVulkanObjectTypeInstance, true, "VUID-vkDestroyInstance-instance-parameter",
                   kVUIDUndefined);

    // Destroy physical devices
    for (auto iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
         iit != instance_data->object_map[kVulkanObjectTypePhysicalDevice].end();) {
        ObjTrackState *pNode = iit->second;
        VkPhysicalDevice physical_device = reinterpret_cast<VkPhysicalDevice>(pNode->handle);

        DestroyObject(instance, physical_device, kVulkanObjectTypePhysicalDevice, nullptr, kVUIDUndefined, kVUIDUndefined);
        iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
    }

    // Destroy child devices
    for (auto iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
         iit != instance_data->object_map[kVulkanObjectTypeDevice].end();) {
        ObjTrackState *pNode = iit->second;

        VkDevice device = reinterpret_cast<VkDevice>(pNode->handle);
        VkDebugReportObjectTypeEXT debug_object_type = get_debug_report_enum[pNode->object_type];

        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, pNode->handle, OBJTRACK_OBJECT_LEAK,
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.",
                string_VkDebugReportObjectTypeEXT(debug_object_type), pNode->handle);

        // Report any remaining objects in LL
        ReportUndestroyedObjects(device, "VUID-vkDestroyInstance-instance-00629");
        DestroyUndestroyedObjects(device);

        DestroyObject(instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyInstance-instance-00630",
                      "VUID-vkDestroyInstance-instance-00631");
        iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
    }

    instance_data->object_map[kVulkanObjectTypeDevice].clear();
    instance_data->instance_dispatch_table.DestroyInstance(instance, pAllocator);

    // Disable and cleanup the temporary callback(s):
    layer_disable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
                                       instance_data->tmp_debug_messengers);
    layer_disable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
                                       instance_data->tmp_report_callbacks);
    if (instance_data->num_tmp_debug_messengers > 0) {
        layer_free_tmp_debug_messengers(instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
        instance_data->num_tmp_debug_messengers = 0;
    }
    if (instance_data->num_tmp_report_callbacks > 0) {
        layer_free_tmp_report_callbacks(instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
        instance_data->num_tmp_report_callbacks = 0;
    }

    // Clean up logging callback, if any
    while (instance_data->logging_messenger.size() > 0) {
        VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
        layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
        instance_data->logging_messenger.pop_back();
    }
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    DestroyObject(instance, instance, kVulkanObjectTypeInstance, pAllocator, "VUID-vkDestroyInstance-instance-00630",
                  "VUID-vkDestroyInstance-instance-00631");

    layer_debug_utils_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, layer_data_map);

    lock.unlock();
}

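// Device teardown: untrack the device, report and destroy any leaked child objects, and free per-device layer state.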
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    ValidateObject(device, device, kVulkanObjectTypeDevice, true, "VUID-vkDestroyDevice-device-parameter", kVUIDUndefined);
    DestroyObject(device_data->instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyDevice-device-00379",
                  "VUID-vkDestroyDevice-device-00380");

    // Report any remaining objects associated with this VkDevice object in LL
    ReportUndestroyedObjects(device, "VUID-vkDestroyDevice-device-00378");
    DestroyUndestroyedObjects(device);

    // Clean up Queue's MemRef Linked Lists
    DestroyQueueDataStructures(device);

    lock.unlock();
    dispatch_key key = get_dispatch_key(device);
    device_data->device_dispatch_table.DestroyDevice(device, pAllocator);
    FreeLayerDataPtr(key, layer_data_map);
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue-device-parameter", kVUIDUndefined);
    lock.unlock();

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->device_dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);

    lock.lock();
    CreateQueue(device, *pQueue);
    AddQueueInfo(device, queueFamilyIndex, *pQueue);
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue2-device-parameter", kVUIDUndefined);
    lock.unlock();

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->device_dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);

    lock.lock();
    if (*pQueue != VK_NULL_HANDLE) {
        CreateQueue(device, *pQueue);
        AddQueueInfo(device, pQueueInfo->queueFamilyIndex, *pQueue);
    }
}

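// Validate the device, every descriptor copy source/destination set, and every descriptor write before calling down the chain.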
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkUpdateDescriptorSets-device-parameter",
                               kVUIDUndefined);
        if (pDescriptorCopies) {
            for (uint32_t idx0 = 0; idx0 < descriptorCopyCount; ++idx0) {
                if (pDescriptorCopies[idx0].dstSet) {
                    skip |= ValidateObject(device, pDescriptorCopies[idx0].dstSet, kVulkanObjectTypeDescriptorSet, false,
                                           "VUID-VkCopyDescriptorSet-dstSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
                }
                if (pDescriptorCopies[idx0].srcSet) {
                    skip |= ValidateObject(device, pDescriptorCopies[idx0].srcSet, kVulkanObjectTypeDescriptorSet, false,
                                           "VUID-VkCopyDescriptorSet-srcSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
                }
            }
        }
        if (pDescriptorWrites) {
            for (uint32_t idx1 = 0; idx1 < descriptorWriteCount; ++idx1) {
                skip |= ValidateDescriptorWrite(device, &pDescriptorWrites[idx1], false);
            }
        }
    }
    if (skip) {
        return;
    }
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->device_dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                            pDescriptorCopies);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateComputePipelines-device-parameter",
                           kVUIDUndefined);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skip |=
                    ValidateObject(device, pCreateInfos[idx0].basePipelineHandle, kVulkanObjectTypePipeline, true,
                                   "VUID-VkComputePipelineCreateInfo-flags-00697", "VUID-VkComputePipelineCreateInfo-commonparent");
            }
            if (pCreateInfos[idx0].layout) {
                skip |= ValidateObject(device, pCreateInfos[idx0].layout, kVulkanObjectTypePipelineLayout, false,
                                       "VUID-VkComputePipelineCreateInfo-layout-parameter",
                                       "VUID-VkComputePipelineCreateInfo-commonparent");
            }
            if (pCreateInfos[idx0].stage.module) {
                skip |= ValidateObject(device, pCreateInfos[idx0].stage.module, kVulkanObjectTypeShaderModule, false,
                                       "VUID-VkPipelineShaderStageCreateInfo-module-parameter", kVUIDUndefined);
            }
        }
    }
    if (pipelineCache) {
        skip |= ValidateObject(device, pipelineCache, kVulkanObjectTypePipelineCache, true,
                               "VUID-vkCreateComputePipelines-pipelineCache-parameter",
                               "VUID-vkCreateComputePipelines-pipelineCache-parent");
    }
    lock.unlock();
    if (skip) {
        for (uint32_t i = 0; i < createInfoCount; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount,
                                                                                pCreateInfos, pAllocator, pPipelines);

    lock.lock();
    for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
        if (pPipelines[idx1] != VK_NULL_HANDLE) {
            CreateObject(device, pPipelines[idx1], kVulkanObjectTypePipeline, pAllocator);
        }
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkResetDescriptorPool-device-parameter",
                           kVUIDUndefined);
    skip |=
        ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                       "VUID-vkResetDescriptorPool-descriptorPool-parameter", "VUID-vkResetDescriptorPool-descriptorPool-parent");
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is reset.
    // Remove this pool's descriptor sets from our descriptorSet map.
    auto itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
    while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(descriptorPool)) {
            DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
                          kVUIDUndefined);
        }
    }
    lock.unlock();
    VkResult result = device_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    return result;
}

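// For secondary command buffers continuing a render pass, also validate the inheritance framebuffer and render pass.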
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer command_buffer, const VkCommandBufferBeginInfo *begin_info) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(command_buffer), layer_data_map);
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(command_buffer, command_buffer, kVulkanObjectTypeCommandBuffer, false,
                               "VUID-vkBeginCommandBuffer-commandBuffer-parameter", kVUIDUndefined);
        if (begin_info) {
            ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];
            if ((begin_info->pInheritanceInfo) && (pNode->status & OBJSTATUS_COMMAND_BUFFER_SECONDARY) &&
                (begin_info->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                skip |=
                    ValidateObject(command_buffer, begin_info->pInheritanceInfo->framebuffer, kVulkanObjectTypeFramebuffer, true,
                                   "VUID-VkCommandBufferBeginInfo-flags-00055", "VUID-VkCommandBufferInheritanceInfo-commonparent");
                skip |=
                    ValidateObject(command_buffer, begin_info->pInheritanceInfo->renderPass, kVulkanObjectTypeRenderPass, false,
                                   "VUID-VkCommandBufferBeginInfo-flags-00053", "VUID-VkCommandBufferInheritanceInfo-commonparent");
            }
        }
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = device_data->device_dispatch_table.BeginCommandBuffer(command_buffer, begin_info);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugReportCallbackEXT *pCallback) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback);
    if (VK_SUCCESS == result) {
        result = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pCallback);
        CreateObject(instance, *pCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
    DestroyObject(instance, msgCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator,
                  "VUID-vkDestroyDebugReportCallbackEXT-instance-01242", "VUID-vkDestroyDebugReportCallbackEXT-instance-01243");
}

VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                                 pMsg);
}

// VK_EXT_debug_utils commands
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        lock.lock();
        dev_data->report_data->debugUtilsObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
        lock.unlock();
    } else {
        lock.lock();
        dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
        lock.unlock();
    }
    VkResult result = dev_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
    return result;
}

VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        lock.lock();
        BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        dev_data->device_dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
        lock.lock();
        EndQueueDebugUtilsLabel(dev_data->report_data, queue);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        lock.lock();
        InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        lock.lock();
        BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        dev_data->device_dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
        lock.lock();
        EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        lock.lock();
        InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
                                                            const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugUtilsMessengerEXT *pMessenger) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
    if (VK_SUCCESS == result) {
        result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
        CreateObject(instance, *pMessenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
                                                         const VkAllocationCallbacks *pAllocator) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
    layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
    DestroyObject(instance, messenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator, kVUIDUndefined, kVUIDUndefined);
}

VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
                                                      VkDebugUtilsMessageTypeFlagsEXT messageTypes,
                                                      const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
                                                            {VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};

static const VkLayerProperties globalLayerProps = {"VK_LAYER_LUNARG_object_tracker",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "LunarG Validation Layer"};

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    return instance_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

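// Create the device through the layer chain, initialize this layer's device dispatch table, and track the new VkDevice
// under its parent instance.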
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock);
    bool skip = ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkCreateDevice-physicalDevice-parameter", kVUIDUndefined);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    layer_data *phy_dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(phy_dev_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    device_data->report_data = layer_debug_utils_create_device(phy_dev_data->report_data, *pDevice);
    layer_init_device_dispatch_table(*pDevice, &device_data->device_dispatch_table, fpGetDeviceProcAddr);

    // Add link back to physDev
    device_data->physical_device = physicalDevice;
    device_data->instance = phy_dev_data->instance;

    CreateObject(phy_dev_data->instance, *pDevice, kVulkanObjectTypeDevice, pAllocator);

    return result;
}

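// Swapchain images are created by the implementation rather than through vkCreateImage, so
// they are added to the tracker here, after the down-chain call returns them.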
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetSwapchainImagesKHR-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, false,
                           "VUID-vkGetSwapchainImagesKHR-swapchain-parameter", kVUIDUndefined);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result =
        device_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    if (pSwapchainImages != NULL) {
        lock.lock();
        for (uint32_t i = 0; i < *pSwapchainImageCount; i++) {
            CreateSwapchainImageObject(device, pSwapchainImages[i], swapchain);
        }
        lock.unlock();
    }
    return result;
}

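// In addition to the device handle, validate any immutable sampler handles supplied in
// sampler-type layout bindings before passing the create call down the chain.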
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateDescriptorSetLayout-device-parameter",
                               kVUIDUndefined);
        if (pCreateInfo) {
            if (pCreateInfo->pBindings) {
                for (uint32_t binding_index = 0; binding_index < pCreateInfo->bindingCount; ++binding_index) {
                    const VkDescriptorSetLayoutBinding &binding = pCreateInfo->pBindings[binding_index];
                    const bool is_sampler_type = binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                                                 binding.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
                    if (binding.pImmutableSamplers && is_sampler_type) {
                        for (uint32_t index2 = 0; index2 < binding.descriptorCount; ++index2) {
                            const VkSampler sampler = binding.pImmutableSamplers[index2];
                            skip |= ValidateObject(device, sampler, kVulkanObjectTypeSampler, false,
                                                   "VUID-VkDescriptorSetLayoutBinding-descriptorType-00282", kVUIDUndefined);
                        }
                    }
                }
            }
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        CreateObject(device, *pSetLayout, kVulkanObjectTypeDescriptorSetLayout, pAllocator);
    }
    return result;
}

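// The returned queue family properties are cached in the instance data so that later
// queue-related checks in this layer can consult them.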
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkGetPhysicalDeviceQueueFamilyProperties-physicalDevice-parameter", kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
                                                                                  pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i];
        }
    }
}

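// Intercept of vkCreateInstance: call down the chain first, then initialize this layer's
// instance dispatch table, debug reporting, and object tracking state, and begin tracking
// the new VkInstance object.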
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_debug_messengers(pCreateInfo->pNext, &instance_data->num_tmp_debug_messengers,
                                    &instance_data->tmp_messenger_create_infos, &instance_data->tmp_debug_messengers);
    layer_copy_tmp_report_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_report_callbacks,
                                    &instance_data->tmp_report_create_infos, &instance_data->tmp_report_callbacks);

    instance_data->report_data =
        debug_utils_create_instance(&instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                    pCreateInfo->ppEnabledExtensionNames);

    InitObjectTracker(instance_data, pAllocator);

    CreateObject(*pInstance, *pInstance, kVulkanObjectTypeInstance, pAllocator);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(instance, instance, kVulkanObjectTypeInstance, false,
                           "VUID-vkEnumeratePhysicalDevices-instance-parameter", kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                CreateObject(instance, pPhysicalDevices[i], kVulkanObjectTypePhysicalDevice, nullptr);
            }
        }
    }
    lock.unlock();
    return result;
}

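// Command buffers are tracked along with their parent pool and level so that later frees
// and pool destruction can be cross-checked against the allocating pool.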
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                                      VkCommandBuffer *pCommandBuffers) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateCommandBuffers-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, pAllocateInfo->commandPool, kVulkanObjectTypeCommandPool, false,
                           "VUID-VkCommandBufferAllocateInfo-commandPool-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    lock.lock();
    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
        AllocateCommandBuffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], pAllocateInfo->level);
    }
    lock.unlock();

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateDescriptorSets-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, pAllocateInfo->descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                           "VUID-VkDescriptorSetAllocateInfo-descriptorPool-parameter",
                           "VUID-VkDescriptorSetAllocateInfo-commonparent");
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skip |= ValidateObject(device, pAllocateInfo->pSetLayouts[i], kVulkanObjectTypeDescriptorSetLayout, false,
                               "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-parameter",
                               "VUID-VkDescriptorSetAllocateInfo-commonparent");
    }
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            AllocateDescriptorSet(device, pAllocateInfo->descriptorPool, pDescriptorSets[i]);
        }
        lock.unlock();
    }

    return result;
}

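// Each command buffer is validated against the pool it was allocated from and removed from
// the tracker; the free is only passed down the chain if validation passed.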
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeCommandBuffers-device-parameter", kVUIDUndefined);
    ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, false, "VUID-vkFreeCommandBuffers-commandPool-parameter",
                   "VUID-vkFreeCommandBuffers-commandPool-parent");
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        if (pCommandBuffers[i] != VK_NULL_HANDLE) {
            skip |= ValidateCommandBuffer(device, commandPool, pCommandBuffers[i]);
        }
    }

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        DestroyObject(device, pCommandBuffers[i], kVulkanObjectTypeCommandBuffer, nullptr, kVUIDUndefined, kVUIDUndefined);
    }

    lock.unlock();
    if (!skip) {
        layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
        device_data->device_dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->swapchainImageMap.begin();
    while (itr != device_data->swapchainImageMap.end()) {
        ObjTrackState *pNode = (*itr).second;
        if (pNode->parent_object == HandleToUint64(swapchain)) {
            delete pNode;
            auto delete_item = itr++;
            device_data->swapchainImageMap.erase(delete_item);
        } else {
            ++itr;
        }
    }
    DestroyObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, pAllocator, "VUID-vkDestroySwapchainKHR-swapchain-01283",
                  "VUID-vkDestroySwapchainKHR-swapchain-01284");
    lock.unlock();

    device_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

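// Mirrors FreeCommandBuffers: descriptor sets are validated against their pool and
// untracked, and the call is only passed down the chain if validation passed.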
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount,
                                                  const VkDescriptorSet *pDescriptorSets) {
    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeDescriptorSets-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                           "VUID-vkFreeDescriptorSets-descriptorPool-parameter", "VUID-vkFreeDescriptorSets-descriptorPool-parent");
    for (uint32_t i = 0; i < descriptorSetCount; i++) {
        if (pDescriptorSets[i] != VK_NULL_HANDLE) {
            skip |= ValidateDescriptorSet(device, descriptorPool, pDescriptorSets[i]);
        }
    }

    for (uint32_t i = 0; i < descriptorSetCount; i++) {
        DestroyObject(device, pDescriptorSets[i], kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined, kVUIDUndefined);
    }

    lock.unlock();
    if (!skip) {
        layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
        result = device_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    bool skip = VK_FALSE;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyDescriptorPool-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, true,
                           "VUID-vkDestroyDescriptorPool-descriptorPool-parameter",
                           "VUID-vkDestroyDescriptorPool-descriptorPool-parent");
    lock.unlock();
    if (skip) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    lock.lock();
    std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
    while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(descriptorPool)) {
            DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
                          kVUIDUndefined);
        }
    }
    DestroyObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, pAllocator,
                  "VUID-vkDestroyDescriptorPool-descriptorPool-00304", "VUID-vkDestroyDescriptorPool-descriptorPool-00305");
    lock.unlock();
    device_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyCommandPool-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, true,
                           "VUID-vkDestroyCommandPool-commandPool-parameter", "VUID-vkDestroyCommandPool-commandPool-parent");
    lock.unlock();
    if (skip) {
        return;
    }
    lock.lock();
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    auto itr = device_data->object_map[kVulkanObjectTypeCommandBuffer].begin();
    auto del_itr = itr;
    while (itr != device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
        ObjTrackState *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(commandPool)) {
            skip |= ValidateCommandBuffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
            DestroyObject(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first), kVulkanObjectTypeCommandBuffer, nullptr,
                          kVUIDUndefined, kVUIDUndefined);
        }
    }
    DestroyObject(device, commandPool, kVulkanObjectTypeCommandPool, pAllocator, "VUID-vkDestroyCommandPool-commandPool-00042",
                  "VUID-vkDestroyCommandPool-commandPool-00043");
    lock.unlock();
    device_data->device_dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}

// Note: This is the core version of this routine.  The extension version is below.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
                                                                   uint32_t *pQueueFamilyPropertyCount,
                                                                   VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
                                                                                   pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

// Note: This is the extension version of this routine.  The core version is above.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
                                                                                      pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                           "VUID-vkGetPhysicalDeviceDisplayPropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);

    lock.lock();
    if (result == VK_SUCCESS) {
        if (pProperties) {
            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                CreateObject(physicalDevice, pProperties[i].display, kVulkanObjectTypeDisplayKHR, nullptr);
            }
        }
    }
    lock.unlock();

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                           uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                           "VUID-vkGetDisplayModePropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
    skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false,
                           "VUID-vkGetDisplayModePropertiesKHR-display-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);

    lock.lock();
    if (result == VK_SUCCESS) {
        if (pProperties) {
            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                CreateObject(physicalDevice, pProperties[i].displayMode, kVulkanObjectTypeDisplayModeKHR, nullptr);
            }
        }
    }
    lock.unlock();

    return result;
}

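// Record (or clear) the application-supplied object name so that subsequent validation
// messages can refer to the object by that name.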
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        dev_data->report_data->debugObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
    } else {
        dev_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
    }
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDebugMarkerSetObjectNameEXT-device-parameter",
                           kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    if (instance_data->instance_dispatch_table.GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return instance_data->instance_dispatch_table.GetPhysicalDeviceProcAddr(instance, funcName);
}

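// Function lookup: return this layer's intercept if the name is found in name_to_funcptr_map,
// otherwise forward the query to the next layer in the chain.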
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!device_data->device_dispatch_table.GetDeviceProcAddr) return NULL;
    return device_data->device_dispatch_table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    if (!instance_data->instance_dispatch_table.GetInstanceProcAddr) return nullptr;
    return instance_data->instance_dispatch_table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                      VkDisplayProperties2KHR *pProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(ot_instance_table_map, physicalDevice)
                          ->GetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties);
    if (pProperties && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t index = 0; index < *pPropertyCount; ++index) {
            CreateObject(physicalDevice, pProperties[index].displayProperties.display, kVulkanObjectTypeDisplayKHR, nullptr);
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-physicalDevice-parameter", kVUIDUndefined);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(ot_instance_table_map, physicalDevice)
                          ->GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
    if (pDisplays && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t index = 0; index < *pDisplayCount; ++index) {
            CreateObject(physicalDevice, pDisplays[index], kVulkanObjectTypeDisplayKHR, nullptr);
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                            uint32_t *pPropertyCount, VkDisplayModeProperties2KHR *pProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
        skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(ot_instance_table_map, physicalDevice)
                          ->GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties);
    if (pProperties && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t index = 0; index < *pPropertyCount; ++index) {
            CreateObject(physicalDevice, pProperties[index].displayModeProperties.displayMode, kVulkanObjectTypeDisplayModeKHR,
                         nullptr);
        }
    }

    return result;
}

}  // namespace object_tracker

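// Exported C entry points. The loader resolves these symbols directly; they simply forward
// into the object_tracker namespace implementations above.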
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return object_tracker::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return object_tracker::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return object_tracker::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return object_tracker::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return object_tracker::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return object_tracker::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return object_tracker::GetPhysicalDeviceProcAddr(instance, funcName);
}

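// Called by the loader to agree on a layer interface version and obtain this layer's entry
// points. A minimal sketch of the loader side (hypothetical, for illustration only):
//
//     VkNegotiateLayerInterface negotiate_info = {};
//     negotiate_info.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
//     negotiate_info.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
//     vkNegotiateLoaderLayerInterfaceVersion(&negotiate_info);
//     // On return, negotiate_info holds the layer's GetInstanceProcAddr/GetDeviceProcAddr
//     // pointers and the interface version both sides will use.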
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        object_tracker::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}