/* Copyright (c) 2015-2018 The Khronos Group Inc.
 * Copyright (c) 2015-2018 Valve Corporation
 * Copyright (c) 2015-2018 LunarG, Inc.
 * Copyright (C) 2015-2018 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#define VALIDATION_ERROR_MAP_IMPL

#include "object_tracker.h"

namespace object_tracker {

std::unordered_map<void *, layer_data *> layer_data_map;
std::mutex global_lock;
uint64_t object_track_index = 0;
uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

void InitObjectTracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_report_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
    layer_debug_messenger_actions(my_data->report_data, my_data->logging_messenger, pAllocator, "lunarg_object_tracker");
}

// Add new queue to head of global queue list
void AddQueueInfo(VkDevice device, uint32_t queue_node_index, VkQueue queue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto queueItem = device_data->queue_info_map.find(queue);
    if (queueItem == device_data->queue_info_map.end()) {
        ObjTrackQueueInfo *p_queue_info = new ObjTrackQueueInfo;
        if (p_queue_info != NULL) {
            memset(p_queue_info, 0, sizeof(ObjTrackQueueInfo));
            p_queue_info->queue = queue;
            p_queue_info->queue_node_index = queue_node_index;
            device_data->queue_info_map[queue] = p_queue_info;
        } else {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    HandleToUint64(queue), kVUID_ObjectTracker_InternalError,
                    "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
        }
    }
}

// Destroy memRef lists and free all memory
void DestroyQueueDataStructures(VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    for (auto queue_item : device_data->queue_info_map) {
        delete queue_item.second;
    }
    device_data->queue_info_map.clear();

    // Destroy the items in the queue map
    auto queue = device_data->object_map[kVulkanObjectTypeQueue].begin();
    while (queue != device_data->object_map[kVulkanObjectTypeQueue].end()) {
        uint32_t obj_index = queue->second->object_type;
        assert(device_data->num_total_objects > 0);
        device_data->num_total_objects--;
        assert(device_data->num_objects[obj_index] > 0);
        device_data->num_objects[obj_index]--;
        log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                queue->second->handle, kVUID_ObjectTracker_Info,
                "OBJ_STAT Destroy Queue obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " Queue objs).",
                queue->second->handle, device_data->num_total_objects, device_data->num_objects[obj_index]);
        delete queue->second;
        queue = device_data->object_map[kVulkanObjectTypeQueue].erase(queue);
    }
}

// Check Queue type flags for selected queue operations
void ValidateQueueFlags(VkQueue queue, const char *function) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    auto queue_item = device_data->queue_info_map.find(queue);
    if (queue_item != device_data->queue_info_map.end()) {
        ObjTrackQueueInfo *pQueueInfo = queue_item->second;
        if (pQueueInfo != NULL) {
            layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(device_data->physical_device), layer_data_map);
            if ((instance_data->queue_family_properties[pQueueInfo->queue_node_index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) ==
                0) {
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                        HandleToUint64(queue), "VUID-vkQueueBindSparse-queuetype",
                        "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.", function);
            }
        }
    }
}

// Look for this device object in any of the instance child devices lists.
// NOTE: This is of dubious value. In most circumstances Vulkan will die a flaming death if a dispatchable object is invalid.
// However, if this layer is loaded first and GetProcAddress is used to make API calls, it will detect bad DOs.
bool ValidateDeviceObject(uint64_t device_handle, const std::string &invalid_handle_code, const std::string &wrong_device_code) {
    VkInstance last_instance = nullptr;
    for (auto layer_data : layer_data_map) {
        for (auto object : layer_data.second->object_map[kVulkanObjectTypeDevice]) {
            // Grab last instance to use for possible error message
            last_instance = layer_data.second->instance;
            if (object.second->handle == device_handle) return false;
        }
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(last_instance), layer_data_map);
    return log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device_handle,
                   invalid_handle_code, "Invalid Device Object 0x%" PRIxLEAST64 ".", device_handle);
}

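// Record a command buffer in the tracker's object map, noting its parent command pool and whether it is a secondary buffer.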
void AllocateCommandBuffer(VkDevice device, const VkCommandPool command_pool, const VkCommandBuffer command_buffer,
                           VkCommandBufferLevel level) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            HandleToUint64(command_buffer), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT", HandleToUint64(command_buffer));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeCommandBuffer;
    pNewObjNode->handle = HandleToUint64(command_buffer);
    pNewObjNode->parent_object = HandleToUint64(command_pool);
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)] = pNewObjNode;
    device_data->num_objects[kVulkanObjectTypeCommandBuffer]++;
    device_data->num_total_objects++;
}

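// Verify that a command buffer being freed is tracked and was allocated from the specified command pool.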
bool ValidateCommandBuffer(VkDevice device, VkCommandPool command_pool, VkCommandBuffer command_buffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    uint64_t object_handle = HandleToUint64(command_buffer);
    if (device_data->object_map[kVulkanObjectTypeCommandBuffer].find(object_handle) !=
        device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
        ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];

        if (pNode->parent_object != HandleToUint64(command_pool)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-parent",
                            "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
                            " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                            HandleToUint64(command_buffer), pNode->parent_object, HandleToUint64(command_pool));
        }
    } else {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-00048", "Invalid %s Object 0x%" PRIxLEAST64 ".",
                        object_string[kVulkanObjectTypeCommandBuffer], object_handle);
    }
    return skip;
}

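// Record a descriptor set in the tracker's object map, noting its parent descriptor pool.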
void AllocateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
            HandleToUint64(descriptor_set), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT", HandleToUint64(descriptor_set));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeDescriptorSet;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->handle = HandleToUint64(descriptor_set);
    pNewObjNode->parent_object = HandleToUint64(descriptor_pool);
    device_data->object_map[kVulkanObjectTypeDescriptorSet][HandleToUint64(descriptor_set)] = pNewObjNode;
    device_data->num_objects[kVulkanObjectTypeDescriptorSet]++;
    device_data->num_total_objects++;
}

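// Verify that a descriptor set being freed is tracked and was allocated from the specified descriptor pool.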
bool ValidateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    uint64_t object_handle = HandleToUint64(descriptor_set);
    auto dsItem = device_data->object_map[kVulkanObjectTypeDescriptorSet].find(object_handle);
    if (dsItem != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = dsItem->second;

        if (pNode->parent_object != HandleToUint64(descriptor_pool)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-parent",
                            "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                            " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                            HandleToUint64(descriptor_set), pNode->parent_object, HandleToUint64(descriptor_pool));
        }
    } else {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-00310", "Invalid %s Object 0x%" PRIxLEAST64 ".",
                        object_string[kVulkanObjectTypeDescriptorSet], object_handle);
    }
    return skip;
}

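// Validate every handle referenced by a VkWriteDescriptorSet; when isPush is true the dstSet member is not checked.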
template <typename DispObj>
static bool ValidateDescriptorWrite(DispObj disp, VkWriteDescriptorSet const *desc, bool isPush) {
    bool skip = false;

    if (!isPush && desc->dstSet) {
        skip |= ValidateObject(disp, desc->dstSet, kVulkanObjectTypeDescriptorSet, false, "VUID-VkWriteDescriptorSet-dstSet-00320",
                               "VUID-VkWriteDescriptorSet-commonparent");
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)) {
        for (uint32_t idx2 = 0; idx2 < desc->descriptorCount; ++idx2) {
            skip |= ValidateObject(disp, desc->pTexelBufferView[idx2], kVulkanObjectTypeBufferView, false,
                                   "VUID-VkWriteDescriptorSet-descriptorType-00323", "VUID-VkWriteDescriptorSet-commonparent");
        }
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) {
        for (uint32_t idx3 = 0; idx3 < desc->descriptorCount; ++idx3) {
            skip |= ValidateObject(disp, desc->pImageInfo[idx3].imageView, kVulkanObjectTypeImageView, false,
                                   "VUID-VkWriteDescriptorSet-descriptorType-00326", "VUID-VkDescriptorImageInfo-commonparent");
        }
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
        for (uint32_t idx4 = 0; idx4 < desc->descriptorCount; ++idx4) {
            if (desc->pBufferInfo[idx4].buffer) {
                skip |= ValidateObject(disp, desc->pBufferInfo[idx4].buffer, kVulkanObjectTypeBuffer, false,
                                       "VUID-VkDescriptorBufferInfo-buffer-parameter", kVUIDUndefined);
            }
        }
    }

    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                   VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false,
                           "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
        skip |= ValidateObject(commandBuffer, layout, kVulkanObjectTypePipelineLayout, false,
                               "VUID-vkCmdPushDescriptorSetKHR-layout-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
        if (pDescriptorWrites) {
            for (uint32_t index0 = 0; index0 < descriptorWriteCount; ++index0) {
                skip |= ValidateDescriptorWrite(commandBuffer, &pDescriptorWrites[index0], true);
            }
        }
    }
    if (skip) return;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->device_dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
                                                               pDescriptorWrites);
}

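// Add a queue to the tracker's object map, or reuse the existing entry if this queue has been seen before.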
void CreateQueue(VkDevice device, VkQueue vkObj) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
            HandleToUint64(vkObj), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT", HandleToUint64(vkObj));

    ObjTrackState *p_obj_node = NULL;
    auto queue_item = device_data->object_map[kVulkanObjectTypeQueue].find(HandleToUint64(vkObj));
    if (queue_item == device_data->object_map[kVulkanObjectTypeQueue].end()) {
        p_obj_node = new ObjTrackState;
        device_data->object_map[kVulkanObjectTypeQueue][HandleToUint64(vkObj)] = p_obj_node;
        device_data->num_objects[kVulkanObjectTypeQueue]++;
        device_data->num_total_objects++;
    } else {
        p_obj_node = queue_item->second;
    }
    p_obj_node->object_type = kVulkanObjectTypeQueue;
    p_obj_node->status = OBJSTATUS_NONE;
    p_obj_node->handle = HandleToUint64(vkObj);
}

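// Track a swapchain image in swapchainImageMap, recording the owning swapchain as its parent.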
void CreateSwapchainImageObject(VkDevice dispatchable_object, VkImage swapchain_image, VkSwapchainKHR swapchain) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(dispatchable_object), layer_data_map);
    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
            HandleToUint64(swapchain_image), kVUID_ObjectTracker_Info, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "SwapchainImage", HandleToUint64(swapchain_image));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeImage;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->handle = HandleToUint64(swapchain_image);
    pNewObjNode->parent_object = HandleToUint64(swapchain);
    device_data->swapchainImageMap[HandleToUint64(swapchain_image)] = pNewObjNode;
}

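// Report an error for each object of the given type that is still alive when its parent device is torn down.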
void DeviceReportUndestroyedObjects(VkDevice device, VulkanObjectType object_type, const std::string &error_code) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    for (const auto &item : device_data->object_map[object_type]) {
        const ObjTrackState *object_info = item.second;
        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], object_info->handle,
                error_code, "OBJ ERROR : For device 0x%" PRIxLEAST64 ", %s object 0x%" PRIxLEAST64 " has not been destroyed.",
                HandleToUint64(device), object_string[object_type], object_info->handle);
    }
}

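// Remove every remaining tracked object of the given type for this device without emitting further messages.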
void DeviceDestroyUndestroyedObjects(VkDevice device, VulkanObjectType object_type) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    while (!device_data->object_map[object_type].empty()) {
        auto item = device_data->object_map[object_type].begin();

        ObjTrackState *object_info = item->second;
        DestroyObjectSilently(device, object_info->handle, object_type);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);

    dispatch_key key = get_dispatch_key(instance);
    layer_data *instance_data = GetLayerDataPtr(key, layer_data_map);

    // Enable the temporary callback(s) here to catch cleanup issues:
    if (instance_data->num_tmp_debug_messengers > 0) {
        layer_enable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
                                          instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
    }
    if (instance_data->num_tmp_report_callbacks > 0) {
        layer_enable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
                                          instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
    }

    // TODO: The instance handle can not be validated here. The loader will likely have to validate it.
    ValidateObject(instance, instance, kVulkanObjectTypeInstance, true, "VUID-vkDestroyInstance-instance-parameter",
                   kVUIDUndefined);

    // Destroy physical devices
    for (auto iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
         iit != instance_data->object_map[kVulkanObjectTypePhysicalDevice].end();) {
        ObjTrackState *pNode = iit->second;
        VkPhysicalDevice physical_device = reinterpret_cast<VkPhysicalDevice>(pNode->handle);

        DestroyObject(instance, physical_device, kVulkanObjectTypePhysicalDevice, nullptr, kVUIDUndefined, kVUIDUndefined);
        iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
    }

    // Destroy child devices
    for (auto iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
         iit != instance_data->object_map[kVulkanObjectTypeDevice].end();) {
        ObjTrackState *pNode = iit->second;

        VkDevice device = reinterpret_cast<VkDevice>(pNode->handle);
        VkDebugReportObjectTypeEXT debug_object_type = get_debug_report_enum[pNode->object_type];

        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, pNode->handle,
                kVUID_ObjectTracker_ObjectLeak, "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.",
                string_VkDebugReportObjectTypeEXT(debug_object_type), pNode->handle);

        // Report any remaining objects in LL
        ReportUndestroyedObjects(device, "VUID-vkDestroyInstance-instance-00629");
        DestroyUndestroyedObjects(device);

        DestroyObject(instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyInstance-instance-00630",
                      "VUID-vkDestroyInstance-instance-00631");
        iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
    }

    instance_data->object_map[kVulkanObjectTypeDevice].clear();
    instance_data->instance_dispatch_table.DestroyInstance(instance, pAllocator);

    // Disable and cleanup the temporary callback(s):
    layer_disable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
                                       instance_data->tmp_debug_messengers);
    layer_disable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
                                       instance_data->tmp_report_callbacks);
    if (instance_data->num_tmp_debug_messengers > 0) {
        layer_free_tmp_debug_messengers(instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
        instance_data->num_tmp_debug_messengers = 0;
    }
    if (instance_data->num_tmp_report_callbacks > 0) {
        layer_free_tmp_report_callbacks(instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
        instance_data->num_tmp_report_callbacks = 0;
    }

    // Clean up logging callback, if any
    while (instance_data->logging_messenger.size() > 0) {
        VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
        layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
        instance_data->logging_messenger.pop_back();
    }
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    DestroyObject(instance, instance, kVulkanObjectTypeInstance, pAllocator, "VUID-vkDestroyInstance-instance-00630",
                  "VUID-vkDestroyInstance-instance-00631");

    layer_debug_utils_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, layer_data_map);

    lock.unlock();
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    ValidateObject(device, device, kVulkanObjectTypeDevice, true, "VUID-vkDestroyDevice-device-parameter", kVUIDUndefined);
    DestroyObject(device_data->instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyDevice-device-00379",
                  "VUID-vkDestroyDevice-device-00380");

    // Report any remaining objects associated with this VkDevice object in LL
    ReportUndestroyedObjects(device, "VUID-vkDestroyDevice-device-00378");
    DestroyUndestroyedObjects(device);

    // Clean up Queue's MemRef Linked Lists
    DestroyQueueDataStructures(device);

    lock.unlock();
    dispatch_key key = get_dispatch_key(device);
    device_data->device_dispatch_table.DestroyDevice(device, pAllocator);
    FreeLayerDataPtr(key, layer_data_map);
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue-device-parameter", kVUIDUndefined);
    lock.unlock();

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->device_dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);

    lock.lock();
    CreateQueue(device, *pQueue);
    AddQueueInfo(device, queueFamilyIndex, *pQueue);
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue2-device-parameter", kVUIDUndefined);
    lock.unlock();

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->device_dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);

    lock.lock();
    if (*pQueue != VK_NULL_HANDLE) {
        CreateQueue(device, *pQueue);
        AddQueueInfo(device, pQueueInfo->queueFamilyIndex, *pQueue);
    }
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkUpdateDescriptorSets-device-parameter",
                               kVUIDUndefined);
        if (pDescriptorCopies) {
            for (uint32_t idx0 = 0; idx0 < descriptorCopyCount; ++idx0) {
                if (pDescriptorCopies[idx0].dstSet) {
                    skip |= ValidateObject(device, pDescriptorCopies[idx0].dstSet, kVulkanObjectTypeDescriptorSet, false,
                                           "VUID-VkCopyDescriptorSet-dstSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
                }
                if (pDescriptorCopies[idx0].srcSet) {
                    skip |= ValidateObject(device, pDescriptorCopies[idx0].srcSet, kVulkanObjectTypeDescriptorSet, false,
                                           "VUID-VkCopyDescriptorSet-srcSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
                }
            }
        }
        if (pDescriptorWrites) {
            for (uint32_t idx1 = 0; idx1 < descriptorWriteCount; ++idx1) {
                skip |= ValidateDescriptorWrite(device, &pDescriptorWrites[idx1], false);
            }
        }
    }
    if (skip) {
        return;
    }
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->device_dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                            pDescriptorCopies);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateComputePipelines-device-parameter",
                           kVUIDUndefined);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skip |=
                    ValidateObject(device, pCreateInfos[idx0].basePipelineHandle, kVulkanObjectTypePipeline, true,
                                   "VUID-VkComputePipelineCreateInfo-flags-00697", "VUID-VkComputePipelineCreateInfo-commonparent");
            }
            if (pCreateInfos[idx0].layout) {
                skip |= ValidateObject(device, pCreateInfos[idx0].layout, kVulkanObjectTypePipelineLayout, false,
                                       "VUID-VkComputePipelineCreateInfo-layout-parameter",
                                       "VUID-VkComputePipelineCreateInfo-commonparent");
            }
            if (pCreateInfos[idx0].stage.module) {
                skip |= ValidateObject(device, pCreateInfos[idx0].stage.module, kVulkanObjectTypeShaderModule, false,
                                       "VUID-VkPipelineShaderStageCreateInfo-module-parameter", kVUIDUndefined);
            }
        }
    }
    if (pipelineCache) {
        skip |= ValidateObject(device, pipelineCache, kVulkanObjectTypePipelineCache, true,
                               "VUID-vkCreateComputePipelines-pipelineCache-parameter",
                               "VUID-vkCreateComputePipelines-pipelineCache-parent");
    }
    lock.unlock();
    if (skip) {
        for (uint32_t i = 0; i < createInfoCount; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount,
                                                                                pCreateInfos, pAllocator, pPipelines);

    lock.lock();
    for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
        if (pPipelines[idx1] != VK_NULL_HANDLE) {
            CreateObject(device, pPipelines[idx1], kVulkanObjectTypePipeline, pAllocator);
        }
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkResetDescriptorPool-device-parameter",
                           kVUIDUndefined);
    skip |=
        ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                       "VUID-vkResetDescriptorPool-descriptorPool-parameter", "VUID-vkResetDescriptorPool-descriptorPool-parent");
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is reset.
    // Remove this pool's descriptor sets from our descriptorSet map.
    auto itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
    while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(descriptorPool)) {
            DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
                          kVUIDUndefined);
        }
    }
    lock.unlock();
    VkResult result = device_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer command_buffer, const VkCommandBufferBeginInfo *begin_info) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(command_buffer), layer_data_map);
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(command_buffer, command_buffer, kVulkanObjectTypeCommandBuffer, false,
                               "VUID-vkBeginCommandBuffer-commandBuffer-parameter", kVUIDUndefined);
        if (begin_info) {
            ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];
            if ((begin_info->pInheritanceInfo) && (pNode->status & OBJSTATUS_COMMAND_BUFFER_SECONDARY) &&
                (begin_info->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                skip |=
                    ValidateObject(command_buffer, begin_info->pInheritanceInfo->framebuffer, kVulkanObjectTypeFramebuffer, true,
                                   "VUID-VkCommandBufferBeginInfo-flags-00055", "VUID-VkCommandBufferInheritanceInfo-commonparent");
                skip |=
                    ValidateObject(command_buffer, begin_info->pInheritanceInfo->renderPass, kVulkanObjectTypeRenderPass, false,
                                   "VUID-VkCommandBufferBeginInfo-flags-00053", "VUID-VkCommandBufferInheritanceInfo-commonparent");
            }
        }
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = device_data->device_dispatch_table.BeginCommandBuffer(command_buffer, begin_info);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugReportCallbackEXT *pCallback) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback);
    if (VK_SUCCESS == result) {
        result = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pCallback);
        CreateObject(instance, *pCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
    DestroyObject(instance, msgCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator,
                  "VUID-vkDestroyDebugReportCallbackEXT-instance-01242", "VUID-vkDestroyDebugReportCallbackEXT-instance-01243");
}

VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                                 pMsg);
}

// VK_EXT_debug_utils commands
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        lock.lock();
        dev_data->report_data->debugUtilsObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
        lock.unlock();
    } else {
        lock.lock();
        dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
        lock.unlock();
    }
    VkResult result = dev_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
    return result;
}

VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        lock.lock();
        BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        dev_data->device_dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
        lock.lock();
        EndQueueDebugUtilsLabel(dev_data->report_data, queue);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        lock.lock();
        InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        lock.lock();
        BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        dev_data->device_dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
        lock.lock();
        EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        lock.lock();
        InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
        lock.unlock();
        dev_data->device_dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
                                                            const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugUtilsMessengerEXT *pMessenger) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
    if (VK_SUCCESS == result) {
        result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
        CreateObject(instance, *pMessenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
                                                         const VkAllocationCallbacks *pAllocator) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
    layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
    DestroyObject(instance, messenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator, kVUIDUndefined, kVUIDUndefined);
}

VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
                                                      VkDebugUtilsMessageTypeFlagsEXT messageTypes,
                                                      const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
                                                            {VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};

static const VkLayerProperties globalLayerProps = {"VK_LAYER_LUNARG_object_tracker",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "LunarG Validation Layer"};

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    return instance_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

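// Intercept vkCreateDevice: validate the physical device, call the next layer in the chain to create the device,
// then initialize this layer's dispatch table for the new VkDevice and begin tracking it.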
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock);
    bool skip = ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkCreateDevice-physicalDevice-parameter", kVUIDUndefined);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    layer_data *phy_dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(phy_dev_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    device_data->report_data = layer_debug_utils_create_device(phy_dev_data->report_data, *pDevice);
    layer_init_device_dispatch_table(*pDevice, &device_data->device_dispatch_table, fpGetDeviceProcAddr);

    // Add link back to physDev
    device_data->physical_device = physicalDevice;
    device_data->instance = phy_dev_data->instance;

    CreateObject(phy_dev_data->instance, *pDevice, kVulkanObjectTypeDevice, pAllocator);

    return result;
}

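// Swapchain images are created by the implementation rather than by the application, so each
// image returned here is registered as a tracked object parented to its swapchain.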
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetSwapchainImagesKHR-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, false,
                           "VUID-vkGetSwapchainImagesKHR-swapchain-parameter", kVUIDUndefined);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result =
        device_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    if (pSwapchainImages != NULL) {
        lock.lock();
        for (uint32_t i = 0; i < *pSwapchainImageCount; i++) {
            CreateSwapchainImageObject(device, pSwapchainImages[i], swapchain);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateDescriptorSetLayout-device-parameter",
                               kVUIDUndefined);
        if (pCreateInfo) {
            if (pCreateInfo->pBindings) {
                for (uint32_t binding_index = 0; binding_index < pCreateInfo->bindingCount; ++binding_index) {
                    const VkDescriptorSetLayoutBinding &binding = pCreateInfo->pBindings[binding_index];
                    const bool is_sampler_type = binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                                                 binding.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
                    if (binding.pImmutableSamplers && is_sampler_type) {
                        for (uint32_t index2 = 0; index2 < binding.descriptorCount; ++index2) {
                            const VkSampler sampler = binding.pImmutableSamplers[index2];
                            skip |= ValidateObject(device, sampler, kVulkanObjectTypeSampler, false,
                                                   "VUID-VkDescriptorSetLayoutBinding-descriptorType-00282", kVUIDUndefined);
                        }
                    }
                }
            }
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        CreateObject(device, *pSetLayout, kVulkanObjectTypeDescriptorSetLayout, pAllocator);
    }
    return result;
}

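// Shared helper: validate any immutable sampler handles referenced by a descriptor set layout
// create info. Used by the GetDescriptorSetLayoutSupport entry points below.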
static inline bool ValidateSamplerObjects(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo) {
    bool skip = false;
    if (pCreateInfo->pBindings) {
        for (uint32_t index1 = 0; index1 < pCreateInfo->bindingCount; ++index1) {
            for (uint32_t index2 = 0; index2 < pCreateInfo->pBindings[index1].descriptorCount; ++index2) {
                if (pCreateInfo->pBindings[index1].pImmutableSamplers) {
                    skip |=
                        ValidateObject(device, pCreateInfo->pBindings[index1].pImmutableSamplers[index2], kVulkanObjectTypeSampler,
                                       true, "VUID-VkDescriptorSetLayoutBinding-descriptorType-00282", kVUIDUndefined);
                }
            }
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupport(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         VkDescriptorSetLayoutSupport *pSupport) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false,
                               "VUID-vkGetDescriptorSetLayoutSupport-device-parameter", kVUIDUndefined);
        if (pCreateInfo) {
            skip |= ValidateSamplerObjects(device, pCreateInfo);
        }
    }
    if (skip) return;
    GetLayerDataPtr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table.GetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport);
}

VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupportKHR(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                            VkDescriptorSetLayoutSupport *pSupport) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false,
                               "VUID-vkGetDescriptorSetLayoutSupportKHR-device-parameter", kVUIDUndefined);
        if (pCreateInfo) {
            skip |= ValidateSamplerObjects(device, pCreateInfo);
        }
    }
    if (skip) return;
    GetLayerDataPtr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table.GetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport);
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkGetPhysicalDeviceQueueFamilyProperties-physicalDevice-parameter", kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
                                                                                  pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i];
        }
    }
}

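// CreateInstance: unwrap the loader's layer-link info to reach the next layer, copy any temporary
// debug messenger/report callbacks from the pNext chain, then start tracking the new VkInstance.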
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_debug_messengers(pCreateInfo->pNext, &instance_data->num_tmp_debug_messengers,
                                    &instance_data->tmp_messenger_create_infos, &instance_data->tmp_debug_messengers);
    layer_copy_tmp_report_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_report_callbacks,
                                    &instance_data->tmp_report_create_infos, &instance_data->tmp_report_callbacks);

    instance_data->report_data =
        debug_utils_create_instance(&instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                    pCreateInfo->ppEnabledExtensionNames);

    InitObjectTracker(instance_data, pAllocator);

    CreateObject(*pInstance, *pInstance, kVulkanObjectTypeInstance, pAllocator);

    return result;
}

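// Each physical device returned by the implementation is registered as a child of the instance.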
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(instance, instance, kVulkanObjectTypeInstance, false,
                           "VUID-vkEnumeratePhysicalDevices-instance-parameter", kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                CreateObject(instance, pPhysicalDevices[i], kVulkanObjectTypePhysicalDevice, nullptr);
            }
        }
    }
    lock.unlock();
    return result;
}

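// Command buffers are tracked per-pool so they can be validated and cleaned up when the pool is
// destroyed or the buffers are freed.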
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                                      VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateCommandBuffers-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, pAllocateInfo->commandPool, kVulkanObjectTypeCommandPool, false,
                           "VUID-VkCommandBufferAllocateInfo-commandPool-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    lock.lock();
    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
        AllocateCommandBuffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], pAllocateInfo->level);
    }
    lock.unlock();

    return result;
}

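// Descriptor sets are tracked per-pool, mirroring the command buffer handling above.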
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateDescriptorSets-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, pAllocateInfo->descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                           "VUID-VkDescriptorSetAllocateInfo-descriptorPool-parameter",
                           "VUID-VkDescriptorSetAllocateInfo-commonparent");
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skip |= ValidateObject(device, pAllocateInfo->pSetLayouts[i], kVulkanObjectTypeDescriptorSetLayout, false,
                               "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-parameter",
                               "VUID-VkDescriptorSetAllocateInfo-commonparent");
    }
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->device_dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            AllocateDescriptorSet(device, pAllocateInfo->descriptorPool, pDescriptorSets[i]);
        }
        lock.unlock();
    }

    return result;
}

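// Freed command buffers are checked against their pool and removed from tracking before the call
// is passed down the chain.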
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeCommandBuffers-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, false,
                           "VUID-vkFreeCommandBuffers-commandPool-parameter", "VUID-vkFreeCommandBuffers-commandPool-parent");
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        if (pCommandBuffers[i] != VK_NULL_HANDLE) {
            skip |= ValidateCommandBuffer(device, commandPool, pCommandBuffers[i]);
        }
    }

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        DestroyObject(device, pCommandBuffers[i], kVulkanObjectTypeCommandBuffer, nullptr, kVUIDUndefined, kVUIDUndefined);
    }

    lock.unlock();
    if (!skip) {
        layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
        device_data->device_dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->swapchainImageMap.begin();
    while (itr != device_data->swapchainImageMap.end()) {
        ObjTrackState *pNode = (*itr).second;
        if (pNode->parent_object == HandleToUint64(swapchain)) {
            delete pNode;
            auto delete_item = itr++;
            device_data->swapchainImageMap.erase(delete_item);
        } else {
            ++itr;
        }
    }
    DestroyObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, pAllocator, "VUID-vkDestroySwapchainKHR-swapchain-01283",
                  "VUID-vkDestroySwapchainKHR-swapchain-01284");
    lock.unlock();

    device_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount,
                                                  const VkDescriptorSet *pDescriptorSets) {
    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeDescriptorSets-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                           "VUID-vkFreeDescriptorSets-descriptorPool-parameter", "VUID-vkFreeDescriptorSets-descriptorPool-parent");
    for (uint32_t i = 0; i < descriptorSetCount; i++) {
        if (pDescriptorSets[i] != VK_NULL_HANDLE) {
            skip |= ValidateDescriptorSet(device, descriptorPool, pDescriptorSets[i]);
        }
    }

    for (uint32_t i = 0; i < descriptorSetCount; i++) {
        DestroyObject(device, pDescriptorSets[i], kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined, kVUIDUndefined);
    }

    lock.unlock();
    if (!skip) {
        layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
        result = device_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyDescriptorPool-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, true,
                           "VUID-vkDestroyDescriptorPool-descriptorPool-parameter",
                           "VUID-vkDestroyDescriptorPool-descriptorPool-parent");
    lock.unlock();
    if (skip) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    lock.lock();
    std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
    while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(descriptorPool)) {
            DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
                          kVUIDUndefined);
        }
    }
    DestroyObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, pAllocator,
                  "VUID-vkDestroyDescriptorPool-descriptorPool-00304", "VUID-vkDestroyDescriptorPool-descriptorPool-00305");
    lock.unlock();
    device_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyCommandPool-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, true,
                           "VUID-vkDestroyCommandPool-commandPool-parameter", "VUID-vkDestroyCommandPool-commandPool-parent");
    lock.unlock();
    if (skip) {
        return;
    }
    lock.lock();
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    auto itr = device_data->object_map[kVulkanObjectTypeCommandBuffer].begin();
    auto del_itr = itr;
    while (itr != device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
        ObjTrackState *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(commandPool)) {
            skip |= ValidateCommandBuffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
            DestroyObject(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first), kVulkanObjectTypeCommandBuffer, nullptr,
                          kVUIDUndefined, kVUIDUndefined);
        }
    }
    DestroyObject(device, commandPool, kVulkanObjectTypeCommandPool, pAllocator, "VUID-vkDestroyCommandPool-commandPool-00042",
                  "VUID-vkDestroyCommandPool-commandPool-00043");
    lock.unlock();
    device_data->device_dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}

// Note: This is the core version of this routine. The extension version is below.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
                                                                   uint32_t *pQueueFamilyPropertyCount,
                                                                   VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
                                                                                   pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

// Note: This is the extension version of this routine. The core version is above.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    instance_data->instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
                                                                                      pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

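// VkDisplayKHR handles are owned by the physical device; displays reported here are added to the
// tracker so that later display-related calls can be validated against known handles.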
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                           "VUID-vkGetPhysicalDeviceDisplayPropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);

    lock.lock();
    if (result == VK_SUCCESS) {
        if (pProperties) {
            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                CreateObject(physicalDevice, pProperties[i].display, kVulkanObjectTypeDisplayKHR, nullptr);
            }
        }
    }
    lock.unlock();

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                           uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                           "VUID-vkGetDisplayModePropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
    skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false,
                           "VUID-vkGetDisplayModePropertiesKHR-display-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);

    lock.lock();
    if (result == VK_SUCCESS) {
        if (pProperties) {
            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                CreateObject(physicalDevice, pProperties[i].displayMode, kVulkanObjectTypeDisplayModeKHR, nullptr);
            }
        }
    }
    lock.unlock();

    return result;
}

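// Store (or clear) the application-provided object name so subsequent validation messages can
// refer to the handle by its debug name.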
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        dev_data->report_data->debugObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
    } else {
        dev_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
    }
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDebugMarkerSetObjectNameEXT-device-parameter",
                           kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    if (instance_data->instance_dispatch_table.GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return instance_data->instance_dispatch_table.GetPhysicalDeviceProcAddr(instance, funcName);
}

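// Entry points intercepted by this layer are resolved from name_to_funcptr_map; anything else is
// forwarded to the next layer in the dispatch chain.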
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!device_data->device_dispatch_table.GetDeviceProcAddr) return NULL;
    return device_data->device_dispatch_table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    if (!instance_data->instance_dispatch_table.GetInstanceProcAddr) return nullptr;
    return instance_data->instance_dispatch_table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                      VkDisplayProperties2KHR *pProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.GetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties);
    if (pProperties && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t index = 0; index < *pPropertyCount; ++index) {
            CreateObject(physicalDevice, pProperties[index].displayProperties.display, kVulkanObjectTypeDisplayKHR, nullptr);
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-physicalDevice-parameter", kVUIDUndefined);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result = instance_data->instance_dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex,
                                                                                                 pDisplayCount, pDisplays);
    if (pDisplays && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t index = 0; index < *pDisplayCount; ++index) {
            CreateObject(physicalDevice, pDisplays[index], kVulkanObjectTypeDisplayKHR, nullptr);
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                            uint32_t *pPropertyCount, VkDisplayModeProperties2KHR *pProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
        skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties);
    if (pProperties && (VK_SUCCESS == result || VK_INCOMPLETE == result)) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t index = 0; index < *pPropertyCount; ++index) {
            CreateObject(physicalDevice, pProperties[index].displayModeProperties.displayMode, kVulkanObjectTypeDisplayModeKHR,
                         nullptr);
        }
    }

    return result;
}

}  // namespace object_tracker

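// Loader-visible exports: thin C wrappers that forward into the object_tracker namespace.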
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return object_tracker::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return object_tracker::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return object_tracker::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return object_tracker::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return object_tracker::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return object_tracker::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return object_tracker::GetPhysicalDeviceProcAddr(instance, funcName);
}

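// Negotiate the layer interface version with the loader and hand back this layer's
// GetInstanceProcAddr/GetDeviceProcAddr/GetPhysicalDeviceProcAddr entry points.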
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        object_tracker::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}