/* Copyright (c) 2015-2020 The Khronos Group Inc.
 * Copyright (c) 2015-2020 Valve Corporation
 * Copyright (c) 2015-2020 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Camden Stocker <camden@lunarg.com>
 */

#include "best_practices_validation.h"
#include "layer_chassis_dispatch.h"
#include "best_practices_error_enums.h"

#include <string>
#include <iomanip>

// Get the API version name in proper format
std::string BestPractices::GetAPIVersionName(uint32_t version) const {
    std::stringstream version_name;
    uint32_t major = VK_VERSION_MAJOR(version);
    uint32_t minor = VK_VERSION_MINOR(version);
    uint32_t patch = VK_VERSION_PATCH(version);

    version_name << major << "." << minor << "." << patch << " (0x" << std::setfill('0') << std::setw(8) << std::hex << version
                 << ")";

    return version_name.str();
}

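// Describes a vendor-specific check: the CHECK_ENABLED member that enables it and the vendor name used in log messages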
struct VendorSpecificInfo {
    bool CHECK_ENABLED::*check;
    std::string name;
};

const std::map<BPVendorFlagBits, VendorSpecificInfo> vendor_info = {
    {kBPVendorArm, {&CHECK_ENABLED::vendor_specific_arm, "Arm"}},
};

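// Returns true if any vendor in the bitmask has its vendor-specific checks enabled in the layer settings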
bool BestPractices::VendorCheckEnabled(BPVendorFlags vendors) const {
    for (const auto& vendor : vendor_info) {
        if (vendors & vendor.first && enabled.*(vendor.second.check)) {
            return true;
        }
    }
    return false;
}

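// Returns an "[Arm]"-style tag listing the vendors in the bitmask, used to prefix vendor-specific messages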
const char* VendorSpecificTag(BPVendorFlags vendors) {
    // Cache built vendor tags in a map
    static std::unordered_map<BPVendorFlags, std::string> tag_map;

    auto res = tag_map.find(vendors);
    if (res == tag_map.end()) {
        // Build the vendor tag string
        std::stringstream vendor_tag;

        vendor_tag << "[";
        bool first_vendor = true;
        for (const auto& vendor : vendor_info) {
            if (vendors & vendor.first) {
                if (!first_vendor) {
                    vendor_tag << ", ";
                }
                vendor_tag << vendor.second.name;
                first_vendor = false;
            }
        }
        vendor_tag << "]";

        tag_map[vendors] = vendor_tag.str();
        res = tag_map.find(vendors);
    }

    return res->second.c_str();
}

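// Maps an extension deprecation reason to the verb phrase used in warning messages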
const char* DepReasonToString(ExtDeprecationReason reason) {
    switch (reason) {
        case kExtPromoted:
            return "promoted to";
            break;
        case kExtObsoleted:
            return "obsoleted by";
            break;
        case kExtDeprecated:
            return "deprecated by";
            break;
        default:
            return "";
            break;
    }
}

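// Warns when the application enables an extension that has been deprecated, obsoleted, or promoted to a core Vulkan
// version that the application already targets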
bool BestPractices::ValidateDeprecatedExtensions(const char* api_name, const char* extension_name, uint32_t version,
                                                 const char* vuid) const {
    bool skip = false;
    auto dep_info_it = deprecated_extensions.find(extension_name);
    if (dep_info_it != deprecated_extensions.end()) {
        auto dep_info = dep_info_it->second;
        if (((dep_info.target.compare("VK_VERSION_1_1") == 0) && (version >= VK_API_VERSION_1_1)) ||
            ((dep_info.target.compare("VK_VERSION_1_2") == 0) && (version >= VK_API_VERSION_1_2))) {
            skip |=
                LogWarning(instance, vuid, "%s(): Attempting to enable deprecated extension %s, but this extension has been %s %s.",
                           api_name, extension_name, DepReasonToString(dep_info.reason), (dep_info.target).c_str());
        } else if (dep_info.target.find("VK_VERSION") == std::string::npos) {
            if (dep_info.target.length() == 0) {
                skip |= LogWarning(instance, vuid,
                                   "%s(): Attempting to enable deprecated extension %s, but this extension has been deprecated "
                                   "without replacement.",
                                   api_name, extension_name);
            } else {
                skip |= LogWarning(instance, vuid,
                                   "%s(): Attempting to enable deprecated extension %s, but this extension has been %s %s.",
                                   api_name, extension_name, DepReasonToString(dep_info.reason), (dep_info.target).c_str());
            }
        }
    }
    return skip;
}

bool BestPractices::PreCallValidateCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
                                                  VkInstance* pInstance) const {
    bool skip = false;

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) {
            skip |= LogWarning(instance, kVUID_BestPractices_CreateInstance_ExtensionMismatch,
                               "vkCreateInstance(): Attempting to enable Device Extension %s at CreateInstance time.",
                               pCreateInfo->ppEnabledExtensionNames[i]);
        }
        skip |= ValidateDeprecatedExtensions("CreateInstance", pCreateInfo->ppEnabledExtensionNames[i],
                                             pCreateInfo->pApplicationInfo->apiVersion,
                                             kVUID_BestPractices_CreateInstance_DeprecatedExtension);
    }

    return skip;
}

void BestPractices::PreCallRecordCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
                                                VkInstance* pInstance) {
    ValidationStateTracker::PreCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance);
    instance_api_version = pCreateInfo->pApplicationInfo->apiVersion;
}

bool BestPractices::PreCallValidateCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo,
                                                const VkAllocationCallbacks* pAllocator, VkDevice* pDevice) const {
    bool skip = false;

    // Get API version of physical device passed when creating device.
    VkPhysicalDeviceProperties physical_device_properties{};
    DispatchGetPhysicalDeviceProperties(physicalDevice, &physical_device_properties);
    auto device_api_version = physical_device_properties.apiVersion;

    // Check API versions and warn if the instance API version is higher than the version on the device.
    if (instance_api_version > device_api_version) {
        std::string inst_api_name = GetAPIVersionName(instance_api_version);
        std::string dev_api_name = GetAPIVersionName(device_api_version);

        skip |= LogWarning(device, kVUID_BestPractices_CreateDevice_API_Mismatch,
                           "vkCreateDevice(): API Version of current instance, %s, is higher than API Version on device, %s.",
                           inst_api_name.c_str(), dev_api_name.c_str());
    }

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) {
            skip |= LogWarning(instance, kVUID_BestPractices_CreateDevice_ExtensionMismatch,
                               "vkCreateDevice(): Attempting to enable Instance Extension %s at CreateDevice time.",
                               pCreateInfo->ppEnabledExtensionNames[i]);
        }
        skip |= ValidateDeprecatedExtensions("CreateDevice", pCreateInfo->ppEnabledExtensionNames[i], instance_api_version,
                                             kVUID_BestPractices_CreateDevice_DeprecatedExtension);
    }

    auto pd_state = GetPhysicalDeviceState(physicalDevice);
    if ((pd_state->vkGetPhysicalDeviceFeaturesState == UNCALLED) && (pCreateInfo->pEnabledFeatures != NULL)) {
        skip |= LogWarning(device, kVUID_BestPractices_CreateDevice_PDFeaturesNotCalled,
                           "vkCreateDevice() called before getting physical device features from vkGetPhysicalDeviceFeatures().");
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
                                                const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer) const {
    bool skip = false;

    if ((pCreateInfo->queueFamilyIndexCount > 1) && (pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE)) {
        std::stringstream bufferHex;
        bufferHex << "0x" << std::hex << HandleToUint64(pBuffer);

        skip |= LogWarning(
            device, kVUID_BestPractices_SharingModeExclusive,
            "Warning: Buffer (%s) specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while specifying multiple queues "
            "(queueFamilyIndexCount of %" PRIu32 ").",
            bufferHex.str().c_str(), pCreateInfo->queueFamilyIndexCount);
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator, VkImage* pImage) const {
    bool skip = false;

    if ((pCreateInfo->queueFamilyIndexCount > 1) && (pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE)) {
        std::stringstream imageHex;
        imageHex << "0x" << std::hex << HandleToUint64(pImage);

        skip |=
            LogWarning(device, kVUID_BestPractices_SharingModeExclusive,
                       "Warning: Image (%s) specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while specifying multiple queues "
                       "(queueFamilyIndexCount of %" PRIu32 ").",
                       imageHex.str().c_str(), pCreateInfo->queueFamilyIndexCount);
    }

    if (VendorCheckEnabled(kBPVendorArm)) {
        if (pCreateInfo->samples > kMaxEfficientSamplesArm) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateImage_TooLargeSampleCount,
                "%s vkCreateImage(): Trying to create an image with %u samples. "
                "The hardware revision may not have full throughput for framebuffers with more than %u samples.",
                VendorSpecificTag(kBPVendorArm), static_cast<uint32_t>(pCreateInfo->samples), kMaxEfficientSamplesArm);
        }

        if (pCreateInfo->samples > VK_SAMPLE_COUNT_1_BIT && !(pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT)) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateImage_NonTransientMSImage,
                "%s vkCreateImage(): Trying to create a multisampled image, but createInfo.usage did not have "
                "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set. Multisampled images may be resolved on-chip, "
                "and do not need to be backed by physical storage. "
                "TRANSIENT_ATTACHMENT allows tiled GPUs to not back the multisampled image with physical memory.",
                VendorSpecificTag(kBPVendorArm));
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo,
                                                      const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain) const {
    bool skip = false;

    auto physical_device_state = GetPhysicalDeviceState();

    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        skip |= LogWarning(
            device, kVUID_BestPractices_Swapchain_GetSurfaceNotCalled,
            "vkCreateSwapchainKHR() called before getting surface capabilities from vkGetPhysicalDeviceSurfaceCapabilitiesKHR().");
    }

    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        skip |= LogWarning(device, kVUID_BestPractices_Swapchain_GetSurfaceNotCalled,
                           "vkCreateSwapchainKHR() called before getting surface present mode(s) from "
                           "vkGetPhysicalDeviceSurfacePresentModesKHR().");
    }

    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        skip |= LogWarning(
            device, kVUID_BestPractices_Swapchain_GetSurfaceNotCalled,
            "vkCreateSwapchainKHR() called before getting surface format(s) from vkGetPhysicalDeviceSurfaceFormatsKHR().");
    }

    if ((pCreateInfo->queueFamilyIndexCount > 1) && (pCreateInfo->imageSharingMode == VK_SHARING_MODE_EXCLUSIVE)) {
        skip |=
            LogWarning(device, kVUID_BestPractices_SharingModeExclusive,
                       "Warning: A Swapchain is being created which specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while "
                       "specifying multiple queues (queueFamilyIndexCount of %" PRIu32 ").",
                       pCreateInfo->queueFamilyIndexCount);
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                             const VkSwapchainCreateInfoKHR* pCreateInfos,
                                                             const VkAllocationCallbacks* pAllocator,
                                                             VkSwapchainKHR* pSwapchains) const {
    bool skip = false;

    for (uint32_t i = 0; i < swapchainCount; i++) {
        if ((pCreateInfos[i].queueFamilyIndexCount > 1) && (pCreateInfos[i].imageSharingMode == VK_SHARING_MODE_EXCLUSIVE)) {
            skip |= LogWarning(
                device, kVUID_BestPractices_SharingModeExclusive,
                "Warning: A shared swapchain (index %" PRIu32
                ") is being created which specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while specifying multiple "
                "queues (queueFamilyIndexCount of %" PRIu32 ").",
                i, pCreateInfos[i].queueFamilyIndexCount);
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo,
                                                    const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) const {
    bool skip = false;

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkFormat format = pCreateInfo->pAttachments[i].format;
        if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            if ((FormatIsColor(format) || FormatHasDepth(format)) &&
                pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                skip |= LogWarning(device, kVUID_BestPractices_RenderPass_Attatchment,
                                   "Render pass has an attachment with loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and "
                                   "initialLayout == VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you "
                                   "intended. Consider using VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the "
                                   "image truly is undefined at the start of the render pass.");
            }
            if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                skip |= LogWarning(device, kVUID_BestPractices_RenderPass_Attatchment,
                                   "Render pass has an attachment with stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD "
                                   "and initialLayout == VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you "
                                   "intended. Consider using VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the "
                                   "image truly is undefined at the start of the render pass.");
            }
        }

        const auto& attachment = pCreateInfo->pAttachments[i];
        if (attachment.samples > VK_SAMPLE_COUNT_1_BIT) {
            bool access_requires_memory =
                attachment.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD || attachment.storeOp == VK_ATTACHMENT_STORE_OP_STORE;

            if (FormatHasStencil(format)) {
                access_requires_memory |= attachment.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD ||
                                          attachment.stencilStoreOp == VK_ATTACHMENT_STORE_OP_STORE;
            }

            if (access_requires_memory) {
                skip |= LogPerformanceWarning(
                    device, kVUID_BestPractices_CreateRenderPass_ImageRequiresMemory,
                    "Attachment %u in the VkRenderPass is a multisampled image with %u samples, but it uses loadOp/storeOp "
                    "which requires accessing data from memory. Multisampled images should always be loadOp = CLEAR or DONT_CARE, "
                    "storeOp = DONT_CARE. This allows the implementation to use lazily allocated memory effectively.",
                    i, static_cast<uint32_t>(attachment.samples));
            }
        }
    }

    for (uint32_t dependency = 0; dependency < pCreateInfo->dependencyCount; dependency++) {
        skip |= CheckPipelineStageFlags("vkCreateRenderPass", pCreateInfo->pDependencies[dependency].srcStageMask);
        skip |= CheckPipelineStageFlags("vkCreateRenderPass", pCreateInfo->pDependencies[dependency].dstStageMask);
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo,
                                                     const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer) const {
    bool skip = false;

    // Check for non-transient attachments that should be transient and vice versa
    auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo2* rpci = rp_state->createInfo.ptr();
        const VkImageView* image_views = pCreateInfo->pAttachments;

        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            auto& attachment = rpci->pAttachments[i];
            bool attachment_should_be_transient =
                (attachment.loadOp != VK_ATTACHMENT_LOAD_OP_LOAD && attachment.storeOp != VK_ATTACHMENT_STORE_OP_STORE);

            if (FormatHasStencil(attachment.format)) {
                attachment_should_be_transient &= (attachment.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_LOAD &&
                                                   attachment.stencilStoreOp != VK_ATTACHMENT_STORE_OP_STORE);
            }

            auto view_state = GetImageViewState(image_views[i]);
            if (view_state) {
                auto& ivci = view_state->create_info;
                auto& ici = GetImageState(ivci.image)->createInfo;

                bool image_is_transient = (ici.usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) != 0;

                // The check for an image that should not be transient applies to all GPUs
                if (!attachment_should_be_transient && image_is_transient) {
                    skip |= LogPerformanceWarning(
                        device, kVUID_BestPractices_CreateFramebuffer_AttachmentShouldNotBeTransient,
                        "Attachment %u in VkFramebuffer uses loadOp/storeOps which need to access physical memory, "
                        "but the image backing the image view has VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set. "
                        "Physical memory will need to back this image, potentially causing stalls.",
                        i);
                }

                bool supports_lazy = false;
                for (uint32_t j = 0; j < phys_dev_mem_props.memoryTypeCount; j++) {
                    if (phys_dev_mem_props.memoryTypes[j].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
                        supports_lazy = true;
                    }
                }

                // The check for an image that should be transient only applies to GPUs supporting
                // lazily allocated memory
                if (supports_lazy && attachment_should_be_transient && !image_is_transient) {
                    skip |= LogPerformanceWarning(
                        device, kVUID_BestPractices_CreateFramebuffer_AttachmentShouldBeTransient,
                        "Attachment %u in VkFramebuffer uses loadOp/storeOps which never have to be backed by physical memory, "
                        "but the image backing the image view does not have VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set. "
                        "You can save physical memory by using a transient attachment backed by lazily allocated memory here.",
                        i);
                }
            }
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
                                                  const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory) const {
    bool skip = false;

    if (num_mem_objects + 1 > kMemoryObjectWarningLimit) {
        skip |= LogPerformanceWarning(device, kVUID_BestPractices_AllocateMemory_TooManyObjects,
                                      "Performance Warning: This app has > %" PRIu32 " memory objects.", kMemoryObjectWarningLimit);
    }

    if (pAllocateInfo->allocationSize < kMinDeviceAllocationSize) {
        skip |= LogPerformanceWarning(
            device, kVUID_BestPractices_AllocateMemory_SmallAllocation,
            "vkAllocateMemory(): Allocating a VkDeviceMemory of size %llu. This is a very small allocation (current "
            "threshold is %llu bytes). "
            "You should make large allocations and sub-allocate from one large VkDeviceMemory.",
            pAllocateInfo->allocationSize, kMinDeviceAllocationSize);
    }

    // TODO: Insert get check for GetPhysicalDeviceMemoryProperties once the state is tracked in the StateTracker

    return skip;
}

void BestPractices::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
                                                 const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory,
                                                 VkResult result) {
    ValidationStateTracker::PostCallRecordAllocateMemory(device, pAllocateInfo, pAllocator, pMemory, result);
    if (result != VK_SUCCESS) {
        static std::vector<VkResult> error_codes = {VK_ERROR_OUT_OF_HOST_MEMORY, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                                                    VK_ERROR_TOO_MANY_OBJECTS, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                                                    VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR};
        static std::vector<VkResult> success_codes = {};
        ValidateReturnCodes("vkAllocateMemory", result, error_codes, success_codes);
        return;
    }
    num_mem_objects++;
}

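// Logs a warning if the given VkResult is one of the listed error codes, or one of the listed non-VK_SUCCESS success codes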
void BestPractices::ValidateReturnCodes(const char* api_name, VkResult result, const std::vector<VkResult>& error_codes,
                                        const std::vector<VkResult>& success_codes) const {
    auto error = std::find(error_codes.begin(), error_codes.end(), result);
    if (error != error_codes.end()) {
        LogWarning(instance, kVUID_BestPractices_Error_Result, "%s(): Returned error %s.", api_name, string_VkResult(result));
        return;
    }
    auto success = std::find(success_codes.begin(), success_codes.end(), result);
    if (success != success_codes.end()) {
        LogWarning(instance, kVUID_BestPractices_NonSuccess_Result, "%s(): Returned non-success return code %s.", api_name,
                   string_VkResult(result));
    }
}

bool BestPractices::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory memory,
                                              const VkAllocationCallbacks* pAllocator) const {
    if (memory == VK_NULL_HANDLE) return false;
    bool skip = false;

    const DEVICE_MEMORY_STATE* mem_info = ValidationStateTracker::GetDevMemState(memory);

    for (auto& obj : mem_info->obj_bindings) {
        skip |= LogWarning(device, layer_name.c_str(), "VK Object %s still has a reference to mem obj %s.",
                           report_data->FormatHandle(obj).c_str(), report_data->FormatHandle(mem_info->mem).c_str());
    }

    return skip;
}

void BestPractices::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator) {
    ValidationStateTracker::PreCallRecordFreeMemory(device, memory, pAllocator);
    if (memory != VK_NULL_HANDLE) {
        num_mem_objects--;
    }
}

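// Shared validation for vkBindBufferMemory and its 2/2KHR variants: warns if memory requirements were never queried
// and if the buffer fully consumes a small dedicated allocation that should be sub-allocated instead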
bool BestPractices::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory memory, const char* api_name) const {
    bool skip = false;
    const BUFFER_STATE* buffer_state = GetBufferState(buffer);

    if (!buffer_state->memory_requirements_checked && !buffer_state->external_memory_handle) {
        skip |= LogWarning(device, kVUID_BestPractices_BufferMemReqNotCalled,
                           "%s: Binding memory to %s but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                           api_name, report_data->FormatHandle(buffer).c_str());
    }

    const DEVICE_MEMORY_STATE* mem_state = GetDevMemState(memory);

    if (mem_state->alloc_info.allocationSize == buffer_state->createInfo.size &&
        mem_state->alloc_info.allocationSize < kMinDedicatedAllocationSize) {
        skip |= LogPerformanceWarning(
            device, kVUID_BestPractices_SmallDedicatedAllocation,
            "%s: Trying to bind %s to a memory block which is fully consumed by the buffer. "
            "The required size of the allocation is %llu, but smaller buffers like this should be sub-allocated from "
            "larger memory blocks. (Current threshold is %llu bytes.)",
            api_name, report_data->FormatHandle(buffer).c_str(), mem_state->alloc_info.allocationSize, kMinDedicatedAllocationSize);
    }

    return skip;
}

bool BestPractices::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory memory,
                                                    VkDeviceSize memoryOffset) const {
    bool skip = false;
    const char* api_name = "vkBindBufferMemory()";

    skip |= ValidateBindBufferMemory(buffer, memory, api_name);

    return skip;
}

bool BestPractices::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                     const VkBindBufferMemoryInfo* pBindInfos) const {
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
        skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, api_name);
    }

    return skip;
}

bool BestPractices::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                        const VkBindBufferMemoryInfo* pBindInfos) const {
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
        skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, api_name);
    }

    return skip;
}

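// Shared validation for vkBindImageMemory and its 2/2KHR variants: in addition to the requirements-query and
// small-dedicated-allocation checks, warns when a TRANSIENT_ATTACHMENT image is bound to non-lazily-allocated memory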
bool BestPractices::ValidateBindImageMemory(VkImage image, VkDeviceMemory memory, const char* api_name) const {
    bool skip = false;
    const IMAGE_STATE* image_state = GetImageState(image);

    if (!image_state->memory_requirements_checked && !image_state->external_memory_handle) {
        skip |= LogWarning(device, kVUID_BestPractices_ImageMemReqNotCalled,
                           "%s: Binding memory to %s but vkGetImageMemoryRequirements() has not been called on that image.",
                           api_name, report_data->FormatHandle(image).c_str());
    }

    const DEVICE_MEMORY_STATE* mem_state = GetDevMemState(memory);

    if (mem_state->alloc_info.allocationSize == image_state->requirements.size &&
        mem_state->alloc_info.allocationSize < kMinDedicatedAllocationSize) {
        skip |= LogPerformanceWarning(
            device, kVUID_BestPractices_SmallDedicatedAllocation,
            "%s: Trying to bind %s to a memory block which is fully consumed by the image. "
            "The required size of the allocation is %llu, but smaller images like this should be sub-allocated from "
            "larger memory blocks. (Current threshold is %llu bytes.)",
            api_name, report_data->FormatHandle(image).c_str(), mem_state->alloc_info.allocationSize, kMinDedicatedAllocationSize);
    }

    // If we're binding memory to an image which was created as TRANSIENT and the image supports LAZY allocation,
    // make sure this type is actually used.
    // This warning will only trigger if this layer is run on a platform that supports LAZILY_ALLOCATED_BIT
    // (i.e. most tile-based renderers)
    if (image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) {
        bool supports_lazy = false;
        uint32_t suggested_type = 0;

        for (uint32_t i = 0; i < phys_dev_mem_props.memoryTypeCount; i++) {
            if ((1u << i) & image_state->requirements.memoryTypeBits) {
                if (phys_dev_mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
                    supports_lazy = true;
                    suggested_type = i;
                    break;
                }
            }
        }

        uint32_t allocated_properties = phys_dev_mem_props.memoryTypes[mem_state->alloc_info.memoryTypeIndex].propertyFlags;

        if (supports_lazy && (allocated_properties & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_NonLazyTransientImage,
                "%s: Attempting to bind memory type %u to VkImage which was created with TRANSIENT_ATTACHMENT_BIT, "
                "but this memory type is not LAZILY_ALLOCATED_BIT. You should use memory type %u here instead to save "
                "%llu bytes of physical memory.",
                api_name, mem_state->alloc_info.memoryTypeIndex, suggested_type, image_state->requirements.size);
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory memory,
                                                   VkDeviceSize memoryOffset) const {
    bool skip = false;
    const char* api_name = "vkBindImageMemory()";

    skip |= ValidateBindImageMemory(image, memory, api_name);

    return skip;
}

bool BestPractices::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindImageMemoryInfo* pBindInfos) const {
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
        skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, api_name);
    }

    return skip;
}

bool BestPractices::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                       const VkBindImageMemoryInfo* pBindInfos) const {
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i);
        skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, api_name);
    }

    return skip;
}

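// Returns false for the wide floating-point color formats that cannot be blended at full throughput on Arm GPUs when MSAA is used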
static inline bool FormatHasFullThroughputBlendingArm(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
        case VK_FORMAT_R16_SFLOAT:
        case VK_FORMAT_R16G16_SFLOAT:
        case VK_FORMAT_R16G16B16_SFLOAT:
        case VK_FORMAT_R16G16B16A16_SFLOAT:
        case VK_FORMAT_R32_SFLOAT:
        case VK_FORMAT_R32G32_SFLOAT:
        case VK_FORMAT_R32G32B32_SFLOAT:
        case VK_FORMAT_R32G32B32A32_SFLOAT:
            return false;

        default:
            return true;
    }
}

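// Warns when a multisampled pipeline enables blending on a color attachment whose format cannot be blended at full
// throughput on Arm GPUs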
bool BestPractices::ValidateMultisampledBlendingArm(uint32_t createInfoCount,
                                                    const VkGraphicsPipelineCreateInfo* pCreateInfos) const {
    bool skip = false;

    for (uint32_t i = 0; i < createInfoCount; i++) {
        auto pCreateInfo = &pCreateInfos[i];

        if (!pCreateInfo->pColorBlendState || !pCreateInfo->pMultisampleState ||
            pCreateInfo->pMultisampleState->rasterizationSamples == VK_SAMPLE_COUNT_1_BIT ||
            pCreateInfo->pMultisampleState->sampleShadingEnable) {
            continue;
        }

        auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
        auto& subpass = rp_state->createInfo.pSubpasses[pCreateInfo->subpass];

        for (uint32_t j = 0; j < pCreateInfo->pColorBlendState->attachmentCount; j++) {
            auto& blend_att = pCreateInfo->pColorBlendState->pAttachments[j];
            uint32_t att = subpass.pColorAttachments[j].attachment;

            if (att != VK_ATTACHMENT_UNUSED && blend_att.blendEnable && blend_att.colorWriteMask) {
                if (!FormatHasFullThroughputBlendingArm(rp_state->createInfo.pAttachments[att].format)) {
                    skip |= LogPerformanceWarning(device, kVUID_BestPractices_CreatePipelines_MultisampledBlending,
                                                  "%s vkCreateGraphicsPipelines() - createInfo #%u: Pipeline is multisampled and "
                                                  "color attachment #%u makes use "
                                                  "of a format which cannot be blended at full throughput when using MSAA.",
                                                  VendorSpecificTag(kBPVendorArm), i, j);
                }
            }
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                           const VkGraphicsPipelineCreateInfo* pCreateInfos,
                                                           const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines,
                                                           void* cgpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos,
                                                                     pAllocator, pPipelines, cgpl_state_data);

    if ((createInfoCount > 1) && (!pipelineCache)) {
        skip |= LogPerformanceWarning(
            device, kVUID_BestPractices_CreatePipelines_MultiplePipelines,
            "Performance Warning: This vkCreateGraphicsPipelines call is creating multiple pipelines but is not using a "
            "pipeline cache, which may help with performance");
    }

    for (uint32_t i = 0; i < createInfoCount; i++) {
        auto& createInfo = pCreateInfos[i];

        auto& vertexInput = *createInfo.pVertexInputState;
        uint32_t count = 0;
        for (uint32_t j = 0; j < vertexInput.vertexBindingDescriptionCount; j++) {
            if (vertexInput.pVertexBindingDescriptions[j].inputRate == VK_VERTEX_INPUT_RATE_INSTANCE) {
                count++;
            }
        }

        if (count > kMaxInstancedVertexBuffers) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreatePipelines_TooManyInstancedVertexBuffers,
                "The pipeline is using %u instanced vertex buffers (current limit: %u), but this can be inefficient on the "
                "GPU. If using instanced vertex attributes prefer interleaving them in a single buffer.",
                count, kMaxInstancedVertexBuffers);
        }
    }

    // ValidateMultisampledBlendingArm inspects all createInfos itself, so call it once for the whole batch
    skip |= VendorCheckEnabled(kBPVendorArm) && ValidateMultisampledBlendingArm(createInfoCount, pCreateInfos);

    return skip;
}

bool BestPractices::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                          const VkComputePipelineCreateInfo* pCreateInfos,
                                                          const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines,
                                                          void* ccpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos,
                                                                    pAllocator, pPipelines, ccpl_state_data);

    if ((createInfoCount > 1) && (!pipelineCache)) {
        skip |= LogPerformanceWarning(
            device, kVUID_BestPractices_CreatePipelines_MultiplePipelines,
            "Performance Warning: This vkCreateComputePipelines call is creating multiple pipelines but is not using a "
            "pipeline cache, which may help with performance");
    }

    return skip;
}

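// Flags use of the overly broad VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT / VK_PIPELINE_STAGE_ALL_COMMANDS_BIT stage masks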
bool BestPractices::CheckPipelineStageFlags(std::string api_name, const VkPipelineStageFlags flags) const {
    bool skip = false;

    if (flags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) {
        skip |= LogWarning(device, kVUID_BestPractices_PipelineStageFlags,
                           "You are using VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT when %s is called\n", api_name.c_str());
    } else if (flags & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) {
        skip |= LogWarning(device, kVUID_BestPractices_PipelineStageFlags,
                           "You are using VK_PIPELINE_STAGE_ALL_COMMANDS_BIT when %s is called\n", api_name.c_str());
    }

    return skip;
}

bool BestPractices::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits,
                                               VkFence fence) const {
    bool skip = false;

    for (uint32_t submit = 0; submit < submitCount; submit++) {
        for (uint32_t semaphore = 0; semaphore < pSubmits[submit].waitSemaphoreCount; semaphore++) {
            skip |= CheckPipelineStageFlags("vkQueueSubmit", pSubmits[submit].pWaitDstStageMask[semaphore]);
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo,
                                                     const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool) const {
    bool skip = false;

    if (pCreateInfo->flags & VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT) {
        skip |= LogPerformanceWarning(
            device, kVUID_BestPractices_CreateCommandPool_CommandBufferReset,
            "vkCreateCommandPool(): VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT is set. Consider resetting entire "
            "pool instead.");
    }

    return skip;
}

bool BestPractices::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
                                                      const VkCommandBufferBeginInfo* pBeginInfo) const {
    bool skip = false;

    if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
        skip |= LogPerformanceWarning(device, kVUID_BestPractices_BeginCommandBuffer_SimultaneousUse,
                                      "vkBeginCommandBuffer(): VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT is set.");
    }

    if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT)) {
        skip |= VendorCheckEnabled(kBPVendorArm) &&
                LogPerformanceWarning(device, kVUID_BestPractices_BeginCommandBuffer_OneTimeSubmit,
                                      "%s vkBeginCommandBuffer(): VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT is not set. "
                                      "For best performance on Mali GPUs, consider setting ONE_TIME_SUBMIT by default.",
                                      VendorSpecificTag(kBPVendorArm));
    }

    return skip;
}

bool BestPractices::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
    bool skip = false;

    skip |= CheckPipelineStageFlags("vkCmdSetEvent", stageMask);

    return skip;
}

bool BestPractices::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
                                                 VkPipelineStageFlags stageMask) const {
    bool skip = false;

    skip |= CheckPipelineStageFlags("vkCmdResetEvent", stageMask);

    return skip;
}

bool BestPractices::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
                                                 VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                                 uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
                                                 uint32_t bufferMemoryBarrierCount,
                                                 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
                                                 uint32_t imageMemoryBarrierCount,
                                                 const VkImageMemoryBarrier* pImageMemoryBarriers) const {
    bool skip = false;

    skip |= CheckPipelineStageFlags("vkCmdWaitEvents", srcStageMask);
    skip |= CheckPipelineStageFlags("vkCmdWaitEvents", dstStageMask);

    return skip;
}

bool BestPractices::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                      VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                      uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
                                                      uint32_t bufferMemoryBarrierCount,
                                                      const VkBufferMemoryBarrier* pBufferMemoryBarriers,
                                                      uint32_t imageMemoryBarrierCount,
                                                      const VkImageMemoryBarrier* pImageMemoryBarriers) const {
    bool skip = false;

    skip |= CheckPipelineStageFlags("vkCmdPipelineBarrier", srcStageMask);
    skip |= CheckPipelineStageFlags("vkCmdPipelineBarrier", dstStageMask);

    return skip;
}

bool BestPractices::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                     VkQueryPool queryPool, uint32_t query) const {
    bool skip = false;

    skip |= CheckPipelineStageFlags("vkCmdWriteTimestamp", pipelineStage);

    return skip;
}

static inline bool RenderPassUsesAttachmentOnTile(const safe_VkRenderPassCreateInfo2& createInfo, uint32_t attachment) {
    for (uint32_t subpass = 0; subpass < createInfo.subpassCount; subpass++) {
        auto& subpassInfo = createInfo.pSubpasses[subpass];

        // If an attachment is ever used as a color attachment,
        // resolve attachment or depth stencil attachment,
        // it needs to exist on tile at some point.

        for (uint32_t i = 0; i < subpassInfo.colorAttachmentCount; i++)
            if (subpassInfo.pColorAttachments[i].attachment == attachment) return true;

        if (subpassInfo.pResolveAttachments) {
            for (uint32_t i = 0; i < subpassInfo.colorAttachmentCount; i++)
                if (subpassInfo.pResolveAttachments[i].attachment == attachment) return true;
        }

        if (subpassInfo.pDepthStencilAttachment && subpassInfo.pDepthStencilAttachment->attachment == attachment) return true;
    }

    return false;
}

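// Warns (for Arm tile-based GPUs) when a render pass begins with attachments that use VK_ATTACHMENT_LOAD_OP_LOAD,
// which forces the driver to read the attachment back into the tile buffer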
bool BestPractices::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
                                               const VkRenderPassBeginInfo* pRenderPassBegin) const {
    bool skip = false;

    if (!pRenderPassBegin) {
        return skip;
    }

    auto rp_state = GetRenderPassState(pRenderPassBegin->renderPass);
    if (rp_state) {
        // Check if any attachments have LOAD operation on them
        for (uint32_t att = 0; att < rp_state->createInfo.attachmentCount; att++) {
            auto& attachment = rp_state->createInfo.pAttachments[att];

            bool attachmentHasReadback = false;
            if (!FormatHasStencil(attachment.format) && attachment.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                attachmentHasReadback = true;
            }

            if (FormatHasStencil(attachment.format) && attachment.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                attachmentHasReadback = true;
            }

            bool attachmentNeedsReadback = false;

            // Check if the attachment is actually used in any subpass on-tile
            if (attachmentHasReadback && RenderPassUsesAttachmentOnTile(rp_state->createInfo, att)) {
                attachmentNeedsReadback = true;
            }

            // Using LOAD_OP_LOAD is expensive on tiled GPUs, so flag it as a potential improvement
            if (attachmentNeedsReadback) {
                skip |= VendorCheckEnabled(kBPVendorArm) &&
                        LogPerformanceWarning(
                            device, kVUID_BestPractices_BeginRenderPass_AttachmentNeedsReadback,
                            "%s Attachment #%u in render pass has begun with VK_ATTACHMENT_LOAD_OP_LOAD.\n"
                            "Submitting this renderpass will cause the driver to inject a readback of the attachment "
                            "which will copy in total %u pixels (renderArea = { %d, %d, %u, %u }) to the tile buffer.",
                            VendorSpecificTag(kBPVendorArm), att,
                            pRenderPassBegin->renderArea.extent.width * pRenderPassBegin->renderArea.extent.height,
                            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y,
                            pRenderPassBegin->renderArea.extent.width, pRenderPassBegin->renderArea.extent.height);
            }
        }
    }

    return skip;
}

bool BestPractices::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
                                                      VkSubpassContents contents) const {
    bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
    return skip;
}

bool BestPractices::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                          const VkRenderPassBeginInfo* pRenderPassBegin,
                                                          const VkSubpassBeginInfoKHR* pSubpassBeginInfo) const {
    bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
    return skip;
}

bool BestPractices::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
                                                       const VkSubpassBeginInfoKHR* pSubpassBeginInfo) const {
    bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
    return skip;
}

// Generic function to handle validation for all CmdDraw* type functions
bool BestPractices::ValidateCmdDrawType(VkCommandBuffer cmd_buffer, const char* caller) const {
    bool skip = false;
    const CMD_BUFFER_STATE* cb_state = GetCBState(cmd_buffer);
    if (cb_state) {
        const auto last_bound_it = cb_state->lastBound.find(VK_PIPELINE_BIND_POINT_GRAPHICS);
        const PIPELINE_STATE* pipeline_state = nullptr;
        if (last_bound_it != cb_state->lastBound.cend()) {
            pipeline_state = last_bound_it->second.pipeline_state;
        }
        const auto& current_vtx_bfr_binding_info = cb_state->current_vertex_buffer_binding_info.vertex_buffer_bindings;
        // Verify vertex binding
        if (pipeline_state && pipeline_state->vertex_binding_descriptions_.empty()) {
            if ((!current_vtx_bfr_binding_info.empty()) && (!cb_state->vertex_buffer_used)) {
                skip |= LogPerformanceWarning(cb_state->commandBuffer, kVUID_BestPractices_DrawState_VtxIndexOutOfBounds,
                                              "Vertex buffers are bound to %s but no vertex buffers are attached to %s.",
                                              report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                              report_data->FormatHandle(pipeline_state->pipeline).c_str());
            }
        }
    }
    return skip;
}

bool BestPractices::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                           uint32_t firstVertex, uint32_t firstInstance) const {
    bool skip = false;

    if (instanceCount == 0) {
        skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_InstanceCountZero,
                           "Warning: You are calling vkCmdDraw() with an instanceCount of Zero.");
    }
    skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDraw()");

    return skip;
}

bool BestPractices::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                                  uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
    bool skip = false;

    if (instanceCount == 0) {
        skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_InstanceCountZero,
                           "Warning: You are calling vkCmdDrawIndexed() with an instanceCount of Zero.");
    }
    skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexed()");

    // Check if we reached the limit for small indexed draw calls.
    // Note that we cannot update the draw call count here, so we do it in PreCallRecordCmdDrawIndexed.
    const CMD_BUFFER_STATE* cmd_state = GetCBState(commandBuffer);
    if ((indexCount * instanceCount) <= kSmallIndexedDrawcallIndices &&
        (cmd_state->small_indexed_draw_call_count == kMaxSmallIndexedDrawcalls - 1)) {
        skip |= VendorCheckEnabled(kBPVendorArm) &&
                LogPerformanceWarning(device, kVUID_BestPractices_CmdDrawIndexed_ManySmallIndexedDrawcalls,
                                      "The command buffer contains many small indexed drawcalls "
                                      "(at least %u drawcalls with less than %u indices each). This may cause pipeline bubbles. "
                                      "You can try batching drawcalls or instancing when applicable.",
                                      VendorSpecificTag(kBPVendorArm), kMaxSmallIndexedDrawcalls, kSmallIndexedDrawcallIndices);
    }

    return skip;
}

void BestPractices::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                                uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    ValidationStateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                                                        firstInstance);

    CMD_BUFFER_STATE* cmd_state = GetCBState(commandBuffer);
    if ((indexCount * instanceCount) <= kSmallIndexedDrawcallIndices) {
        cmd_state->small_indexed_draw_call_count++;
    }
}

bool BestPractices::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
                                                                  VkDeviceSize offset, VkBuffer countBuffer,
                                                                  VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                                  uint32_t stride) const {
    bool skip = ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexedIndirectCountKHR()");

    return skip;
}

bool BestPractices::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                   uint32_t drawCount, uint32_t stride) const {
    bool skip = false;

    if (drawCount == 0) {
        skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_DrawCountZero,
                           "Warning: You are calling vkCmdDrawIndirect() with a drawCount of Zero.");
    }
    skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndirect()");

    return skip;
}

bool BestPractices::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                          uint32_t drawCount, uint32_t stride) const {
    bool skip = false;

    if (drawCount == 0) {
        skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_DrawCountZero,
                           "Warning: You are calling vkCmdDrawIndexedIndirect() with a drawCount of Zero.");
    }
    skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexedIndirect()");

    return skip;
}

bool BestPractices::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY,
                                               uint32_t groupCountZ) const {
    bool skip = false;

    if ((groupCountX == 0) || (groupCountY == 0) || (groupCountZ == 0)) {
        skip |= LogWarning(device, kVUID_BestPractices_CmdDispatch_GroupCountZero,
                           "Warning: You are calling vkCmdDispatch() while one or more groupCounts are zero (groupCountX = %" PRIu32
                           ", groupCountY = %" PRIu32 ", groupCountZ = %" PRIu32 ").",
                           groupCountX, groupCountY, groupCountZ);
    }

    return skip;
}

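// Shared check for the vkGetDisplayPlane* queries: warns if display plane properties were never retrieved first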
Camden Stocker9c051442019-11-06 14:28:43 -08001083bool BestPractices::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice,
1084 const char* api_name) const {
1085 bool skip = false;
1086 const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
1087
1088 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
Mark Lobodzinskib6e2a282020-01-29 16:03:26 -07001089 skip |= LogWarning(physicalDevice, kVUID_BestPractices_DisplayPlane_PropertiesNotCalled,
1090 "Potential problem with calling %s() without first retrieving properties from "
1091 "vkGetPhysicalDeviceDisplayPlanePropertiesKHR or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.",
1092 api_name);
Camden Stocker9c051442019-11-06 14:28:43 -08001093 }
1094
1095 return skip;
1096}
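
// Illustrative call order ('phys_dev' is an assumed name) that satisfies the check above:
// retrieve the display plane properties before querying any per-plane information.
//
//     uint32_t plane_count = 0;
//     vkGetPhysicalDeviceDisplayPlanePropertiesKHR(phys_dev, &plane_count, nullptr);
//     std::vector<VkDisplayPlanePropertiesKHR> planes(plane_count);
//     vkGetPhysicalDeviceDisplayPlanePropertiesKHR(phys_dev, &plane_count, planes.data());
//     // Only then query a specific plane:
//     uint32_t display_count = 0;
//     vkGetDisplayPlaneSupportedDisplaysKHR(phys_dev, 0 /*planeIndex*/, &display_count, nullptr);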

bool BestPractices::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                       uint32_t* pDisplayCount, VkDisplayKHR* pDisplays) const {
    bool skip = false;

    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, "vkGetDisplayPlaneSupportedDisplaysKHR");

    return skip;
}

bool BestPractices::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                                  uint32_t planeIndex,
                                                                  VkDisplayPlaneCapabilitiesKHR* pCapabilities) const {
    bool skip = false;

    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, "vkGetDisplayPlaneCapabilitiesKHR");

    return skip;
}

bool BestPractices::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                                   const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
                                                                   VkDisplayPlaneCapabilities2KHR* pCapabilities) const {
    bool skip = false;

    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, "vkGetDisplayPlaneCapabilities2KHR");

    return skip;
}

bool BestPractices::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount,
                                                         VkImage* pSwapchainImages) const {
    bool skip = false;

    auto swapchain_state = GetSwapchainState(swapchain);

    if (swapchain_state && pSwapchainImages) {
        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
            skip |= LogWarning(device, kVUID_Core_Swapchain_PriorCount,
                               "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, but no prior call with NULL "
                               "pSwapchainImages has been made to retrieve pSwapchainImageCount.");
        }
    }

    return skip;
}
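
// Illustrative two-call idiom ('dev' and 'swapchain' are assumed names) that avoids the warning
// above: first retrieve the image count with pSwapchainImages set to NULL, then retrieve the images.
//
//     uint32_t image_count = 0;
//     vkGetSwapchainImagesKHR(dev, swapchain, &image_count, nullptr);
//     std::vector<VkImage> images(image_count);
//     vkGetSwapchainImagesKHR(dev, swapchain, &image_count, images.data());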

// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
bool BestPractices::ValidateCommonGetPhysicalDeviceQueueFamilyProperties(const PHYSICAL_DEVICE_STATE* pd_state,
                                                                         uint32_t requested_queue_family_property_count,
                                                                         bool qfp_null, const char* caller_name) const {
    bool skip = false;
    if (!qfp_null) {
        // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties to get the count
        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
            skip |= LogWarning(
                pd_state->phys_device, kVUID_Core_DevLimit_MissingQueryCount,
                "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
                "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
                caller_name, caller_name);
            // Then verify that the count passed in on this call matches what was previously returned
        } else if (pd_state->queue_family_known_count != requested_queue_family_property_count) {
            skip |= LogWarning(
                pd_state->phys_device, kVUID_Core_DevLimit_CountMismatch,
                "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
                ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
                ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
                "previously obtained by calling %s with NULL pQueueFamilyProperties.",
                caller_name, requested_queue_family_property_count, pd_state->queue_family_known_count, caller_name, caller_name);
        }
    }

    return skip;
}
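
// Illustrative sketch ('phys_dev' is an assumed name) of the recommended pattern: the first call
// with NULL pQueueFamilyProperties obtains the count, and the second call passes that count back.
//
//     uint32_t qfp_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(phys_dev, &qfp_count, nullptr);
//     std::vector<VkQueueFamilyProperties> qfps(qfp_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(phys_dev, &qfp_count, qfps.data());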

bool BestPractices::PreCallValidateBindAccelerationStructureMemoryNV(
    VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) const {
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        const ACCELERATION_STRUCTURE_STATE* as_state = GetAccelerationStructureState(pBindInfos[i].accelerationStructure);
        if (!as_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetAccelerationStructureMemoryRequirementsNV() prior to
            // calling BindAccelerationStructureMemoryNV, but it's implied in that the memory being bound must conform with
            // VkAccelerationStructureMemoryRequirementsInfoNV from vkGetAccelerationStructureMemoryRequirementsNV
            skip |= LogWarning(
                device, kVUID_BestPractices_BindAccelNV_NoMemReqQuery,
                "vkBindAccelerationStructureMemoryNV(): "
                "Binding memory to %s but vkGetAccelerationStructureMemoryRequirementsNV() has not been called on that structure.",
                report_data->FormatHandle(pBindInfos[i].accelerationStructure).c_str());
        }
    }

    return skip;
}
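
// Illustrative sketch ('dev' and 'accel_structure' are assumed names): query the memory
// requirements of each acceleration structure before allocating and binding its memory, which
// keeps the binding in line with the requirements and avoids the warning above.
//
//     VkAccelerationStructureMemoryRequirementsInfoNV req_info = {};
//     req_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
//     req_info.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV;
//     req_info.accelerationStructure = accel_structure;
//     VkMemoryRequirements2 mem_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
//     vkGetAccelerationStructureMemoryRequirementsNV(dev, &req_info, &mem_reqs);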

bool BestPractices::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
                                                                          uint32_t* pQueueFamilyPropertyCount,
                                                                          VkQueueFamilyProperties* pQueueFamilyProperties) const {
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    assert(physical_device_state);
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
                                                                (nullptr == pQueueFamilyProperties),
                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
}

bool BestPractices::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(
    VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
    VkQueueFamilyProperties2KHR* pQueueFamilyProperties) const {
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    assert(physical_device_state);
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
                                                                (nullptr == pQueueFamilyProperties),
                                                                "vkGetPhysicalDeviceQueueFamilyProperties2()");
}

bool BestPractices::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(
    VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
    VkQueueFamilyProperties2KHR* pQueueFamilyProperties) const {
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    assert(physical_device_state);
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
                                                                (nullptr == pQueueFamilyProperties),
                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
}

bool BestPractices::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                      uint32_t* pSurfaceFormatCount,
                                                                      VkSurfaceFormatKHR* pSurfaceFormats) const {
    if (!pSurfaceFormats) return false;
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    const auto& call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
    bool skip = false;
    if (call_state == UNCALLED) {
        // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't
        // previously call this function with a NULL value of pSurfaceFormats:
        skip |= LogWarning(physicalDevice, kVUID_Core_DevLimit_MustQueryCount,
                           "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior call with "
                           "NULL pSurfaceFormats has been made to retrieve pSurfaceFormatCount.");
    } else {
        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
        if (*pSurfaceFormatCount > prev_format_count) {
            skip |= LogWarning(physicalDevice, kVUID_Core_DevLimit_CountMismatch,
                               "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats and a "
                               "pSurfaceFormatCount value (%u) that is greater than the value (%u) that was returned "
                               "when pSurfaceFormats was NULL.",
                               *pSurfaceFormatCount, prev_format_count);
        }
    }
    return skip;
}

bool BestPractices::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo,
                                                   VkFence fence) const {
    bool skip = false;

    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; bindIdx++) {
        const VkBindSparseInfo& bindInfo = pBindInfo[bindIdx];
        // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound
        std::unordered_set<const IMAGE_STATE*> sparse_images;
        // Track images getting metadata bound by this call in a set, it'll be recorded into the image_state
        // in RecordQueueBindSparse.
        std::unordered_set<const IMAGE_STATE*> sparse_images_with_metadata;
        // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
        for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
            const auto& image_bind = bindInfo.pImageBinds[i];
            auto image_state = GetImageState(image_bind.image);
            if (!image_state)
                continue;  // Param/Object validation should report image_bind.image handles being invalid, so just skip here.
            sparse_images.insert(image_state);
            if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
                if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
                    // For now just warning if sparse image binding occurs without calling to get reqs first
                    skip |= LogWarning(image_state->image, kVUID_Core_MemTrack_InvalidState,
                                       "vkQueueBindSparse(): Binding sparse memory to %s without first calling "
                                       "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                                       report_data->FormatHandle(image_state->image).c_str());
                }
            }
            if (!image_state->memory_requirements_checked) {
                // For now just warning if sparse image binding occurs without calling to get reqs first
                skip |= LogWarning(image_state->image, kVUID_Core_MemTrack_InvalidState,
                                   "vkQueueBindSparse(): Binding sparse memory to %s without first calling "
                                   "vkGetImageMemoryRequirements() to retrieve requirements.",
                                   report_data->FormatHandle(image_state->image).c_str());
            }
        }
        for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
            const auto& image_opaque_bind = bindInfo.pImageOpaqueBinds[i];
            auto image_state = GetImageState(image_opaque_bind.image);
            if (!image_state)
                continue;  // Param/Object validation should report image_opaque_bind.image handles being invalid, so skip here.
            sparse_images.insert(image_state);
            if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
                if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
                    // For now just warning if sparse image binding occurs without calling to get reqs first
                    skip |= LogWarning(image_state->image, kVUID_Core_MemTrack_InvalidState,
                                       "vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling "
                                       "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                                       report_data->FormatHandle(image_state->image).c_str());
                }
            }
            if (!image_state->memory_requirements_checked) {
                // For now just warning if sparse image binding occurs without calling to get reqs first
                skip |= LogWarning(image_state->image, kVUID_Core_MemTrack_InvalidState,
                                   "vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling "
                                   "vkGetImageMemoryRequirements() to retrieve requirements.",
                                   report_data->FormatHandle(image_state->image).c_str());
            }
            for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) {
                if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) {
                    sparse_images_with_metadata.insert(image_state);
                }
            }
        }
        for (const auto& sparse_image_state : sparse_images) {
            if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound &&
                sparse_images_with_metadata.find(sparse_image_state) == sparse_images_with_metadata.end()) {
                // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
                skip |= LogWarning(sparse_image_state->image, kVUID_Core_MemTrack_InvalidState,
                                   "vkQueueBindSparse(): Binding sparse memory to %s which requires a metadata aspect, but no "
                                   "binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.",
                                   report_data->FormatHandle(sparse_image_state->image).c_str());
            }
        }
    }

    return skip;
}
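
// Illustrative sketch ('dev' and 'sparse_image' are assumed names) of the queries expected before
// sparse binding: fetch both the ordinary and the sparse memory requirements for the image.
//
//     VkMemoryRequirements mem_reqs;
//     vkGetImageMemoryRequirements(dev, sparse_image, &mem_reqs);
//     uint32_t sparse_req_count = 0;
//     vkGetImageSparseMemoryRequirements(dev, sparse_image, &sparse_req_count, nullptr);
//     std::vector<VkSparseImageMemoryRequirements> sparse_reqs(sparse_req_count);
//     vkGetImageSparseMemoryRequirements(dev, sparse_image, &sparse_req_count, sparse_reqs.data());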

void BestPractices::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo,
                                                  VkFence fence, VkResult result) {
    ValidationStateTracker::PostCallRecordQueueBindSparse(queue, bindInfoCount, pBindInfo, fence, result);

    if (result != VK_SUCCESS) {
        static std::vector<VkResult> error_codes = {VK_ERROR_OUT_OF_HOST_MEMORY, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                                                    VK_ERROR_DEVICE_LOST};
        static std::vector<VkResult> success_codes = {};
        ValidateReturnCodes("vkQueueBindSparse", result, error_codes, success_codes);
        return;
    }

    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; bindIdx++) {
        const VkBindSparseInfo& bindInfo = pBindInfo[bindIdx];
        for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
            const auto& image_opaque_bind = bindInfo.pImageOpaqueBinds[i];
            auto image_state = GetImageState(image_opaque_bind.image);
            if (!image_state)
                continue;  // Param/Object validation should report image_opaque_bind.image handles being invalid, so skip here.
            for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) {
                if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) {
                    image_state->sparse_metadata_bound = true;
                }
            }
        }
    }
}

bool BestPractices::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                                       const VkClearAttachment* pAttachments, uint32_t rectCount,
                                                       const VkClearRect* pRects) const {
    bool skip = false;
    const CMD_BUFFER_STATE* cb_node = GetCBState(commandBuffer);
    if (!cb_node) return skip;

    // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
    if (!cb_node->hasDrawCmd && (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
        (cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
        // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
        // This warning should be made more specific. It'd be best to avoid triggering this test if it's a use that must call
        // CmdClearAttachments.
        skip |= LogPerformanceWarning(commandBuffer, kVUID_BestPractices_DrawState_ClearCmdBeforeDraw,
                                      "vkCmdClearAttachments() issued on %s prior to any Draw Cmds. It is recommended you "
                                      "use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                      report_data->FormatHandle(commandBuffer).c_str());
    }

    // Check for uses of ClearAttachments along with LOAD_OP_LOAD,
    // as it can be more efficient to just use LOAD_OP_CLEAR
    const RENDER_PASS_STATE* rp = cb_node->activeRenderPass;
    if (rp) {
        const auto& subpass = rp->createInfo.pSubpasses[cb_node->activeSubpass];

        for (uint32_t i = 0; i < attachmentCount; i++) {
            const auto& attachment = pAttachments[i];
            if (attachment.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                uint32_t color_attachment = attachment.colorAttachment;
                uint32_t fb_attachment = subpass.pColorAttachments[color_attachment].attachment;

                if (fb_attachment != VK_ATTACHMENT_UNUSED) {
                    if (rp->createInfo.pAttachments[fb_attachment].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        skip |= LogPerformanceWarning(
                            device, kVUID_BestPractices_ClearAttachments_ClearAfterLoad,
                            "vkCmdClearAttachments() issued on %s for color attachment #%u in this subpass, "
                            "but LOAD_OP_LOAD was used. If you need to clear the framebuffer, always use LOAD_OP_CLEAR as "
                            "it is more efficient.",
                            report_data->FormatHandle(commandBuffer).c_str(), color_attachment);
                    }
                }
            }

            if (subpass.pDepthStencilAttachment && (attachment.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
                uint32_t fb_attachment = subpass.pDepthStencilAttachment->attachment;

                if (fb_attachment != VK_ATTACHMENT_UNUSED) {
                    if (rp->createInfo.pAttachments[fb_attachment].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        skip |= LogPerformanceWarning(
                            device, kVUID_BestPractices_ClearAttachments_ClearAfterLoad,
                            "vkCmdClearAttachments() issued on %s for the depth attachment in this subpass, "
                            "but LOAD_OP_LOAD was used. If you need to clear the framebuffer, always use LOAD_OP_CLEAR as "
                            "it is more efficient.",
                            report_data->FormatHandle(commandBuffer).c_str());
                    }
                }
            }

            if (subpass.pDepthStencilAttachment && (attachment.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                uint32_t fb_attachment = subpass.pDepthStencilAttachment->attachment;

                if (fb_attachment != VK_ATTACHMENT_UNUSED) {
                    if (rp->createInfo.pAttachments[fb_attachment].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        skip |= LogPerformanceWarning(
                            device, kVUID_BestPractices_ClearAttachments_ClearAfterLoad,
                            "vkCmdClearAttachments() issued on %s for the stencil attachment in this subpass, "
                            "but LOAD_OP_LOAD was used. If you need to clear the framebuffer, always use LOAD_OP_CLEAR as "
                            "it is more efficient.",
                            report_data->FormatHandle(commandBuffer).c_str());
                    }
                }
            }
        }
    }

    return skip;
}
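
// Illustrative alternative (assumed field values): clear through the render pass load op rather
// than vkCmdClearAttachments when the whole attachment is cleared before any draw.
//
//     VkAttachmentDescription color_attachment = {};
//     color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;   // cleared on-tile at subpass start
//     color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     // ... the clear color is then supplied via VkRenderPassBeginInfo::pClearValues.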

bool BestPractices::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                   VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                                   const VkImageResolve* pRegions) const {
    bool skip = false;

    skip |= VendorCheckEnabled(kBPVendorArm) &&
            LogPerformanceWarning(device, kVUID_BestPractices_CmdResolveImage_ResolvingImage,
                                  "%s Attempting to use vkCmdResolveImage to resolve a multisampled image. "
                                  "This is a very slow and extremely bandwidth-intensive path. "
                                  "You should always resolve multisampled images on-tile with pResolveAttachments in VkRenderPass.",
                                  VendorSpecificTag(kBPVendorArm));

    return skip;
}
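
// Illustrative alternative (assumed attachment indices): resolve on-tile by giving the subpass a
// single-sample resolve target instead of calling vkCmdResolveImage afterwards.
//
//     VkAttachmentReference msaa_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference resolve_ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkSubpassDescription subpass = {};
//     subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//     subpass.colorAttachmentCount = 1;
//     subpass.pColorAttachments = &msaa_ref;
//     subpass.pResolveAttachments = &resolve_ref;  // the resolve happens as part of the render pass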

bool BestPractices::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo,
                                                 const VkAllocationCallbacks* pAllocator, VkSampler* pSampler) const {
    bool skip = false;

    if (VendorCheckEnabled(kBPVendorArm)) {
        if ((pCreateInfo->addressModeU != pCreateInfo->addressModeV) || (pCreateInfo->addressModeV != pCreateInfo->addressModeW)) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateSampler_DifferentWrappingModes,
                "%s Creating a sampler object with wrapping modes which do not match (U = %u, V = %u, W = %u). "
                "This may cause reduced performance even if only U (1D image) or U/V wrapping modes (2D "
                "image) are actually used. If you need different wrapping modes, disregard this warning.",
                VendorSpecificTag(kBPVendorArm), pCreateInfo->addressModeU, pCreateInfo->addressModeV, pCreateInfo->addressModeW);
        }

        if ((pCreateInfo->minLod != 0.0f) || (pCreateInfo->maxLod < VK_LOD_CLAMP_NONE)) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateSampler_LodClamping,
                "%s Creating a sampler object with LOD clamping (minLod = %f, maxLod = %f). This may cause reduced performance. "
                "Instead of clamping LOD in the sampler, consider using a VkImageView which restricts the mip levels, set minLod "
                "to 0.0, and maxLod to VK_LOD_CLAMP_NONE.",
                VendorSpecificTag(kBPVendorArm), pCreateInfo->minLod, pCreateInfo->maxLod);
        }

        if (pCreateInfo->mipLodBias != 0.0f) {
            skip |=
                LogPerformanceWarning(device, kVUID_BestPractices_CreateSampler_LodBias,
                                      "%s Creating a sampler object with LOD bias != 0.0 (%f). This will lead to less efficient "
                                      "descriptors being created and may cause reduced performance.",
                                      VendorSpecificTag(kBPVendorArm), pCreateInfo->mipLodBias);
        }

        if ((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
             pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
             pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) &&
            (pCreateInfo->borderColor != VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK)) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateSampler_BorderClampColor,
                "%s Creating a sampler object with border clamping and borderColor != VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK. "
                "This will lead to less efficient descriptors being created and may cause reduced performance. "
                "If possible, use VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK as the border color.",
                VendorSpecificTag(kBPVendorArm));
        }

        if (pCreateInfo->unnormalizedCoordinates) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateSampler_UnnormalizedCoordinates,
                "%s Creating a sampler object with unnormalized coordinates. This will lead to less efficient "
                "descriptors being created and may cause reduced performance.",
                VendorSpecificTag(kBPVendorArm));
        }

        if (pCreateInfo->anisotropyEnable) {
            skip |= LogPerformanceWarning(
                device, kVUID_BestPractices_CreateSampler_Anisotropy,
                "%s Creating a sampler object with anisotropy. This will lead to less efficient descriptors being created "
                "and may cause reduced performance.",
                VendorSpecificTag(kBPVendorArm));
        }
    }

    return skip;
}
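
// Illustrative sketch ('dev' is an assumed name) of a sampler configuration that triggers none of
// the Arm warnings above: matching wrap modes, no LOD clamping or bias, default border color,
// normalized coordinates, and no anisotropy.
//
//     VkSamplerCreateInfo info = {VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
//     info.addressModeU = info.addressModeV = info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
//     info.minLod = 0.0f;
//     info.maxLod = VK_LOD_CLAMP_NONE;
//     info.mipLodBias = 0.0f;
//     info.anisotropyEnable = VK_FALSE;
//     info.unnormalizedCoordinates = VK_FALSE;
//     VkSampler sampler;
//     vkCreateSampler(dev, &info, nullptr, &sampler);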