/* Copyright (c) 2015-2017, 2019-2020 The Khronos Group Inc.
 * Copyright (c) 2015-2017, 2019-2020 Valve Corporation
 * Copyright (c) 2015-2017, 2019-2020 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 */

#pragma once

#include <cassert>
#include <cstddef>
#include <cstring>  // strlen() used by StringJoinSizeTrait
#include <functional>
#include <stdbool.h>
#include <string>
#include <sstream>  // std::stringstream used by StringAPIVersion()
#include <vector>
#include <set>
#include <iomanip>
#include <mutex>          // std::mutex and lock guards used by ReadWriteLock
#include <unordered_map>  // per-bucket maps in vl_concurrent_unordered_map
#include "cast_utils.h"
#include "vk_format_utils.h"
#include "vk_layer_logging.h"

#ifndef WIN32
#include <strings.h>  // For ffs()
#else
#include <intrin.h>  // For _BitScanForward() used by u_ffs() below
#endif

#define STRINGIFY(s) STRINGIFY_HELPER(s)
#define STRINGIFY_HELPER(s) #s

#ifdef __cplusplus
static inline VkExtent3D CastTo3D(const VkExtent2D &d2) {
    VkExtent3D d3 = {d2.width, d2.height, 1};
    return d3;
}

static inline VkOffset3D CastTo3D(const VkOffset2D &d2) {
    VkOffset3D d3 = {d2.x, d2.y, 0};
    return d3;
}

// Convert integer API version to a string
static inline std::string StringAPIVersion(uint32_t version) {
    std::stringstream version_name;
    uint32_t major = VK_VERSION_MAJOR(version);
    uint32_t minor = VK_VERSION_MINOR(version);
    uint32_t patch = VK_VERSION_PATCH(version);
    version_name << major << "." << minor << "." << patch << " (0x" << std::setfill('0') << std::setw(8) << std::hex << version
                 << ")";
    return version_name.str();
}
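
// Usage sketch (illustrative only):
//   StringAPIVersion(VK_MAKE_VERSION(1, 2, 135)) returns "1.2.135 (0x00402087)".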

// Traits objects to allow string_join to operate on collections of const char *
template <typename String>
struct StringJoinSizeTrait {
    static size_t size(const String &str) { return str.size(); }
};

template <>
struct StringJoinSizeTrait<const char *> {
    static size_t size(const char *str) {
        if (!str) return 0;
        return strlen(str);
    }
};
// Similar to Perl/Python join
// * String must support size, reserve, append, and be default constructible
// * StringCollection must support size, const forward iteration, and store
//   strings compatible with String::append
// * Accessor trait can be set if default accessors (compatible with string
//   and const char *) don't support size(StringCollection::value_type &)
//
// Return type based on sep type
template <typename String = std::string, typename StringCollection = std::vector<String>,
          typename Accessor = StringJoinSizeTrait<typename StringCollection::value_type>>
static inline String string_join(const String &sep, const StringCollection &strings) {
    String joined;
    const size_t count = strings.size();
    if (!count) return joined;

    // Prereserved storage, s.t. we will execute in linear time (avoids reallocation copies)
    size_t reserve = (count - 1) * sep.size();
    for (const auto &str : strings) {
        reserve += Accessor::size(str);  // abstracted to allow const char * type in StringCollection
    }
    joined.reserve(reserve + 1);

    // Seps only occur *between* string entries, so the first is special
    auto current = strings.cbegin();
    joined.append(*current);
    ++current;
    for (; current != strings.cend(); ++current) {
        joined.append(sep);
        joined.append(*current);
    }
    return joined;
}

// Requires StringCollection::value_type has a const char * constructor and is compatible with the string_join::String above
template <typename StringCollection = std::vector<std::string>, typename SepString = std::string>
static inline SepString string_join(const char *sep, const StringCollection &strings) {
    return string_join<SepString, StringCollection>(SepString(sep), strings);
}
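
// Usage sketch (illustrative): joining extension names for a log message.
//   std::vector<std::string> names{"VK_KHR_swapchain", "VK_KHR_maintenance1"};
//   std::string msg = string_join(", ", names);  // "VK_KHR_swapchain, VK_KHR_maintenance1"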

static inline std::string string_trim(const std::string &s) {
    const char *whitespace = " \t\f\v\n\r";

    const auto trimmed_beg = s.find_first_not_of(whitespace);
    if (trimmed_beg == std::string::npos) return "";

    const auto trimmed_end = s.find_last_not_of(whitespace);
    assert(trimmed_end != std::string::npos && trimmed_beg <= trimmed_end);

    return s.substr(trimmed_beg, trimmed_end - trimmed_beg + 1);
}
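
// Usage sketch (illustrative):
//   string_trim("  VK_LAYER_KHRONOS_validation \n") returns "VK_LAYER_KHRONOS_validation".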

// Perl/Python style join operation for general types using stream semantics
// Note: won't be as fast as string_join above, but simpler to use (and code)
// Note: Modifiable reference doesn't match the google style but does match std style for stream handling and algorithms
template <typename Stream, typename String, typename ForwardIt>
Stream &stream_join(Stream &stream, const String &sep, ForwardIt first, ForwardIt last) {
    if (first != last) {
        stream << *first;
        ++first;
        while (first != last) {
            stream << sep << *first;
            ++first;
        }
    }
    return stream;
}

// stream_join for whole collections with forward iterators
template <typename Stream, typename String, typename Collection>
Stream &stream_join(Stream &stream, const String &sep, const Collection &values) {
    return stream_join(stream, sep, values.cbegin(), values.cend());
}
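
// Usage sketch (illustrative): streaming a set of queue family indices into an error message.
//   std::set<uint32_t> families{0, 2, 3};
//   std::ostringstream ss;
//   stream_join(ss, ", ", families);  // ss.str() == "0, 2, 3"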

typedef void *dispatch_key;
static inline dispatch_key get_dispatch_key(const void *object) { return (dispatch_key) * (VkLayerDispatchTable **)object; }

VK_LAYER_EXPORT VkLayerInstanceCreateInfo *get_chain_info(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func);
VK_LAYER_EXPORT VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func);

static inline bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
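// Bit-trick note (illustrative): a power of two has exactly one bit set, so x & (x - 1) clears that bit and is zero
// only for powers of two; the leading "x &&" rejects zero itself.
//   IsPowerOfTwo(64) == true, IsPowerOfTwo(48) == false, IsPowerOfTwo(0) == false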

static inline uint32_t SampleCountSize(VkSampleCountFlagBits sample_count) {
    uint32_t size = 0;
    switch (sample_count) {
        case VK_SAMPLE_COUNT_1_BIT:
            size = 1;
            break;
        case VK_SAMPLE_COUNT_2_BIT:
            size = 2;
            break;
        case VK_SAMPLE_COUNT_4_BIT:
            size = 4;
            break;
        case VK_SAMPLE_COUNT_8_BIT:
            size = 8;
            break;
        case VK_SAMPLE_COUNT_16_BIT:
            size = 16;
            break;
        case VK_SAMPLE_COUNT_32_BIT:
            size = 32;
            break;
        case VK_SAMPLE_COUNT_64_BIT:
            size = 64;
            break;
        default:
            size = 0;
    }
    return size;
}

static inline bool IsIdentitySwizzle(VkComponentMapping components) {
    // clang-format off
    return (
        ((components.r == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.r == VK_COMPONENT_SWIZZLE_R)) &&
        ((components.g == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.g == VK_COMPONENT_SWIZZLE_G)) &&
        ((components.b == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.b == VK_COMPONENT_SWIZZLE_B)) &&
        ((components.a == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.a == VK_COMPONENT_SWIZZLE_A))
    );
    // clang-format on
}

extern "C" {
#endif

#define VK_LAYER_API_VERSION VK_MAKE_VERSION(1, 0, VK_HEADER_VERSION)

typedef enum VkStringErrorFlagBits {
    VK_STRING_ERROR_NONE = 0x00000000,
    VK_STRING_ERROR_LENGTH = 0x00000001,
    VK_STRING_ERROR_BAD_DATA = 0x00000002,
} VkStringErrorFlagBits;
typedef VkFlags VkStringErrorFlags;

VK_LAYER_EXPORT void layer_debug_report_actions(debug_report_data *report_data, const VkAllocationCallbacks *pAllocator,
                                                const char *layer_identifier);

VK_LAYER_EXPORT void layer_debug_messenger_actions(debug_report_data *report_data, const VkAllocationCallbacks *pAllocator,
                                                   const char *layer_identifier);

VK_LAYER_EXPORT VkStringErrorFlags vk_string_validate(const int max_length, const char *char_array);
VK_LAYER_EXPORT bool white_list(const char *item, const std::set<std::string> &whitelist);

static inline int u_ffs(int val) {
#ifdef WIN32
    unsigned long bit_pos = 0;
    if (_BitScanForward(&bit_pos, val) != 0) {
        bit_pos += 1;
    }
    return bit_pos;
#else
    return ffs(val);
#endif
}
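
// u_ffs returns the 1-based index of the least significant set bit, or 0 when val == 0, mirroring ffs().
// Usage sketch (illustrative): u_ffs(0x8) == 4, u_ffs(0) == 0.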

#ifdef __cplusplus
}
#endif

// Minimum Visual Studio 2015 Update 2, or libc++ with C++17
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2 && \
    (!defined(_LIBCPP_VERSION) || __cplusplus >= 201703)
#include <shared_mutex>
#endif

class ReadWriteLock {
  private:
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2 && \
    (!defined(_LIBCPP_VERSION) || __cplusplus >= 201703)
    typedef std::shared_mutex lock_t;
#else
    typedef std::mutex lock_t;
#endif

  public:
    void lock() { m_lock.lock(); }
    bool try_lock() { return m_lock.try_lock(); }
    void unlock() { m_lock.unlock(); }
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2 && \
    (!defined(_LIBCPP_VERSION) || __cplusplus >= 201703)
    void lock_shared() { m_lock.lock_shared(); }
    bool try_lock_shared() { return m_lock.try_lock_shared(); }
    void unlock_shared() { m_lock.unlock_shared(); }
#else
    void lock_shared() { lock(); }
    bool try_lock_shared() { return try_lock(); }
    void unlock_shared() { unlock(); }
#endif
  private:
    lock_t m_lock;
};

#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2 && \
    (!defined(_LIBCPP_VERSION) || __cplusplus >= 201703)
typedef std::shared_lock<ReadWriteLock> read_lock_guard_t;
typedef std::unique_lock<ReadWriteLock> write_lock_guard_t;
#else
typedef std::unique_lock<ReadWriteLock> read_lock_guard_t;
typedef std::unique_lock<ReadWriteLock> write_lock_guard_t;
#endif
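
// Usage sketch (illustrative; my_lock is a hypothetical member of whatever object owns the shared data):
//   ReadWriteLock my_lock;
//   { read_lock_guard_t guard(my_lock);  /* multiple readers may hold this concurrently */ }
//   { write_lock_guard_t guard(my_lock); /* a writer gets exclusive access */ }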

// Limited concurrent_unordered_map that supports internally-synchronized
// insert/erase/access. Splits locking across N buckets and uses shared_mutex
// for read/write locking. Iterators are not supported. The following
// operations are supported:
//
// insert_or_assign: Insert a new element or update an existing element.
// insert: Insert a new element and return whether it was inserted.
// erase: Remove an element.
// contains: Returns true if the key is in the map.
// find: Returns != end() if found, value is in ret->second.
// pop: Erases and returns the erased value if found.
//
// find/end: find returns a vaguely iterator-like type that can be compared to
// end and can use iter->second to retrieve the reference. This is to ease porting
// for existing code that combines the existence check and lookup in a single
// operation (and thus a single lock). i.e.:
//
// auto iter = map.find(key);
// if (iter != map.end()) {
//     T t = iter->second;
//     ...
//
// snapshot: Return an array of elements (key, value pairs) that satisfy an optional
// predicate. This can be used as a substitute for iterators in exceptional cases.
template <typename Key, typename T, int BUCKETSLOG2 = 2, typename Hash = std::hash<Key>>
class vl_concurrent_unordered_map {
  public:
    void insert_or_assign(const Key &key, const T &value) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);
        maps[h][key] = value;
    }

    bool insert(const Key &key, const T &value) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);
        auto ret = maps[h].insert(typename std::unordered_map<Key, T>::value_type(key, value));
        return ret.second;
    }

    // returns size_type
    size_t erase(const Key &key) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);
        return maps[h].erase(key);
    }

    bool contains(const Key &key) const {
        uint32_t h = ConcurrentMapHashObject(key);
        read_lock_guard_t lock(locks[h].lock);
        return maps[h].count(key) != 0;
    }

    // type returned by find() and end().
    class FindResult {
      public:
        FindResult(bool a, T b) : result(a, std::move(b)) {}

        // == and != only support comparing against end()
        bool operator==(const FindResult &other) const {
            if (result.first == false && other.result.first == false) {
                return true;
            }
            return false;
        }
        bool operator!=(const FindResult &other) const { return !(*this == other); }

        // Make -> act kind of like an iterator.
        std::pair<bool, T> *operator->() { return &result; }
        const std::pair<bool, T> *operator->() const { return &result; }

      private:
        // (found, copy of the value)
        std::pair<bool, T> result;
    };

    // find()/end() return a FindResult containing a copy of the value. For end(),
    // return a default value.
    FindResult end() const { return FindResult(false, T()); }

    FindResult find(const Key &key) const {
        uint32_t h = ConcurrentMapHashObject(key);
        read_lock_guard_t lock(locks[h].lock);

        auto itr = maps[h].find(key);
        bool found = itr != maps[h].end();

        if (found) {
            return FindResult(true, itr->second);
        } else {
            return end();
        }
    }

    FindResult pop(const Key &key) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);

        auto itr = maps[h].find(key);
        bool found = itr != maps[h].end();

        if (found) {
            auto ret = FindResult(true, itr->second);
            maps[h].erase(itr);
            return ret;
        } else {
            return end();
        }
    }

    std::vector<std::pair<const Key, T>> snapshot(std::function<bool(T)> f = nullptr) const {
        std::vector<std::pair<const Key, T>> ret;
        for (int h = 0; h < BUCKETS; ++h) {
            read_lock_guard_t lock(locks[h].lock);
            for (const auto &j : maps[h]) {
                if (!f || f(j.second)) {
                    ret.push_back(j);
                }
            }
        }
        return ret;
    }

  private:
    static const int BUCKETS = (1 << BUCKETSLOG2);

    std::unordered_map<Key, T, Hash> maps[BUCKETS];
    struct {
        mutable ReadWriteLock lock;
        // Put each lock on its own cache line to avoid false cache line sharing.
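        // The padding size below is (64 - sizeof(ReadWriteLock) % 64) % 64, i.e. just enough bytes to round each
        // array element up to a 64-byte multiple.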
        char padding[(-int(sizeof(ReadWriteLock))) & 63];
    } locks[BUCKETS];

    uint32_t ConcurrentMapHashObject(const Key &object) const {
        uint64_t u64 = (uint64_t)(uintptr_t)object;
        uint32_t hash = (uint32_t)(u64 >> 32) + (uint32_t)u64;
        hash ^= (hash >> BUCKETSLOG2) ^ (hash >> (2 * BUCKETSLOG2));
        hash &= (BUCKETS - 1);
        return hash;
    }
};
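
// Usage sketch (illustrative only; layer_data is a stand-in for whatever per-handle state a layer tracks):
//   vl_concurrent_unordered_map<VkQueue, layer_data *> queue_map;
//   queue_map.insert_or_assign(queue, dev_data);  // write-locks only the bucket that owns this key
//   auto iter = queue_map.find(queue);            // read-locks that bucket; iter->second holds a copy of the value
//   if (iter != queue_map.end()) { layer_data *data = iter->second; }
//   auto entries = queue_map.snapshot();          // copies all (key, value) pairs, bucket by bucket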