/* Copyright (c) 2015-2017, 2019 The Khronos Group Inc.
 * Copyright (c) 2015-2017, 2019 Valve Corporation
 * Copyright (c) 2015-2017, 2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 */

#pragma once

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <mutex>
#include <stdbool.h>
#include <string>
#include <unordered_map>
#include <vector>
#include <set>
#include "cast_utils.h"
#include "vk_format_utils.h"
#include "vk_layer_logging.h"

#ifndef WIN32
#include <strings.h>  // For ffs()
#else
#include <intrin.h>  // For __lzcnt()
#endif

#ifdef __cplusplus
// Traits objects to allow string_join to operate on collections of const char *
template <typename String>
struct StringJoinSizeTrait {
    static size_t size(const String &str) { return str.size(); }
};

template <>
struct StringJoinSizeTrait<const char *> {
    static size_t size(const char *str) {
        if (!str) return 0;
        return strlen(str);
    }
};
// Similar to Perl/Python join
//    * String must support size, reserve, append, and be default constructible
//    * StringCollection must support size, const forward iteration, and store
//      strings compatible with String::append
//    * Accessor trait can be set if default accessors (compatible with string
//      and const char *) don't support size(StringCollection::value_type &)
//
// Return type is based on the sep type
template <typename String = std::string, typename StringCollection = std::vector<String>,
          typename Accessor = StringJoinSizeTrait<typename StringCollection::value_type>>
static inline String string_join(const String &sep, const StringCollection &strings) {
    String joined;
    const size_t count = strings.size();
    if (!count) return joined;

    // Pre-reserve storage so that we append in linear time (avoids reallocation copies)
    size_t reserve = (count - 1) * sep.size();
    for (const auto &str : strings) {
        reserve += Accessor::size(str);  // abstracted to allow const char * type in StringCollection
    }
    joined.reserve(reserve + 1);

    // Separators only occur *between* entries, so the first entry is special
    auto current = strings.cbegin();
    joined.append(*current);
    ++current;
    for (; current != strings.cend(); ++current) {
        joined.append(sep);
        joined.append(*current);
    }
    return joined;
}

// Requires that StringCollection::value_type has a const char * constructor and is compatible with the String type of
// string_join above
template <typename StringCollection = std::vector<std::string>, typename SepString = std::string>
static inline SepString string_join(const char *sep, const StringCollection &strings) {
    return string_join<SepString, StringCollection>(SepString(sep), strings);
}
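
// Example usage (illustrative; the names below are arbitrary sample values):
//
//      std::vector<std::string> names = {"VK_KHR_swapchain", "VK_KHR_maintenance1"};
//      std::string list = string_join(", ", names);   // "VK_KHR_swapchain, VK_KHR_maintenance1"
//
//      std::vector<const char *> parts = {"one", "two"};
//      std::string joined = string_join("+", parts);  // "one+two", sized via StringJoinSizeTrait<const char *>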

// Perl/Python style join operation for general types using stream semantics
// Note: won't be as fast as string_join above, but simpler to use (and code)
// Note: Modifiable reference doesn't match the Google style but does match std style for stream handling and algorithms
template <typename Stream, typename String, typename ForwardIt>
Stream &stream_join(Stream &stream, const String &sep, ForwardIt first, ForwardIt last) {
    if (first != last) {
        stream << *first;
        ++first;
        while (first != last) {
            stream << sep << *first;
            ++first;
        }
    }
    return stream;
}

// stream_join for whole collections with forward iterators
template <typename Stream, typename String, typename Collection>
Stream &stream_join(Stream &stream, const String &sep, const Collection &values) {
    return stream_join(stream, sep, values.cbegin(), values.cend());
}
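
// Example usage (illustrative; requires <sstream> for std::stringstream):
//
//      std::stringstream ss;
//      std::vector<uint32_t> queue_families = {0, 1, 3};
//      stream_join(ss, ", ", queue_families);  // ss.str() == "0, 1, 3"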

// The loader writes a pointer to its dispatch table at the start of every dispatchable Vulkan object;
// that pointer is used as the key for looking up per-instance/per-device layer data.
typedef void *dispatch_key;
static inline dispatch_key get_dispatch_key(const void *object) { return (dispatch_key) * (VkLayerDispatchTable **)object; }

VK_LAYER_EXPORT VkLayerInstanceCreateInfo *get_chain_info(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func);
VK_LAYER_EXPORT VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func);

static inline bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }

extern "C" {
#endif

#define VK_LAYER_API_VERSION VK_MAKE_VERSION(1, 0, VK_HEADER_VERSION)

typedef enum VkStringErrorFlagBits {
    VK_STRING_ERROR_NONE = 0x00000000,
    VK_STRING_ERROR_LENGTH = 0x00000001,
    VK_STRING_ERROR_BAD_DATA = 0x00000002,
} VkStringErrorFlagBits;
typedef VkFlags VkStringErrorFlags;

VK_LAYER_EXPORT void layer_debug_report_actions(debug_report_data *report_data, const VkAllocationCallbacks *pAllocator,
                                                const char *layer_identifier);

VK_LAYER_EXPORT void layer_debug_messenger_actions(debug_report_data *report_data, const VkAllocationCallbacks *pAllocator,
                                                   const char *layer_identifier);

VK_LAYER_EXPORT VkStringErrorFlags vk_string_validate(const int max_length, const char *char_array);
VK_LAYER_EXPORT bool white_list(const char *item, const std::set<std::string> &whitelist);
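
// Example vk_string_validate usage (illustrative; ext_name stands for an application-provided string):
//
//      if (vk_string_validate(VK_MAX_EXTENSION_NAME_SIZE, ext_name) != VK_STRING_ERROR_NONE) {
//          // ext_name exceeds the maximum length or contains invalid UTF-8
//      }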

// Portable ffs(): returns the 1-based position of the least significant set bit in val, or 0 if val is 0
static inline int u_ffs(int val) {
#ifdef WIN32
    unsigned long bit_pos = 0;
    if (_BitScanForward(&bit_pos, val) != 0) {
        bit_pos += 1;
    }
    return bit_pos;
#else
    return ffs(val);
#endif
}
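
// For example, u_ffs(0x08) == 4 (bit positions are 1-based) and u_ffs(0) == 0.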

#ifdef __cplusplus
}
#endif

// shared_mutex support added in MSVC 2015 update 2
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2
#include <shared_mutex>
#endif

// Limited concurrent_unordered_map that supports internally-synchronized
// insert/erase/access. Splits locking across N buckets and uses shared_mutex
// for read/write locking. Iterators are not supported. The following
// operations are supported:
//
// insert_or_assign: Insert a new element or update an existing element.
// insert: Insert a new element and return whether it was inserted.
// erase: Remove an element.
// contains: Returns true if the key is in the map.
// find: Returns != end() if found, value is in ret->second.
// pop: Erases and returns the erased value if found.
//
// find/end: find returns a vaguely iterator-like type that can be compared to
// end and can use iter->second to retrieve the reference. This is to ease porting
// of existing code that combines the existence check and lookup in a single
// operation (and thus a single lock), e.g.:
//
//      auto iter = map.find(key);
//      if (iter != map.end()) {
//          T t = iter->second;
//          ...
//
// snapshot: Return an array of elements (key, value pairs) that satisfy an optional
// predicate. This can be used as a substitute for iterators in exceptional cases.
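//
// Example usage (illustrative; the key and value types are arbitrary):
//
//      vl_concurrent_unordered_map<VkQueue, uint64_t> submit_counts;
//      submit_counts.insert_or_assign(queue, 1);   // queue is some VkQueue handle
//      if (submit_counts.contains(queue)) { /* ... */ }
//      auto nonzero = submit_counts.snapshot([](uint64_t count) { return count != 0; });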
template <typename Key, typename T, int BUCKETSLOG2 = 2, typename Hash = std::hash<Key>>
class vl_concurrent_unordered_map {
  public:
    void insert_or_assign(const Key &key, const T &value) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);
        maps[h][key] = value;
    }

    bool insert(const Key &key, const T &value) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);
        auto ret = maps[h].insert(typename std::unordered_map<Key, T>::value_type(key, value));
        return ret.second;
    }

    // returns size_type
    size_t erase(const Key &key) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);
        return maps[h].erase(key);
    }

    bool contains(const Key &key) const {
        uint32_t h = ConcurrentMapHashObject(key);
        read_lock_guard_t lock(locks[h].lock);
        return maps[h].count(key) != 0;
    }

    // type returned by find() and end().
    class FindResult {
      public:
        FindResult(bool a, T b) : result(a, std::move(b)) {}

        // == and != only support comparing against end()
        bool operator==(const FindResult &other) const {
            if (result.first == false && other.result.first == false) {
                return true;
            }
            return false;
        }
        bool operator!=(const FindResult &other) const { return !(*this == other); }

        // Make -> act kind of like an iterator.
        std::pair<bool, T> *operator->() { return &result; }
        const std::pair<bool, T> *operator->() const { return &result; }

      private:
        // (found, copy of the element)
        std::pair<bool, T> result;
    };

    // find()/end() return a FindResult containing a copy of the value. For end(),
    // return a default value.
    FindResult end() const { return FindResult(false, T()); }

    FindResult find(const Key &key) const {
        uint32_t h = ConcurrentMapHashObject(key);
        read_lock_guard_t lock(locks[h].lock);

        auto itr = maps[h].find(key);
        bool found = itr != maps[h].end();

        if (found) {
            return FindResult(true, itr->second);
        } else {
            return end();
        }
    }

    FindResult pop(const Key &key) {
        uint32_t h = ConcurrentMapHashObject(key);
        write_lock_guard_t lock(locks[h].lock);

        auto itr = maps[h].find(key);
        bool found = itr != maps[h].end();

        if (found) {
            FindResult ret(true, itr->second);
            maps[h].erase(itr);
            return ret;
        } else {
            return end();
        }
    }

    std::vector<std::pair<const Key, T>> snapshot(std::function<bool(T)> f = nullptr) const {
        std::vector<std::pair<const Key, T>> ret;
        for (int h = 0; h < BUCKETS; ++h) {
            read_lock_guard_t lock(locks[h].lock);
            for (const auto &j : maps[h]) {
                if (!f || f(j.second)) {
                    ret.push_back(j);
                }
            }
        }
        return ret;
    }

  private:
    static const int BUCKETS = (1 << BUCKETSLOG2);
// shared_mutex support added in MSVC 2015 update 2
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2
    typedef std::shared_mutex lock_t;
    typedef std::shared_lock<lock_t> read_lock_guard_t;
    typedef std::unique_lock<lock_t> write_lock_guard_t;
#else
    typedef std::mutex lock_t;
    typedef std::unique_lock<lock_t> read_lock_guard_t;
    typedef std::unique_lock<lock_t> write_lock_guard_t;
#endif

    std::unordered_map<Key, T, Hash> maps[BUCKETS];
    struct {
        mutable lock_t lock;
        // Put each lock on its own cache line to avoid false cache line sharing.
        char padding[(-int(sizeof(lock_t))) & 63];
    } locks[BUCKETS];

    // Fold the key (assumed to be castable to an integer, e.g. a handle or pointer) into a bucket index in [0, BUCKETS)
    uint32_t ConcurrentMapHashObject(const Key &object) const {
        uint64_t u64 = (uint64_t)(uintptr_t)object;
        uint32_t hash = (uint32_t)(u64 >> 32) + (uint32_t)u64;
        hash ^= (hash >> BUCKETSLOG2) ^ (hash >> (2 * BUCKETSLOG2));
        hash &= (BUCKETS - 1);
        return hash;
    }
};