/* Copyright (c) 2015-2017, 2019-2022 The Khronos Group Inc.
 * Copyright (c) 2015-2017, 2019-2022 Valve Corporation
 * Copyright (c) 2015-2017, 2019-2022 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 */

#pragma once

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <functional>
#include <iomanip>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdbool.h>
#include <string>
#include <vector>
#include "cast_utils.h"
#include "vk_format_utils.h"
#include "vk_layer_logging.h"

#ifndef WIN32
#include <strings.h>  // For ffs()
#else
#include <intrin.h>  // For __lzcnt()
#endif

#define STRINGIFY(s) STRINGIFY_HELPER(s)
#define STRINGIFY_HELPER(s) #s

#ifdef __cplusplus
static inline VkExtent3D CastTo3D(const VkExtent2D &d2) {
    VkExtent3D d3 = {d2.width, d2.height, 1};
    return d3;
}

static inline VkOffset3D CastTo3D(const VkOffset2D &d2) {
    VkOffset3D d3 = {d2.x, d2.y, 0};
    return d3;
}

// Convert integer API version to a string
static inline std::string StringAPIVersion(uint32_t version) {
    std::stringstream version_name;
    uint32_t major = VK_VERSION_MAJOR(version);
    uint32_t minor = VK_VERSION_MINOR(version);
    uint32_t patch = VK_VERSION_PATCH(version);
    version_name << major << "." << minor << "." << patch << " (0x" << std::setfill('0') << std::setw(8) << std::hex << version
                 << ")";
    return version_name.str();
}

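// Example (illustrative): with the standard Vulkan version packing,
//   StringAPIVersion(VK_API_VERSION_1_2) == "1.2.0 (0x00402000)"
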
// Traits objects to allow string_join to operate on collections of const char *
template <typename String>
struct StringJoinSizeTrait {
    static size_t size(const String &str) { return str.size(); }
};

template <>
struct StringJoinSizeTrait<const char *> {
    static size_t size(const char *str) {
        if (!str) return 0;
        return strlen(str);
    }
};
// Similar to perl/python join
// * String must support size, reserve, append, and be default constructible
// * StringCollection must support size, const forward iteration, and store
//   strings compatible with String::append
// * Accessor trait can be set if the default accessors (compatible with string
//   and const char *) don't support size(StringCollection::value_type &)
//
// Return type is based on the sep type
template <typename String = std::string, typename StringCollection = std::vector<String>,
          typename Accessor = StringJoinSizeTrait<typename StringCollection::value_type>>
static inline String string_join(const String &sep, const StringCollection &strings) {
    String joined;
    const size_t count = strings.size();
    if (!count) return joined;

    // Pre-reserve storage so that we execute in linear time (avoids reallocation copies)
    size_t reserve = (count - 1) * sep.size();
    for (const auto &str : strings) {
        reserve += Accessor::size(str);  // abstracted to allow const char * types in StringCollection
    }
    joined.reserve(reserve + 1);

    // Separators only occur *between* entries, so the first entry is special
    auto current = strings.cbegin();
    joined.append(*current);
    ++current;
    for (; current != strings.cend(); ++current) {
        joined.append(sep);
        joined.append(*current);
    }
    return joined;
}

// Requires StringCollection::value_type to have a const char * constructor and to be compatible with string_join's String above
template <typename StringCollection = std::vector<std::string>, typename SepString = std::string>
static inline SepString string_join(const char *sep, const StringCollection &strings) {
    return string_join<SepString, StringCollection>(SepString(sep), strings);
}

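// Example (illustrative):
//   std::vector<std::string> names{"alpha", "beta", "gamma"};
//   string_join(", ", names) == "alpha, beta, gamma"
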
static inline std::string string_trim(const std::string &s) {
    const char *whitespace = " \t\f\v\n\r";

    const auto trimmed_beg = s.find_first_not_of(whitespace);
    if (trimmed_beg == std::string::npos) return "";

    const auto trimmed_end = s.find_last_not_of(whitespace);
    assert(trimmed_end != std::string::npos && trimmed_beg <= trimmed_end);

    return s.substr(trimmed_beg, trimmed_end - trimmed_beg + 1);
}

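// Example (illustrative): string_trim(" \t vkCreateDevice \n") == "vkCreateDevice"
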
// Perl/Python style join operation for general types using stream semantics
// Note: won't be as fast as string_join above, but simpler to use (and code)
// Note: a modifiable reference doesn't match the Google style, but does match std style for stream handling and algorithms
template <typename Stream, typename String, typename ForwardIt>
Stream &stream_join(Stream &stream, const String &sep, ForwardIt first, ForwardIt last) {
    if (first != last) {
        stream << *first;
        ++first;
        while (first != last) {
            stream << sep << *first;
            ++first;
        }
    }
    return stream;
}

// stream_join for whole collections with forward iterators
template <typename Stream, typename String, typename Collection>
Stream &stream_join(Stream &stream, const String &sep, const Collection &values) {
    return stream_join(stream, sep, values.cbegin(), values.cend());
}

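// Example (illustrative):
//   std::stringstream ss;
//   std::vector<uint32_t> indices{0, 1, 2};
//   stream_join(ss, ", ", indices);  // ss.str() == "0, 1, 2"
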
typedef void *dispatch_key;
// Every dispatchable Vulkan object begins with a pointer to its dispatch table,
// so that pointer value can serve as a unique key for the owning instance/device
static inline dispatch_key get_dispatch_key(const void *object) { return (dispatch_key) * (VkLayerDispatchTable **)object; }

VK_LAYER_EXPORT VkLayerInstanceCreateInfo *get_chain_info(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func);
VK_LAYER_EXPORT VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func);

static inline bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }

// Returns the 0-based index of the MSB, like the x86 bit scan reverse (bsr) instruction
// Note: an input mask of 0 yields -1
static inline int MostSignificantBit(uint32_t mask) {
#if defined __GNUC__
    return mask ? __builtin_clz(mask) ^ 31 : -1;
#elif defined _MSC_VER
    unsigned long bit_pos;
    return _BitScanReverse(&bit_pos, mask) ? int(bit_pos) : -1;
#else
    for (int k = 31; k >= 0; --k) {
        if (((mask >> k) & 1) != 0) {
            return k;
        }
    }
    return -1;
#endif
}

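// Examples (illustrative):
//   MostSignificantBit(0x00000001) == 0
//   MostSignificantBit(0x80000000) == 31
//   MostSignificantBit(0) == -1
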
static inline uint32_t SampleCountSize(VkSampleCountFlagBits sample_count) {
    uint32_t size = 0;
    switch (sample_count) {
        case VK_SAMPLE_COUNT_1_BIT:
            size = 1;
            break;
        case VK_SAMPLE_COUNT_2_BIT:
            size = 2;
            break;
        case VK_SAMPLE_COUNT_4_BIT:
            size = 4;
            break;
        case VK_SAMPLE_COUNT_8_BIT:
            size = 8;
            break;
        case VK_SAMPLE_COUNT_16_BIT:
            size = 16;
            break;
        case VK_SAMPLE_COUNT_32_BIT:
            size = 32;
            break;
        case VK_SAMPLE_COUNT_64_BIT:
            size = 64;
            break;
        default:
            size = 0;
    }
    return size;
}

static inline bool IsImageLayoutReadOnly(VkImageLayout layout) {
    constexpr std::array<VkImageLayout, 7> read_only_layouts = {
        VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
        VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
    };
    return std::any_of(read_only_layouts.begin(), read_only_layouts.end(),
                       [layout](const VkImageLayout read_only_layout) { return layout == read_only_layout; });
}

static inline bool IsImageLayoutDepthReadOnly(VkImageLayout layout) {
    // Note: the array size must match the initializer count; extra elements would be
    // zero-initialized to VK_IMAGE_LAYOUT_UNDEFINED and falsely match that layout
    constexpr std::array<VkImageLayout, 4> read_only_layouts = {
        VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
        VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
    };
    return std::any_of(read_only_layouts.begin(), read_only_layouts.end(),
                       [layout](const VkImageLayout read_only_layout) { return layout == read_only_layout; });
}

static inline bool IsImageLayoutStencilReadOnly(VkImageLayout layout) {
    constexpr std::array<VkImageLayout, 4> read_only_layouts = {
        VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL,
        VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
    };
    return std::any_of(read_only_layouts.begin(), read_only_layouts.end(),
                       [layout](const VkImageLayout read_only_layout) { return layout == read_only_layout; });
}

static inline bool IsIdentitySwizzle(VkComponentMapping components) {
    // clang-format off
    return (
        ((components.r == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.r == VK_COMPONENT_SWIZZLE_R)) &&
        ((components.g == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.g == VK_COMPONENT_SWIZZLE_G)) &&
        ((components.b == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.b == VK_COMPONENT_SWIZZLE_B)) &&
        ((components.a == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.a == VK_COMPONENT_SWIZZLE_A))
    );
    // clang-format on
}

static inline VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
    switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            return 2;
        case VK_INDEX_TYPE_UINT32:
            return 4;
        case VK_INDEX_TYPE_UINT8_EXT:
            return 1;
        default:
            // Not a real index type. Express no alignment requirement here; we expect the upper
            // layer to have already picked up on the enum being nonsense.
            return 1;
    }
}

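// Example (illustrative): GetIndexAlignment(VK_INDEX_TYPE_UINT32) == 4, which callers
// can use to check that an index buffer offset is a multiple of the index size
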
// Returns an out-of-bounds index (FORMAT_MAX_PLANES) on error
static inline uint32_t GetPlaneIndex(VkImageAspectFlags aspect) {
    switch (aspect) {
        case VK_IMAGE_ASPECT_PLANE_0_BIT:
            return 0;
        case VK_IMAGE_ASPECT_PLANE_1_BIT:
            return 1;
        case VK_IMAGE_ASPECT_PLANE_2_BIT:
            return 2;
        default:
            // If aspect is not exactly one plane bit, return the error condition
            return FORMAT_MAX_PLANES;
    }
}

// Matches all "advanced blend operations" found in the spec
static inline bool IsAdvanceBlendOperation(const VkBlendOp blend_op) {
    return (static_cast<int>(blend_op) >= VK_BLEND_OP_ZERO_EXT) && (static_cast<int>(blend_op) <= VK_BLEND_OP_BLUE_EXT);
}

// Perform a zero-tolerant modulo operation (returns 0 when the divisor is 0)
static inline VkDeviceSize SafeModulo(VkDeviceSize dividend, VkDeviceSize divisor) {
    VkDeviceSize result = 0;
    if (divisor != 0) {
        result = dividend % divisor;
    }
    return result;
}

static inline VkDeviceSize SafeDivision(VkDeviceSize dividend, VkDeviceSize divisor) {
    VkDeviceSize result = 0;
    if (divisor != 0) {
        result = dividend / divisor;
    }
    return result;
}

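// Examples (illustrative):
//   SafeModulo(17, 4) == 1,   SafeModulo(17, 0) == 0
//   SafeDivision(17, 4) == 4, SafeDivision(17, 0) == 0
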
extern "C" {
#endif

#define VK_LAYER_API_VERSION VK_HEADER_VERSION_COMPLETE

typedef enum VkStringErrorFlagBits {
    VK_STRING_ERROR_NONE = 0x00000000,
    VK_STRING_ERROR_LENGTH = 0x00000001,
    VK_STRING_ERROR_BAD_DATA = 0x00000002,
} VkStringErrorFlagBits;
typedef VkFlags VkStringErrorFlags;

VK_LAYER_EXPORT void layer_debug_report_actions(debug_report_data *report_data, const VkAllocationCallbacks *pAllocator,
                                                const char *layer_identifier);

VK_LAYER_EXPORT void layer_debug_messenger_actions(debug_report_data *report_data, const VkAllocationCallbacks *pAllocator,
                                                   const char *layer_identifier);

VK_LAYER_EXPORT VkStringErrorFlags vk_string_validate(const int max_length, const char *char_array);
VK_LAYER_EXPORT bool white_list(const char *item, const std::set<std::string> &whitelist);

static inline int u_ffs(int val) {
#ifdef WIN32
    unsigned long bit_pos = 0;
    if (_BitScanForward(&bit_pos, val) != 0) {
        bit_pos += 1;
    }
    return bit_pos;
#else
    return ffs(val);
#endif
}

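// Examples (illustrative): u_ffs returns the 1-based index of the least significant
// set bit, or 0 when no bits are set:
//   u_ffs(0x10) == 5
//   u_ffs(0) == 0
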
#ifdef __cplusplus
}
#endif

#ifdef __cplusplus
// clang sets _MSC_VER to 1800 and _MSC_FULL_VER to 180000000, but we only want to clean up after MSVC.
#if defined(_MSC_FULL_VER) && !defined(__clang__)
// Minimum Visual Studio 2015 Update 2, or libc++ with C++17
// But, before Visual Studio 2017 version 15.7, __cplusplus is not set
// correctly. See:
// https://docs.microsoft.com/en-us/cpp/build/reference/zc-cplusplus?view=msvc-160
// Also, according to commit e2a6c442cb1e4, SDKs older than NTDDI_WIN10_RS2 do not
// support shared_mutex.
#if _MSC_FULL_VER >= 190023918 && NTDDI_VERSION > NTDDI_WIN10_RS2 && (!defined(_LIBCPP_VERSION) || __cplusplus >= 201703)
#define VVL_USE_SHARED_MUTEX 1
#endif
#elif __cplusplus >= 201703
#define VVL_USE_SHARED_MUTEX 1
#elif __cplusplus >= 201402
#define VVL_USE_SHARED_TIMED_MUTEX 1
#endif

#if defined(VVL_USE_SHARED_MUTEX) || defined(VVL_USE_SHARED_TIMED_MUTEX)
#include <shared_mutex>
#endif

class ReadWriteLock {
  private:
#if defined(VVL_USE_SHARED_MUTEX)
    typedef std::shared_mutex Lock;
#elif defined(VVL_USE_SHARED_TIMED_MUTEX)
    typedef std::shared_timed_mutex Lock;
#else
    typedef std::mutex Lock;
#endif

  public:
    void lock() { m_lock.lock(); }
    bool try_lock() { return m_lock.try_lock(); }
    void unlock() { m_lock.unlock(); }
#if defined(VVL_USE_SHARED_MUTEX) || defined(VVL_USE_SHARED_TIMED_MUTEX)
    void lock_shared() { m_lock.lock_shared(); }
    bool try_lock_shared() { return m_lock.try_lock_shared(); }
    void unlock_shared() { m_lock.unlock_shared(); }
#else
    // Without shared mutex support, reader locks fall back to exclusive locking
    void lock_shared() { lock(); }
    bool try_lock_shared() { return try_lock(); }
    void unlock_shared() { unlock(); }
#endif
  private:
    Lock m_lock;
};

#if defined(VVL_USE_SHARED_MUTEX) || defined(VVL_USE_SHARED_TIMED_MUTEX)
typedef std::shared_lock<ReadWriteLock> ReadLockGuard;
#else
typedef std::unique_lock<ReadWriteLock> ReadLockGuard;
#endif
typedef std::unique_lock<ReadWriteLock> WriteLockGuard;

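// Usage sketch (illustrative): readers may share the lock; writers get exclusive access
//
//   ReadWriteLock my_lock;
//   {
//       ReadLockGuard read_guard(my_lock);    // shared_lock when shared_mutex is available
//       // ... multiple concurrent readers ...
//   }
//   {
//       WriteLockGuard write_guard(my_lock);  // always exclusive
//       // ... single writer ...
//   }
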
// Helper class for the very common case of getting and then locking a command buffer (or other state object)
template <typename T, typename Guard>
class LockedSharedPtr : public std::shared_ptr<T> {
  public:
    LockedSharedPtr(std::shared_ptr<T> &&ptr, Guard &&guard) : std::shared_ptr<T>(std::move(ptr)), guard_(std::move(guard)) {}
    LockedSharedPtr() : std::shared_ptr<T>(), guard_() {}

  private:
    Guard guard_;
};

// Limited concurrent_unordered_map that supports internally-synchronized
// insert/erase/access. Splits locking across N buckets and uses shared_mutex
// for read/write locking. Iterators are not supported. The following
// operations are supported:
//
// insert_or_assign: Insert a new element or update an existing element.
// insert: Insert a new element and return whether it was inserted.
// erase: Remove an element.
// contains: Returns true if the key is in the map.
// find: Returns != end() if found, value is in ret->second.
// pop: Erases and returns the erased value if found.
//
// find/end: find returns a vaguely iterator-like type that can be compared to
// end and can use iter->second to retrieve the reference. This is to ease porting
// for existing code that combines the existence check and lookup in a single
// operation (and thus a single lock). i.e.:
//
//   auto iter = map.find(key);
//   if (iter != map.end()) {
//       T t = iter->second;
//       ...
//
// snapshot: Return an array of elements (key, value pairs) that satisfy an optional
// predicate. This can be used as a substitute for iterators in exceptional cases.
template <typename Key, typename T, int BUCKETSLOG2 = 2, typename Hash = layer_data::hash<Key>>
class vl_concurrent_unordered_map {
  public:
    template <typename... Args>
    void insert_or_assign(const Key &key, Args &&...args) {
        uint32_t h = ConcurrentMapHashObject(key);
        WriteLockGuard lock(locks[h].lock);
        maps[h][key] = {std::forward<Args>(args)...};
    }

    template <typename... Args>
    bool insert(const Key &key, Args &&...args) {
        uint32_t h = ConcurrentMapHashObject(key);
        WriteLockGuard lock(locks[h].lock);
        auto ret = maps[h].emplace(key, std::forward<Args>(args)...);
        return ret.second;
    }

    // returns size_type
    size_t erase(const Key &key) {
        uint32_t h = ConcurrentMapHashObject(key);
        WriteLockGuard lock(locks[h].lock);
        return maps[h].erase(key);
    }

    bool contains(const Key &key) const {
        uint32_t h = ConcurrentMapHashObject(key);
        ReadLockGuard lock(locks[h].lock);
        return maps[h].count(key) != 0;
    }

    // type returned by find() and end().
    class FindResult {
      public:
        FindResult(bool a, T b) : result(a, std::move(b)) {}

        // == and != only support comparing against end()
        bool operator==(const FindResult &other) const {
            if (result.first == false && other.result.first == false) {
                return true;
            }
            return false;
        }
        bool operator!=(const FindResult &other) const { return !(*this == other); }

        // Make -> act kind of like an iterator.
        std::pair<bool, T> *operator->() { return &result; }
        const std::pair<bool, T> *operator->() const { return &result; }

      private:
        // (found, copy of the value)
        std::pair<bool, T> result;
    };

    // find()/end() return a FindResult containing a copy of the value. For end(),
    // return a default value.
    FindResult end() const { return FindResult(false, T()); }
    FindResult cend() const { return end(); }

    FindResult find(const Key &key) const {
        uint32_t h = ConcurrentMapHashObject(key);
        ReadLockGuard lock(locks[h].lock);

        auto itr = maps[h].find(key);
        bool found = itr != maps[h].end();

        if (found) {
            return FindResult(true, itr->second);
        } else {
            return end();
        }
    }

    FindResult pop(const Key &key) {
        uint32_t h = ConcurrentMapHashObject(key);
        WriteLockGuard lock(locks[h].lock);

        auto itr = maps[h].find(key);
        bool found = itr != maps[h].end();

        if (found) {
            auto ret = FindResult(true, itr->second);
            maps[h].erase(itr);
            return ret;
        } else {
            return end();
        }
    }

    std::vector<std::pair<const Key, T>> snapshot(std::function<bool(T)> f = nullptr) const {
        std::vector<std::pair<const Key, T>> ret;
        for (int h = 0; h < BUCKETS; ++h) {
            ReadLockGuard lock(locks[h].lock);
            for (const auto &j : maps[h]) {
                if (!f || f(j.second)) {
                    ret.emplace_back(j.first, j.second);
                }
            }
        }
        return ret;
    }

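    // Usage sketch (illustrative; any pointer-like Key that converts to uintptr_t
    // works with the default bucket hash below):
    //
    //   vl_concurrent_unordered_map<VkDevice, uint32_t> map;
    //   map.insert(device, 42u);        // true on first insertion
    //   auto iter = map.find(device);
    //   if (iter != map.end()) {
    //       uint32_t value = iter->second;  // copy of the stored value
    //   }
    //   auto entries = map.snapshot();  // vector of (key, value) pairs
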
    void clear() {
        for (int h = 0; h < BUCKETS; ++h) {
            WriteLockGuard lock(locks[h].lock);
            maps[h].clear();
        }
    }

    size_t size() const {
        size_t result = 0;
        for (int h = 0; h < BUCKETS; ++h) {
            ReadLockGuard lock(locks[h].lock);
            result += maps[h].size();
        }
        return result;
    }

    bool empty() const {
        for (int h = 0; h < BUCKETS; ++h) {
            ReadLockGuard lock(locks[h].lock);
            // The whole map is empty only if every bucket is empty; bail out as soon
            // as any bucket holds an element.
            if (!maps[h].empty()) {
                return false;
            }
        }
        return true;
    }

  private:
    static const int BUCKETS = (1 << BUCKETSLOG2);

    layer_data::unordered_map<Key, T, Hash> maps[BUCKETS];
    struct {
        mutable ReadWriteLock lock;
        // Put each lock on its own cache line to avoid false cache line sharing.
        char padding[(-int(sizeof(ReadWriteLock))) & 63];
    } locks[BUCKETS];

    uint32_t ConcurrentMapHashObject(const Key &object) const {
        // Fold the 64-bit object value down to 32 bits, then XOR-fold it into the
        // low BUCKETSLOG2 bits to pick a bucket
        uint64_t u64 = (uint64_t)(uintptr_t)object;
        uint32_t hash = (uint32_t)(u64 >> 32) + (uint32_t)u64;
        hash ^= (hash >> BUCKETSLOG2) ^ (hash >> (2 * BUCKETSLOG2));
        hash &= (BUCKETS - 1);
        return hash;
    }
};
#endif