blob: 16ac94b9c32be51b4f45e74fb69b964e4b8b87a2 [file] [log] [blame]
Adam Sawickiae5c4662019-01-02 10:23:35 +01001//
2// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3//
4// Permission is hereby granted, free of charge, to any person obtaining a copy
5// of this software and associated documentation files (the "Software"), to deal
6// in the Software without restriction, including without limitation the rights
7// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8// copies of the Software, and to permit persons to whom the Software is
9// furnished to do so, subject to the following conditions:
10//
11// The above copyright notice and this permission notice shall be included in
12// all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20// THE SOFTWARE.
21//
22
Adam Sawickif1a793c2018-03-13 15:42:22 +010023#include "Tests.h"
24#include "VmaUsage.h"
25#include "Common.h"
Adam Sawickib8333fb2018-03-13 16:15:53 +010026#include <atomic>
27#include <thread>
28#include <mutex>
Adam Sawicki94ce3d72019-04-17 14:59:25 +020029#include <functional>
Adam Sawickif1a793c2018-03-13 15:42:22 +010030
#ifdef _WIN32

// NOTE(review): purpose not visible in this chunk — presumably a label for
// recordings/reports produced by the tests; confirm against usage elsewhere.
static const char* CODE_DESCRIPTION = "Foo";

// Shared test infrastructure defined in another translation unit.
extern VkCommandBuffer g_hTemporaryCommandBuffer;
extern const VkAllocationCallbacks* g_Allocs;
void BeginSingleTimeCommands();
void EndSingleTimeCommands();

// Default to no debug margin if the library build didn't define one.
#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN 0
#endif
43
// Coarse level of test thoroughness. Used e.g. by GetAllocationStrategyCount()
// to scale how many variants the tests exercise.
enum CONFIG_TYPE {
    CONFIG_TYPE_MINIMUM,
    CONFIG_TYPE_SMALL,
    CONFIG_TYPE_AVERAGE,
    CONFIG_TYPE_LARGE,
    CONFIG_TYPE_MAXIMUM,
    CONFIG_TYPE_COUNT // Sentinel — number of valid values above.
};

// Active configuration; switch to the LARGE line for longer, more thorough runs.
static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_SMALL;
//static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_LARGE;
Adam Sawicki0a607132018-08-24 11:18:41 +020055
// Order in which allocations are released at the end of a test run.
// COUNT is a sentinel, not a valid order.
enum class FREE_ORDER { FORWARD, BACKWARD, RANDOM, COUNT };

// Human-readable name for each FREE_ORDER value, indexed by its underlying value.
static const char* FREE_ORDER_NAMES[] = {
    "FORWARD",
    "BACKWARD",
    "RANDOM",
};
// Keep the name table in sync with the enum — a new FREE_ORDER value without a
// matching name would otherwise read out of bounds at runtime.
static_assert(sizeof(FREE_ORDER_NAMES) / sizeof(FREE_ORDER_NAMES[0]) == (size_t)FREE_ORDER::COUNT,
    "FREE_ORDER_NAMES must have exactly one entry per FREE_ORDER value.");
63
Adam Sawicki80927152018-09-07 17:27:23 +020064// Copy of internal VmaAlgorithmToStr.
65static const char* AlgorithmToStr(uint32_t algorithm)
66{
67 switch(algorithm)
68 {
69 case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
70 return "Linear";
71 case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
72 return "Buddy";
73 case 0:
74 return "Default";
75 default:
76 assert(0);
77 return "";
78 }
79}
80
// One weighted class of allocations made by the tests. Exactly one of the two
// ranges is expected to be used: a buffer size range in bytes, or an image
// dimension range in pixels (the other range's Max stays 0).
struct AllocationSize
{
    uint32_t Probability;                      // Relative weight among all AllocationSizes.
    VkDeviceSize BufferSizeMin, BufferSizeMax; // Buffer size range, bytes; Max == 0 means "not a buffer".
    uint32_t ImageSizeMin, ImageSizeMax;       // Image width/height range, pixels; Max == 0 means "not an image".
};
87
// Parameters of one MainTest() run.
struct Config
{
    uint32_t RandSeed;                    // Seed for the main random number generator.
    VkDeviceSize BeginBytesToAllocate;    // Bytes allocated up-front, split evenly between threads.
    uint32_t AdditionalOperationCount;    // Random alloc/free operations after the initial phase, split between threads.
    VkDeviceSize MaxBytesToAllocate;      // Upper bound on bytes held at any time, split between threads.
    uint32_t MemUsageProbability[4]; // For VMA_MEMORY_USAGE_*
    std::vector<AllocationSize> AllocationSizes; // Weighted size classes to draw allocations from.
    uint32_t ThreadCount;                 // Number of worker threads; must be > 0.
    uint32_t ThreadsUsingCommonAllocationsProbabilityPercent; // % chance an operation targets the shared allocation list.
    FREE_ORDER FreeOrder;                 // Order of the final deallocation phase.
    VmaAllocationCreateFlags AllocationStrategy; // For VMA_ALLOCATION_CREATE_STRATEGY_*
};
101
// Aggregated timing and memory statistics produced by MainTest().
struct Result
{
    duration TotalTime; // Wall-clock time of the whole run.
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;       // Per-allocation timing statistics.
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax; // Per-deallocation timing statistics.
    VkDeviceSize TotalMemoryAllocated;               // usedBytes + unusedBytes at the time stats were captured.
    VkDeviceSize FreeRangeSizeAvg, FreeRangeSizeMax; // Free-range statistics from vmaCalculateStats.
};
110
111void TestDefragmentationSimple();
112void TestDefragmentationFull();
113
114struct PoolTestConfig
115{
116 uint32_t RandSeed;
117 uint32_t ThreadCount;
118 VkDeviceSize PoolSize;
119 uint32_t FrameCount;
120 uint32_t TotalItemCount;
121 // Range for number of items used in each frame.
122 uint32_t UsedItemCountMin, UsedItemCountMax;
123 // Percent of items to make unused, and possibly make some others used in each frame.
124 uint32_t ItemsToMakeUnusedPercent;
125 std::vector<AllocationSize> AllocationSizes;
126
127 VkDeviceSize CalcAvgResourceSize() const
128 {
129 uint32_t probabilitySum = 0;
130 VkDeviceSize sizeSum = 0;
131 for(size_t i = 0; i < AllocationSizes.size(); ++i)
132 {
133 const AllocationSize& allocSize = AllocationSizes[i];
134 if(allocSize.BufferSizeMax > 0)
135 sizeSum += (allocSize.BufferSizeMin + allocSize.BufferSizeMax) / 2 * allocSize.Probability;
136 else
137 {
138 const VkDeviceSize avgDimension = (allocSize.ImageSizeMin + allocSize.ImageSizeMax) / 2;
139 sizeSum += avgDimension * avgDimension * 4 * allocSize.Probability;
140 }
141 probabilitySum += allocSize.Probability;
142 }
143 return sizeSum / probabilitySum;
144 }
145
146 bool UsesBuffers() const
147 {
148 for(size_t i = 0; i < AllocationSizes.size(); ++i)
149 if(AllocationSizes[i].BufferSizeMax > 0)
150 return true;
151 return false;
152 }
153
154 bool UsesImages() const
155 {
156 for(size_t i = 0; i < AllocationSizes.size(); ++i)
157 if(AllocationSizes[i].ImageSizeMax > 0)
158 return true;
159 return false;
160 }
161};
162
// Aggregated results of one pool stress-test run.
struct PoolTestResult
{
    duration TotalTime; // Wall-clock time of the whole run.
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;       // Per-allocation timing statistics.
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax; // Per-deallocation timing statistics.
    size_t LostAllocationCount, LostAllocationTotalSize;     // Allocations lost by the pool (count / total bytes).
    size_t FailedAllocationCount, FailedAllocationTotalSize; // Allocations that failed (count / total bytes).
};
171
// Assumed bytes per pixel when this test estimates image memory usage.
static const uint32_t IMAGE_BYTES_PER_PIXEL = 1;

// Frame counter (external linkage — also visible to other translation units).
uint32_t g_FrameIndex = 0;

// A buffer handle together with the VMA allocation backing it.
struct BufferInfo
{
    VkBuffer Buffer = VK_NULL_HANDLE;
    VmaAllocation Allocation = VK_NULL_HANDLE;
};
181
Adam Sawicki40ffe982019-10-11 15:56:02 +0200182static uint32_t MemoryTypeToHeap(uint32_t memoryTypeIndex)
183{
184 const VkPhysicalDeviceMemoryProperties* props;
185 vmaGetMemoryProperties(g_hAllocator, &props);
186 return props->memoryTypes[memoryTypeIndex].heapIndex;
187}
188
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +0200189static uint32_t GetAllocationStrategyCount()
190{
191 uint32_t strategyCount = 0;
192 switch(ConfigType)
193 {
194 case CONFIG_TYPE_MINIMUM: strategyCount = 1; break;
195 case CONFIG_TYPE_SMALL: strategyCount = 1; break;
196 case CONFIG_TYPE_AVERAGE: strategyCount = 2; break;
197 case CONFIG_TYPE_LARGE: strategyCount = 2; break;
198 case CONFIG_TYPE_MAXIMUM: strategyCount = 3; break;
199 default: assert(0);
200 }
201 return strategyCount;
202}
203
204static const char* GetAllocationStrategyName(VmaAllocationCreateFlags allocStrategy)
205{
206 switch(allocStrategy)
207 {
208 case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT: return "BEST_FIT"; break;
209 case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT: return "WORST_FIT"; break;
210 case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT: return "FIRST_FIT"; break;
211 case 0: return "Default"; break;
212 default: assert(0); return "";
213 }
214}
215
Adam Sawickib8333fb2018-03-13 16:15:53 +0100216static void InitResult(Result& outResult)
217{
218 outResult.TotalTime = duration::zero();
219 outResult.AllocationTimeMin = duration::max();
220 outResult.AllocationTimeAvg = duration::zero();
221 outResult.AllocationTimeMax = duration::min();
222 outResult.DeallocationTimeMin = duration::max();
223 outResult.DeallocationTimeAvg = duration::zero();
224 outResult.DeallocationTimeMax = duration::min();
225 outResult.TotalMemoryAllocated = 0;
226 outResult.FreeRangeSizeAvg = 0;
227 outResult.FreeRangeSizeMax = 0;
228}
229
230class TimeRegisterObj
231{
232public:
233 TimeRegisterObj(duration& min, duration& sum, duration& max) :
234 m_Min(min),
235 m_Sum(sum),
236 m_Max(max),
237 m_TimeBeg(std::chrono::high_resolution_clock::now())
238 {
239 }
240
241 ~TimeRegisterObj()
242 {
243 duration d = std::chrono::high_resolution_clock::now() - m_TimeBeg;
244 m_Sum += d;
245 if(d < m_Min) m_Min = d;
246 if(d > m_Max) m_Max = d;
247 }
248
249private:
250 duration& m_Min;
251 duration& m_Sum;
252 duration& m_Max;
253 time_point m_TimeBeg;
254};
255
// Per-thread statistics gathered during a pool test, later merged into a
// PoolTestResult.
struct PoolTestThreadResult
{
    duration AllocationTimeMin, AllocationTimeSum, AllocationTimeMax;       // Per-allocation timing accumulators.
    duration DeallocationTimeMin, DeallocationTimeSum, DeallocationTimeMax; // Per-deallocation timing accumulators.
    size_t AllocationCount, DeallocationCount;               // Operation counts.
    size_t LostAllocationCount, LostAllocationTotalSize;     // Lost allocations (count / total bytes).
    size_t FailedAllocationCount, FailedAllocationTotalSize; // Failed allocations (count / total bytes).
};
264
265class AllocationTimeRegisterObj : public TimeRegisterObj
266{
267public:
268 AllocationTimeRegisterObj(Result& result) :
269 TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeAvg, result.AllocationTimeMax)
270 {
271 }
272};
273
274class DeallocationTimeRegisterObj : public TimeRegisterObj
275{
276public:
277 DeallocationTimeRegisterObj(Result& result) :
278 TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeAvg, result.DeallocationTimeMax)
279 {
280 }
281};
282
283class PoolAllocationTimeRegisterObj : public TimeRegisterObj
284{
285public:
286 PoolAllocationTimeRegisterObj(PoolTestThreadResult& result) :
287 TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeSum, result.AllocationTimeMax)
288 {
289 }
290};
291
292class PoolDeallocationTimeRegisterObj : public TimeRegisterObj
293{
294public:
295 PoolDeallocationTimeRegisterObj(PoolTestThreadResult& result) :
296 TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeSum, result.DeallocationTimeMax)
297 {
298 }
299};
300
Adam Sawicki33d2ce72018-08-27 13:59:13 +0200301static void CurrentTimeToStr(std::string& out)
302{
303 time_t rawTime; time(&rawTime);
304 struct tm timeInfo; localtime_s(&timeInfo, &rawTime);
305 char timeStr[128];
306 strftime(timeStr, _countof(timeStr), "%c", &timeInfo);
307 out = timeStr;
308}
309
// Runs the main allocator stress test described by `config`:
// 1. Spawns config.ThreadCount worker threads; each allocates random buffers
//    and images until its share of config.BeginBytesToAllocate is reached.
// 2. Each thread then performs config.AdditionalOperationCount/ThreadCount
//    random interleaved allocations and frees, staying under its share of
//    config.MaxBytesToAllocate.
// 3. The main thread captures allocator-wide statistics, then signals the
//    workers to free everything in config.FreeOrder order.
// Fills `outResult` with timing and memory statistics; returns the last
// VkResult produced by an allocation (TEST() aborts the run on failure).
VkResult MainTest(Result& outResult, const Config& config)
{
    assert(config.ThreadCount > 0);

    InitResult(outResult);

    RandomNumberGenerator mainRand{config.RandSeed};

    time_point timeBeg = std::chrono::high_resolution_clock::now();

    std::atomic<size_t> allocationCount = 0;
    // NOTE(review): `res` and the timing accumulators inside `outResult` are
    // written from multiple worker threads without synchronization — looks
    // racy; confirm this is acceptable for this test harness.
    VkResult res = VK_SUCCESS;

    uint32_t memUsageProbabilitySum =
        config.MemUsageProbability[0] + config.MemUsageProbability[1] +
        config.MemUsageProbability[2] + config.MemUsageProbability[3];
    assert(memUsageProbabilitySum > 0);

    // Total weight of all size classes, used to draw a weighted random class.
    uint32_t allocationSizeProbabilitySum = std::accumulate(
        config.AllocationSizes.begin(),
        config.AllocationSizes.end(),
        0u,
        [](uint32_t sum, const AllocationSize& allocSize) {
            return sum + allocSize.Probability;
        });

    // One live resource: exactly one of Buffer/Image is non-null.
    struct Allocation
    {
        VkBuffer Buffer;
        VkImage Image;
        VmaAllocation Alloc;
    };

    // Allocations shared between all threads, guarded by the mutex below.
    std::vector<Allocation> commonAllocations;
    std::mutex commonAllocationsMutex;

    // Creates one random buffer (bufferSize > 0) or image (imageExtent non-zero)
    // with a randomly drawn VMA_MEMORY_USAGE_*, timing the call, and appends it
    // either to the shared commonAllocations list or to the caller's own list.
    auto Allocate = [&](
        VkDeviceSize bufferSize,
        const VkExtent2D imageExtent,
        RandomNumberGenerator& localRand,
        VkDeviceSize& totalAllocatedBytes,
        std::vector<Allocation>& allocations) -> VkResult
    {
        // Exactly one of buffer size / image extent must be specified.
        assert((bufferSize == 0) != (imageExtent.width == 0 && imageExtent.height == 0));

        // Weighted draw of the memory usage index (0..3 maps onto
        // VMA_MEMORY_USAGE_GPU_ONLY + index).
        uint32_t memUsageIndex = 0;
        uint32_t memUsageRand = localRand.Generate() % memUsageProbabilitySum;
        while(memUsageRand >= config.MemUsageProbability[memUsageIndex])
            memUsageRand -= config.MemUsageProbability[memUsageIndex++];

        VmaAllocationCreateInfo memReq = {};
        memReq.usage = (VmaMemoryUsage)(VMA_MEMORY_USAGE_GPU_ONLY + memUsageIndex);
        memReq.flags |= config.AllocationStrategy;

        Allocation allocation = {};
        VmaAllocationInfo allocationInfo;

        // Buffer
        if(bufferSize > 0)
        {
            assert(imageExtent.width == 0);
            VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
            bufferInfo.size = bufferSize;
            bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

            {
                // Scoped timer: measures just the vmaCreateBuffer call.
                AllocationTimeRegisterObj timeRegisterObj{outResult};
                res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &memReq, &allocation.Buffer, &allocation.Alloc, &allocationInfo);
            }
        }
        // Image
        else
        {
            VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
            imageInfo.imageType = VK_IMAGE_TYPE_2D;
            imageInfo.extent.width = imageExtent.width;
            imageInfo.extent.height = imageExtent.height;
            imageInfo.extent.depth = 1;
            imageInfo.mipLevels = 1;
            imageInfo.arrayLayers = 1;
            imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
            // GPU-only images use optimal tiling; host-visible usages use
            // linear tiling.
            imageInfo.tiling = memReq.usage == VMA_MEMORY_USAGE_GPU_ONLY ?
                VK_IMAGE_TILING_OPTIMAL :
                VK_IMAGE_TILING_LINEAR;
            imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
            // Pick usage flags compatible with the chosen memory usage.
            switch(memReq.usage)
            {
            case VMA_MEMORY_USAGE_GPU_ONLY:
                switch(localRand.Generate() % 3)
                {
                case 0:
                    imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
                    break;
                case 1:
                    imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
                    break;
                case 2:
                    imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
                    break;
                }
                break;
            case VMA_MEMORY_USAGE_CPU_ONLY:
            case VMA_MEMORY_USAGE_CPU_TO_GPU:
                imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
                break;
            case VMA_MEMORY_USAGE_GPU_TO_CPU:
                imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
                break;
            }
            imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
            imageInfo.flags = 0;

            {
                // Scoped timer: measures just the vmaCreateImage call.
                AllocationTimeRegisterObj timeRegisterObj{outResult};
                res = vmaCreateImage(g_hAllocator, &imageInfo, &memReq, &allocation.Image, &allocation.Alloc, &allocationInfo);
            }
        }

        if(res == VK_SUCCESS)
        {
            ++allocationCount;
            totalAllocatedBytes += allocationInfo.size;
            // Occasionally publish the allocation to the shared list so other
            // threads may free it.
            bool useCommonAllocations = localRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
            if(useCommonAllocations)
            {
                std::unique_lock<std::mutex> lock(commonAllocationsMutex);
                commonAllocations.push_back(allocation);
            }
            else
                allocations.push_back(allocation);
        }
        else
        {
            // Any allocation failure aborts the test.
            TEST(0);
        }
        return res;
    };

    // Draws a weighted random size class and produces either a buffer size
    // (rounded down to a multiple of 16) or a square-ish image extent.
    auto GetNextAllocationSize = [&](
        VkDeviceSize& outBufSize,
        VkExtent2D& outImageSize,
        RandomNumberGenerator& localRand)
    {
        outBufSize = 0;
        outImageSize = {0, 0};

        uint32_t allocSizeIndex = 0;
        uint32_t r = localRand.Generate() % allocationSizeProbabilitySum;
        while(r >= config.AllocationSizes[allocSizeIndex].Probability)
            r -= config.AllocationSizes[allocSizeIndex++].Probability;

        const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
        if(allocSize.BufferSizeMax > 0)
        {
            assert(allocSize.ImageSizeMax == 0);
            if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
                outBufSize = allocSize.BufferSizeMin;
            else
            {
                outBufSize = allocSize.BufferSizeMin + localRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
                // Round down to a 16-byte multiple.
                outBufSize = outBufSize / 16 * 16;
            }
        }
        else
        {
            if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
                outImageSize.width = outImageSize.height = allocSize.ImageSizeMax;
            else
            {
                outImageSize.width = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
                outImageSize.height = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
            }
        }
    };

    std::atomic<uint32_t> numThreadsReachedMaxAllocations = 0;
    // Manual-reset Win32 event: workers park on it after the operation phase
    // until the main thread has captured statistics.
    HANDLE threadsFinishEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

    // Body of each worker thread.
    auto ThreadProc = [&](uint32_t randSeed) -> void
    {
        RandomNumberGenerator threadRand(randSeed);
        VkDeviceSize threadTotalAllocatedBytes = 0;
        std::vector<Allocation> threadAllocations;
        // Each thread gets an equal share of the global budgets.
        VkDeviceSize threadBeginBytesToAllocate = config.BeginBytesToAllocate / config.ThreadCount;
        VkDeviceSize threadMaxBytesToAllocate = config.MaxBytesToAllocate / config.ThreadCount;
        uint32_t threadAdditionalOperationCount = config.AdditionalOperationCount / config.ThreadCount;

        // BEGIN ALLOCATIONS
        for(;;)
        {
            VkDeviceSize bufferSize = 0;
            VkExtent2D imageExtent = {};
            GetNextAllocationSize(bufferSize, imageExtent, threadRand);
            if(threadTotalAllocatedBytes + bufferSize + imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
                threadBeginBytesToAllocate)
            {
                if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
                    break;
            }
            else
                break;
        }

        // ADDITIONAL ALLOCATIONS AND FREES
        for(size_t i = 0; i < threadAdditionalOperationCount; ++i)
        {
            VkDeviceSize bufferSize = 0;
            VkExtent2D imageExtent = {};
            GetNextAllocationSize(bufferSize, imageExtent, threadRand);

            // true = allocate, false = free
            bool allocate = threadRand.Generate() % 2 != 0;

            if(allocate)
            {
                // Allocate only if it keeps the thread under its byte budget.
                if(threadTotalAllocatedBytes +
                    bufferSize +
                    imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
                    threadMaxBytesToAllocate)
                {
                    if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
                        break;
                }
            }
            else
            {
                // Free a random allocation, either from the shared list
                // (under the mutex) or from this thread's own list.
                bool useCommonAllocations = threadRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
                if(useCommonAllocations)
                {
                    std::unique_lock<std::mutex> lock(commonAllocationsMutex);
                    if(!commonAllocations.empty())
                    {
                        size_t indexToFree = threadRand.Generate() % commonAllocations.size();
                        VmaAllocationInfo allocationInfo;
                        vmaGetAllocationInfo(g_hAllocator, commonAllocations[indexToFree].Alloc, &allocationInfo);
                        // Only count the free against this thread if its own
                        // byte counter can absorb it.
                        if(threadTotalAllocatedBytes >= allocationInfo.size)
                        {
                            DeallocationTimeRegisterObj timeRegisterObj{outResult};
                            if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                                vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
                            threadTotalAllocatedBytes -= allocationInfo.size;
                            commonAllocations.erase(commonAllocations.begin() + indexToFree);
                        }
                    }
                }
                else
                {
                    if(!threadAllocations.empty())
                    {
                        size_t indexToFree = threadRand.Generate() % threadAllocations.size();
                        VmaAllocationInfo allocationInfo;
                        vmaGetAllocationInfo(g_hAllocator, threadAllocations[indexToFree].Alloc, &allocationInfo);
                        if(threadTotalAllocatedBytes >= allocationInfo.size)
                        {
                            DeallocationTimeRegisterObj timeRegisterObj{outResult};
                            if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                                vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
                            threadTotalAllocatedBytes -= allocationInfo.size;
                            threadAllocations.erase(threadAllocations.begin() + indexToFree);
                        }
                    }
                }
            }
        }

        // Report this thread as done with the operation phase.
        ++numThreadsReachedMaxAllocations;

        // Park until the main thread has captured statistics.
        WaitForSingleObject(threadsFinishEvent, INFINITE);

        // DEALLOCATION
        while(!threadAllocations.empty())
        {
            size_t indexToFree = 0;
            switch(config.FreeOrder)
            {
            case FREE_ORDER::FORWARD:
                indexToFree = 0;
                break;
            case FREE_ORDER::BACKWARD:
                indexToFree = threadAllocations.size() - 1;
                break;
            case FREE_ORDER::RANDOM:
                // NOTE(review): mainRand is shared by all worker threads here
                // without a lock — potential data race; threadRand would be
                // thread-local. Confirm whether this is intentional.
                indexToFree = mainRand.Generate() % threadAllocations.size();
                break;
            }

            {
                DeallocationTimeRegisterObj timeRegisterObj{outResult};
                if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                    vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
                else
                    vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
            }
            threadAllocations.erase(threadAllocations.begin() + indexToFree);
        }
    };

    // Launch workers, each with a distinct derived seed.
    uint32_t threadRandSeed = mainRand.Generate();
    std::vector<std::thread> bkgThreads;
    for(size_t i = 0; i < config.ThreadCount; ++i)
    {
        bkgThreads.emplace_back(std::bind(ThreadProc, threadRandSeed + (uint32_t)i));
    }

    // Wait for threads reached max allocations
    while(numThreadsReachedMaxAllocations < config.ThreadCount)
        Sleep(0);

    // CALCULATE MEMORY STATISTICS ON FINAL USAGE
    VmaStats vmaStats = {};
    vmaCalculateStats(g_hAllocator, &vmaStats);
    outResult.TotalMemoryAllocated = vmaStats.total.usedBytes + vmaStats.total.unusedBytes;
    outResult.FreeRangeSizeMax = vmaStats.total.unusedRangeSizeMax;
    outResult.FreeRangeSizeAvg = vmaStats.total.unusedRangeSizeAvg;

    // Signal threads to deallocate
    SetEvent(threadsFinishEvent);

    // Wait for threads finished
    for(size_t i = 0; i < bkgThreads.size(); ++i)
        bkgThreads[i].join();
    bkgThreads.clear();

    CloseHandle(threadsFinishEvent);

    // Deallocate remaining common resources
    while(!commonAllocations.empty())
    {
        size_t indexToFree = 0;
        switch(config.FreeOrder)
        {
        case FREE_ORDER::FORWARD:
            indexToFree = 0;
            break;
        case FREE_ORDER::BACKWARD:
            indexToFree = commonAllocations.size() - 1;
            break;
        case FREE_ORDER::RANDOM:
            indexToFree = mainRand.Generate() % commonAllocations.size();
            break;
        }

        {
            DeallocationTimeRegisterObj timeRegisterObj{outResult};
            if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
            else
                vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
        }
        commonAllocations.erase(commonAllocations.begin() + indexToFree);
    }

    // Turn accumulated sums into averages.
    if(allocationCount)
    {
        outResult.AllocationTimeAvg /= allocationCount;
        outResult.DeallocationTimeAvg /= allocationCount;
    }

    outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;

    return res;
}
676
Adam Sawicki51fa9662018-10-03 13:44:29 +0200677void SaveAllocatorStatsToFile(const wchar_t* filePath)
Adam Sawickib8333fb2018-03-13 16:15:53 +0100678{
Adam Sawicki4d844e22019-01-24 16:21:05 +0100679 wprintf(L"Saving JSON dump to file \"%s\"\n", filePath);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100680 char* stats;
Adam Sawickie44c6262018-06-15 14:30:39 +0200681 vmaBuildStatsString(g_hAllocator, &stats, VK_TRUE);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100682 SaveFile(filePath, stats, strlen(stats));
Adam Sawickie44c6262018-06-15 14:30:39 +0200683 vmaFreeStatsString(g_hAllocator, stats);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100684}
685
// One resource created by the tests — exactly one of m_Buffer/m_Image is
// non-null — together with its VMA allocation and the info needed to
// re-create and validate it.
struct AllocInfo
{
    VmaAllocation m_Allocation = VK_NULL_HANDLE;
    VkBuffer m_Buffer = VK_NULL_HANDLE; // Set when this entry is a buffer.
    VkImage m_Image = VK_NULL_HANDLE;   // Set when this entry is an image.
    VkImageLayout m_ImageLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Layout requested for the image.
    uint32_t m_StartValue = 0; // First value of the incrementing uint32 pattern written to the resource.
    // Creation parameters; the active member is the one matching the non-null
    // handle above.
    union
    {
        VkBufferCreateInfo m_BufferInfo;
        VkImageCreateInfo m_ImageInfo;
    };

    // After defragmentation.
    VkBuffer m_NewBuffer = VK_NULL_HANDLE;
    VkImage m_NewImage = VK_NULL_HANDLE;

    // Creates the buffer/image via VMA and fills the members above.
    void CreateBuffer(
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo);
    void CreateImage(
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VkImageLayout layout);
    // Destroys the owned resource, frees the allocation, resets the handles.
    void Destroy();
};
712
Adam Sawickiff0f7b82018-10-18 14:44:05 +0200713void AllocInfo::CreateBuffer(
714 const VkBufferCreateInfo& bufCreateInfo,
715 const VmaAllocationCreateInfo& allocCreateInfo)
716{
717 m_BufferInfo = bufCreateInfo;
718 VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &m_Buffer, &m_Allocation, nullptr);
719 TEST(res == VK_SUCCESS);
720}
Adam Sawickia52012d2019-12-23 15:28:51 +0100721void AllocInfo::CreateImage(
722 const VkImageCreateInfo& imageCreateInfo,
723 const VmaAllocationCreateInfo& allocCreateInfo,
724 VkImageLayout layout)
725{
726 m_ImageInfo = imageCreateInfo;
727 m_ImageLayout = layout;
728 VkResult res = vmaCreateImage(g_hAllocator, &imageCreateInfo, &allocCreateInfo, &m_Image, &m_Allocation, nullptr);
729 TEST(res == VK_SUCCESS);
730}
Adam Sawickiff0f7b82018-10-18 14:44:05 +0200731
// Destroys whichever resource this AllocInfo owns (image XOR buffer), then
// frees the underlying VMA allocation. All handles are reset to null so the
// call is safe to repeat.
void AllocInfo::Destroy()
{
    if(m_Image)
    {
        assert(!m_Buffer); // An AllocInfo holds either an image or a buffer, never both.
        vkDestroyImage(g_hDevice, m_Image, g_Allocs);
        m_Image = VK_NULL_HANDLE;
    }
    if(m_Buffer)
    {
        assert(!m_Image);
        vkDestroyBuffer(g_hDevice, m_Buffer, g_Allocs);
        m_Buffer = VK_NULL_HANDLE;
    }
    if(m_Allocation)
    {
        // Free the memory only after the resource bound to it is gone.
        vmaFreeMemory(g_hAllocator, m_Allocation);
        m_Allocation = VK_NULL_HANDLE;
    }
}
752
// Pool of persistently mapped, host-visible staging buffers reused across
// upload/download operations, capped at MAX_TOTAL_SIZE in total.
class StagingBufferCollection
{
public:
    StagingBufferCollection() { }
    ~StagingBufferCollection();
    // Returns false if maximum total size of buffers would be exceeded.
    bool AcquireBuffer(VkDeviceSize size, VkBuffer& outBuffer, void*& outMappedPtr);
    // Marks all buffers as unused so they can be acquired again; destroys nothing.
    void ReleaseAllBuffers();

private:
    // Cap on the combined size of all staging buffers (256 MiB).
    static const VkDeviceSize MAX_TOTAL_SIZE = 256ull * 1024 * 1024;
    // One cached staging buffer and its persistent mapping.
    struct BufInfo
    {
        VmaAllocation Allocation = VK_NULL_HANDLE;
        VkBuffer Buffer = VK_NULL_HANDLE;
        VkDeviceSize Size = VK_WHOLE_SIZE;
        void* MappedPtr = nullptr;
        bool Used = false; // Currently handed out via AcquireBuffer.
    };
    std::vector<BufInfo> m_Bufs;
    // Including both used and unused.
    VkDeviceSize m_TotalSize = 0;
};
776
777StagingBufferCollection::~StagingBufferCollection()
778{
779 for(size_t i = m_Bufs.size(); i--; )
780 {
781 vmaDestroyBuffer(g_hAllocator, m_Bufs[i].Buffer, m_Bufs[i].Allocation);
782 }
783}
784
// Hands out a mapped staging buffer of at least `size` bytes:
// 1. Prefers the smallest unused cached buffer that is large enough (best fit).
// 2. Otherwise creates a new buffer if the MAX_TOTAL_SIZE budget allows.
// 3. Otherwise destroys all unused (too small) buffers and retries once via
//    recursion.
// Returns false only if even after freeing unused buffers the request cannot
// fit the budget.
bool StagingBufferCollection::AcquireBuffer(VkDeviceSize size, VkBuffer& outBuffer, void*& outMappedPtr)
{
    assert(size <= MAX_TOTAL_SIZE);

    // Try to find existing unused buffer with best size.
    size_t bestIndex = SIZE_MAX;
    for(size_t i = 0, count = m_Bufs.size(); i < count; ++i)
    {
        BufInfo& currBufInfo = m_Bufs[i];
        if(!currBufInfo.Used && currBufInfo.Size >= size &&
            (bestIndex == SIZE_MAX || currBufInfo.Size < m_Bufs[bestIndex].Size))
        {
            bestIndex = i;
        }
    }

    if(bestIndex != SIZE_MAX)
    {
        m_Bufs[bestIndex].Used = true;
        outBuffer = m_Bufs[bestIndex].Buffer;
        outMappedPtr = m_Bufs[bestIndex].MappedPtr;
        return true;
    }

    // Allocate new buffer with requested size.
    if(m_TotalSize + size <= MAX_TOTAL_SIZE)
    {
        BufInfo bufInfo;
        bufInfo.Size = size;
        bufInfo.Used = true;

        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = size;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        // CPU-side, persistently mapped memory for staging.
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

        VmaAllocationInfo allocInfo;
        VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo);
        bufInfo.MappedPtr = allocInfo.pMappedData;
        TEST(res == VK_SUCCESS && bufInfo.MappedPtr);

        outBuffer = bufInfo.Buffer;
        outMappedPtr = bufInfo.MappedPtr;

        m_Bufs.push_back(std::move(bufInfo));

        m_TotalSize += size;

        return true;
    }

    // There are some unused but smaller buffers: Free them and try again.
    bool hasUnused = false;
    for(size_t i = 0, count = m_Bufs.size(); i < count; ++i)
    {
        if(!m_Bufs[i].Used)
        {
            hasUnused = true;
            break;
        }
    }
    if(hasUnused)
    {
        // Iterate backwards so erase() doesn't shift elements not yet visited.
        for(size_t i = m_Bufs.size(); i--; )
        {
            if(!m_Bufs[i].Used)
            {
                m_TotalSize -= m_Bufs[i].Size;
                vmaDestroyBuffer(g_hAllocator, m_Bufs[i].Buffer, m_Bufs[i].Allocation);
                m_Bufs.erase(m_Bufs.begin() + i);
            }
        }

        // Retry with the freed-up budget; recursion terminates because no
        // unused buffers remain on the second attempt.
        return AcquireBuffer(size, outBuffer, outMappedPtr);
    }

    return false;
}
866
867void StagingBufferCollection::ReleaseAllBuffers()
868{
869 for(size_t i = 0, count = m_Bufs.size(); i < count; ++i)
870 {
871 m_Bufs[i].Used = false;
872 }
873}
874
875static void UploadGpuData(const AllocInfo* allocInfo, size_t allocInfoCount)
876{
877 StagingBufferCollection stagingBufs;
878
879 bool cmdBufferStarted = false;
880 for(size_t allocInfoIndex = 0; allocInfoIndex < allocInfoCount; ++allocInfoIndex)
881 {
882 const AllocInfo& currAllocInfo = allocInfo[allocInfoIndex];
883 if(currAllocInfo.m_Buffer)
884 {
885 const VkDeviceSize size = currAllocInfo.m_BufferInfo.size;
886
887 VkBuffer stagingBuf = VK_NULL_HANDLE;
888 void* stagingBufMappedPtr = nullptr;
889 if(!stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr))
890 {
891 TEST(cmdBufferStarted);
892 EndSingleTimeCommands();
893 stagingBufs.ReleaseAllBuffers();
894 cmdBufferStarted = false;
895
896 bool ok = stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr);
897 TEST(ok);
898 }
899
900 // Fill staging buffer.
901 {
902 assert(size % sizeof(uint32_t) == 0);
903 uint32_t* stagingValPtr = (uint32_t*)stagingBufMappedPtr;
904 uint32_t val = currAllocInfo.m_StartValue;
905 for(size_t i = 0; i < size / sizeof(uint32_t); ++i)
906 {
907 *stagingValPtr = val;
908 ++stagingValPtr;
909 ++val;
910 }
911 }
912
913 // Issue copy command from staging buffer to destination buffer.
914 if(!cmdBufferStarted)
915 {
916 cmdBufferStarted = true;
917 BeginSingleTimeCommands();
918 }
919
920 VkBufferCopy copy = {};
921 copy.srcOffset = 0;
922 copy.dstOffset = 0;
923 copy.size = size;
924 vkCmdCopyBuffer(g_hTemporaryCommandBuffer, stagingBuf, currAllocInfo.m_Buffer, 1, &copy);
925 }
926 else
927 {
Adam Sawickia52012d2019-12-23 15:28:51 +0100928 TEST(currAllocInfo.m_ImageInfo.format == VK_FORMAT_R8G8B8A8_UNORM && "Only RGBA8 images are currently supported.");
929 TEST(currAllocInfo.m_ImageInfo.mipLevels == 1 && "Only single mip images are currently supported.");
930
Adam Sawickic467e282019-12-23 16:38:31 +0100931 const VkDeviceSize size = (VkDeviceSize)currAllocInfo.m_ImageInfo.extent.width * currAllocInfo.m_ImageInfo.extent.height * sizeof(uint32_t);
Adam Sawickia52012d2019-12-23 15:28:51 +0100932
933 VkBuffer stagingBuf = VK_NULL_HANDLE;
934 void* stagingBufMappedPtr = nullptr;
935 if(!stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr))
936 {
937 TEST(cmdBufferStarted);
938 EndSingleTimeCommands();
939 stagingBufs.ReleaseAllBuffers();
940 cmdBufferStarted = false;
941
942 bool ok = stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr);
943 TEST(ok);
944 }
945
946 // Fill staging buffer.
947 {
948 assert(size % sizeof(uint32_t) == 0);
949 uint32_t *stagingValPtr = (uint32_t *)stagingBufMappedPtr;
950 uint32_t val = currAllocInfo.m_StartValue;
951 for(size_t i = 0; i < size / sizeof(uint32_t); ++i)
952 {
953 *stagingValPtr = val;
954 ++stagingValPtr;
955 ++val;
956 }
957 }
958
959 // Issue copy command from staging buffer to destination buffer.
960 if(!cmdBufferStarted)
961 {
962 cmdBufferStarted = true;
963 BeginSingleTimeCommands();
964 }
965
966
967 // Transfer to transfer dst layout
968 VkImageSubresourceRange subresourceRange = {
969 VK_IMAGE_ASPECT_COLOR_BIT,
970 0, VK_REMAINING_MIP_LEVELS,
971 0, VK_REMAINING_ARRAY_LAYERS
972 };
973
974 VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
975 barrier.srcAccessMask = 0;
976 barrier.dstAccessMask = 0;
977 barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
978 barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
979 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
980 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
981 barrier.image = currAllocInfo.m_Image;
982 barrier.subresourceRange = subresourceRange;
983
984 vkCmdPipelineBarrier(g_hTemporaryCommandBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0,
985 0, nullptr,
986 0, nullptr,
987 1, &barrier);
988
989 // Copy image date
990 VkBufferImageCopy copy = {};
991 copy.bufferOffset = 0;
992 copy.bufferRowLength = 0;
993 copy.bufferImageHeight = 0;
994 copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
995 copy.imageSubresource.layerCount = 1;
996 copy.imageExtent = currAllocInfo.m_ImageInfo.extent;
997
998 vkCmdCopyBufferToImage(g_hTemporaryCommandBuffer, stagingBuf, currAllocInfo.m_Image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy);
999
1000 // Transfer to desired layout
1001 barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1002 barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
1003 barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1004 barrier.newLayout = currAllocInfo.m_ImageLayout;
1005
1006 vkCmdPipelineBarrier(g_hTemporaryCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0,
1007 0, nullptr,
1008 0, nullptr,
1009 1, &barrier);
Adam Sawickif2975342018-10-16 13:49:02 +02001010 }
1011 }
1012
1013 if(cmdBufferStarted)
1014 {
1015 EndSingleTimeCommands();
1016 stagingBufs.ReleaseAllBuffers();
1017 }
1018}
1019
1020static void ValidateGpuData(const AllocInfo* allocInfo, size_t allocInfoCount)
1021{
1022 StagingBufferCollection stagingBufs;
1023
1024 bool cmdBufferStarted = false;
1025 size_t validateAllocIndexOffset = 0;
1026 std::vector<void*> validateStagingBuffers;
1027 for(size_t allocInfoIndex = 0; allocInfoIndex < allocInfoCount; ++allocInfoIndex)
1028 {
1029 const AllocInfo& currAllocInfo = allocInfo[allocInfoIndex];
1030 if(currAllocInfo.m_Buffer)
1031 {
1032 const VkDeviceSize size = currAllocInfo.m_BufferInfo.size;
1033
1034 VkBuffer stagingBuf = VK_NULL_HANDLE;
1035 void* stagingBufMappedPtr = nullptr;
1036 if(!stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr))
1037 {
1038 TEST(cmdBufferStarted);
1039 EndSingleTimeCommands();
1040 cmdBufferStarted = false;
1041
1042 for(size_t validateIndex = 0;
1043 validateIndex < validateStagingBuffers.size();
1044 ++validateIndex)
1045 {
1046 const size_t validateAllocIndex = validateIndex + validateAllocIndexOffset;
1047 const VkDeviceSize validateSize = allocInfo[validateAllocIndex].m_BufferInfo.size;
1048 TEST(validateSize % sizeof(uint32_t) == 0);
1049 const uint32_t* stagingValPtr = (const uint32_t*)validateStagingBuffers[validateIndex];
1050 uint32_t val = allocInfo[validateAllocIndex].m_StartValue;
1051 bool valid = true;
1052 for(size_t i = 0; i < validateSize / sizeof(uint32_t); ++i)
1053 {
1054 if(*stagingValPtr != val)
1055 {
1056 valid = false;
1057 break;
1058 }
1059 ++stagingValPtr;
1060 ++val;
1061 }
1062 TEST(valid);
1063 }
1064
1065 stagingBufs.ReleaseAllBuffers();
1066
1067 validateAllocIndexOffset = allocInfoIndex;
1068 validateStagingBuffers.clear();
1069
1070 bool ok = stagingBufs.AcquireBuffer(size, stagingBuf, stagingBufMappedPtr);
1071 TEST(ok);
1072 }
1073
1074 // Issue copy command from staging buffer to destination buffer.
1075 if(!cmdBufferStarted)
1076 {
1077 cmdBufferStarted = true;
1078 BeginSingleTimeCommands();
1079 }
1080
1081 VkBufferCopy copy = {};
1082 copy.srcOffset = 0;
1083 copy.dstOffset = 0;
1084 copy.size = size;
1085 vkCmdCopyBuffer(g_hTemporaryCommandBuffer, currAllocInfo.m_Buffer, stagingBuf, 1, &copy);
1086
1087 // Sava mapped pointer for later validation.
1088 validateStagingBuffers.push_back(stagingBufMappedPtr);
1089 }
1090 else
1091 {
1092 TEST(0 && "Images not currently supported.");
1093 }
1094 }
1095
1096 if(cmdBufferStarted)
1097 {
1098 EndSingleTimeCommands();
1099
1100 for(size_t validateIndex = 0;
1101 validateIndex < validateStagingBuffers.size();
1102 ++validateIndex)
1103 {
1104 const size_t validateAllocIndex = validateIndex + validateAllocIndexOffset;
1105 const VkDeviceSize validateSize = allocInfo[validateAllocIndex].m_BufferInfo.size;
1106 TEST(validateSize % sizeof(uint32_t) == 0);
1107 const uint32_t* stagingValPtr = (const uint32_t*)validateStagingBuffers[validateIndex];
1108 uint32_t val = allocInfo[validateAllocIndex].m_StartValue;
1109 bool valid = true;
1110 for(size_t i = 0; i < validateSize / sizeof(uint32_t); ++i)
1111 {
1112 if(*stagingValPtr != val)
1113 {
1114 valid = false;
1115 break;
1116 }
1117 ++stagingValPtr;
1118 ++val;
1119 }
1120 TEST(valid);
1121 }
1122
1123 stagingBufs.ReleaseAllBuffers();
1124 }
1125}
1126
Adam Sawickib8333fb2018-03-13 16:15:53 +01001127static void GetMemReq(VmaAllocationCreateInfo& outMemReq)
1128{
1129 outMemReq = {};
1130 outMemReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
1131 //outMemReq.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
1132}
1133
1134static void CreateBuffer(
1135 VmaPool pool,
1136 const VkBufferCreateInfo& bufCreateInfo,
1137 bool persistentlyMapped,
1138 AllocInfo& outAllocInfo)
1139{
1140 outAllocInfo = {};
1141 outAllocInfo.m_BufferInfo = bufCreateInfo;
1142
1143 VmaAllocationCreateInfo allocCreateInfo = {};
1144 allocCreateInfo.pool = pool;
1145 if(persistentlyMapped)
1146 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
1147
1148 VmaAllocationInfo vmaAllocInfo = {};
1149 ERR_GUARD_VULKAN( vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &outAllocInfo.m_Buffer, &outAllocInfo.m_Allocation, &vmaAllocInfo) );
1150
1151 // Setup StartValue and fill.
1152 {
1153 outAllocInfo.m_StartValue = (uint32_t)rand();
1154 uint32_t* data = (uint32_t*)vmaAllocInfo.pMappedData;
Adam Sawickib8d34d52018-10-03 17:41:20 +02001155 TEST((data != nullptr) == persistentlyMapped);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001156 if(!persistentlyMapped)
1157 {
1158 ERR_GUARD_VULKAN( vmaMapMemory(g_hAllocator, outAllocInfo.m_Allocation, (void**)&data) );
1159 }
1160
1161 uint32_t value = outAllocInfo.m_StartValue;
Adam Sawickib8d34d52018-10-03 17:41:20 +02001162 TEST(bufCreateInfo.size % 4 == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001163 for(size_t i = 0; i < bufCreateInfo.size / sizeof(uint32_t); ++i)
1164 data[i] = value++;
1165
1166 if(!persistentlyMapped)
1167 vmaUnmapMemory(g_hAllocator, outAllocInfo.m_Allocation);
1168 }
1169}
1170
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001171static void CreateAllocation(AllocInfo& outAllocation)
Adam Sawickib8333fb2018-03-13 16:15:53 +01001172{
1173 outAllocation.m_Allocation = nullptr;
1174 outAllocation.m_Buffer = nullptr;
1175 outAllocation.m_Image = nullptr;
1176 outAllocation.m_StartValue = (uint32_t)rand();
1177
1178 VmaAllocationCreateInfo vmaMemReq;
1179 GetMemReq(vmaMemReq);
1180
1181 VmaAllocationInfo allocInfo;
1182
1183 const bool isBuffer = true;//(rand() & 0x1) != 0;
1184 const bool isLarge = (rand() % 16) == 0;
1185 if(isBuffer)
1186 {
1187 const uint32_t bufferSize = isLarge ?
1188 (rand() % 10 + 1) * (1024 * 1024) : // 1 MB ... 10 MB
1189 (rand() % 1024 + 1) * 1024; // 1 KB ... 1 MB
1190
1191 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1192 bufferInfo.size = bufferSize;
1193 bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1194
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001195 VkResult res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &vmaMemReq, &outAllocation.m_Buffer, &outAllocation.m_Allocation, &allocInfo);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001196 outAllocation.m_BufferInfo = bufferInfo;
Adam Sawickib8d34d52018-10-03 17:41:20 +02001197 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001198 }
1199 else
1200 {
1201 const uint32_t imageSizeX = isLarge ?
1202 1024 + rand() % (4096 - 1024) : // 1024 ... 4096
1203 rand() % 1024 + 1; // 1 ... 1024
1204 const uint32_t imageSizeY = isLarge ?
1205 1024 + rand() % (4096 - 1024) : // 1024 ... 4096
1206 rand() % 1024 + 1; // 1 ... 1024
1207
1208 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
1209 imageInfo.imageType = VK_IMAGE_TYPE_2D;
1210 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
1211 imageInfo.extent.width = imageSizeX;
1212 imageInfo.extent.height = imageSizeY;
1213 imageInfo.extent.depth = 1;
1214 imageInfo.mipLevels = 1;
1215 imageInfo.arrayLayers = 1;
1216 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
1217 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
1218 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
1219 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1220
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001221 VkResult res = vmaCreateImage(g_hAllocator, &imageInfo, &vmaMemReq, &outAllocation.m_Image, &outAllocation.m_Allocation, &allocInfo);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001222 outAllocation.m_ImageInfo = imageInfo;
Adam Sawickib8d34d52018-10-03 17:41:20 +02001223 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001224 }
1225
1226 uint32_t* data = (uint32_t*)allocInfo.pMappedData;
1227 if(allocInfo.pMappedData == nullptr)
1228 {
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001229 VkResult res = vmaMapMemory(g_hAllocator, outAllocation.m_Allocation, (void**)&data);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001230 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001231 }
1232
1233 uint32_t value = outAllocation.m_StartValue;
Adam Sawickib8d34d52018-10-03 17:41:20 +02001234 TEST(allocInfo.size % 4 == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001235 for(size_t i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
1236 data[i] = value++;
1237
1238 if(allocInfo.pMappedData == nullptr)
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001239 vmaUnmapMemory(g_hAllocator, outAllocation.m_Allocation);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001240}
1241
1242static void DestroyAllocation(const AllocInfo& allocation)
1243{
1244 if(allocation.m_Buffer)
1245 vmaDestroyBuffer(g_hAllocator, allocation.m_Buffer, allocation.m_Allocation);
1246 else
1247 vmaDestroyImage(g_hAllocator, allocation.m_Image, allocation.m_Allocation);
1248}
1249
1250static void DestroyAllAllocations(std::vector<AllocInfo>& allocations)
1251{
1252 for(size_t i = allocations.size(); i--; )
1253 DestroyAllocation(allocations[i]);
1254 allocations.clear();
1255}
1256
1257static void ValidateAllocationData(const AllocInfo& allocation)
1258{
1259 VmaAllocationInfo allocInfo;
1260 vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);
1261
1262 uint32_t* data = (uint32_t*)allocInfo.pMappedData;
1263 if(allocInfo.pMappedData == nullptr)
1264 {
1265 VkResult res = vmaMapMemory(g_hAllocator, allocation.m_Allocation, (void**)&data);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001266 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001267 }
1268
1269 uint32_t value = allocation.m_StartValue;
1270 bool ok = true;
1271 size_t i;
Adam Sawickib8d34d52018-10-03 17:41:20 +02001272 TEST(allocInfo.size % 4 == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001273 for(i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
1274 {
1275 if(data[i] != value++)
1276 {
1277 ok = false;
1278 break;
1279 }
1280 }
Adam Sawickib8d34d52018-10-03 17:41:20 +02001281 TEST(ok);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001282
1283 if(allocInfo.pMappedData == nullptr)
1284 vmaUnmapMemory(g_hAllocator, allocation.m_Allocation);
1285}
1286
1287static void RecreateAllocationResource(AllocInfo& allocation)
1288{
1289 VmaAllocationInfo allocInfo;
1290 vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);
1291
1292 if(allocation.m_Buffer)
1293 {
Adam Sawicki1f84f622019-07-02 13:40:01 +02001294 vkDestroyBuffer(g_hDevice, allocation.m_Buffer, g_Allocs);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001295
Adam Sawicki1f84f622019-07-02 13:40:01 +02001296 VkResult res = vkCreateBuffer(g_hDevice, &allocation.m_BufferInfo, g_Allocs, &allocation.m_Buffer);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001297 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001298
1299 // Just to silence validation layer warnings.
1300 VkMemoryRequirements vkMemReq;
1301 vkGetBufferMemoryRequirements(g_hDevice, allocation.m_Buffer, &vkMemReq);
Adam Sawicki2af57d72018-12-06 15:35:05 +01001302 TEST(vkMemReq.size >= allocation.m_BufferInfo.size);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001303
Adam Sawickiaf88c1b2019-07-02 12:34:26 +02001304 res = vmaBindBufferMemory(g_hAllocator, allocation.m_Allocation, allocation.m_Buffer);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001305 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001306 }
1307 else
1308 {
Adam Sawicki1f84f622019-07-02 13:40:01 +02001309 vkDestroyImage(g_hDevice, allocation.m_Image, g_Allocs);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001310
Adam Sawicki1f84f622019-07-02 13:40:01 +02001311 VkResult res = vkCreateImage(g_hDevice, &allocation.m_ImageInfo, g_Allocs, &allocation.m_Image);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001312 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001313
1314 // Just to silence validation layer warnings.
1315 VkMemoryRequirements vkMemReq;
1316 vkGetImageMemoryRequirements(g_hDevice, allocation.m_Image, &vkMemReq);
1317
Adam Sawickiaf88c1b2019-07-02 12:34:26 +02001318 res = vmaBindImageMemory(g_hAllocator, allocation.m_Allocation, allocation.m_Image);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001319 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001320 }
1321}
1322
1323static void Defragment(AllocInfo* allocs, size_t allocCount,
1324 const VmaDefragmentationInfo* defragmentationInfo = nullptr,
1325 VmaDefragmentationStats* defragmentationStats = nullptr)
1326{
1327 std::vector<VmaAllocation> vmaAllocs(allocCount);
1328 for(size_t i = 0; i < allocCount; ++i)
1329 vmaAllocs[i] = allocs[i].m_Allocation;
1330
1331 std::vector<VkBool32> allocChanged(allocCount);
1332
1333 ERR_GUARD_VULKAN( vmaDefragment(g_hAllocator, vmaAllocs.data(), allocCount, allocChanged.data(),
1334 defragmentationInfo, defragmentationStats) );
1335
1336 for(size_t i = 0; i < allocCount; ++i)
1337 {
1338 if(allocChanged[i])
1339 {
1340 RecreateAllocationResource(allocs[i]);
1341 }
1342 }
1343}
1344
1345static void ValidateAllocationsData(const AllocInfo* allocs, size_t allocCount)
1346{
1347 std::for_each(allocs, allocs + allocCount, [](const AllocInfo& allocInfo) {
1348 ValidateAllocationData(allocInfo);
1349 });
1350}
1351
1352void TestDefragmentationSimple()
1353{
1354 wprintf(L"Test defragmentation simple\n");
1355
1356 RandomNumberGenerator rand(667);
1357
1358 const VkDeviceSize BUF_SIZE = 0x10000;
1359 const VkDeviceSize BLOCK_SIZE = BUF_SIZE * 8;
1360
1361 const VkDeviceSize MIN_BUF_SIZE = 32;
1362 const VkDeviceSize MAX_BUF_SIZE = BUF_SIZE * 4;
1363 auto RandomBufSize = [&]() -> VkDeviceSize {
1364 return align_up<VkDeviceSize>(rand.Generate() % (MAX_BUF_SIZE - MIN_BUF_SIZE + 1) + MIN_BUF_SIZE, 32);
1365 };
1366
1367 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1368 bufCreateInfo.size = BUF_SIZE;
1369 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1370
1371 VmaAllocationCreateInfo exampleAllocCreateInfo = {};
1372 exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1373
1374 uint32_t memTypeIndex = UINT32_MAX;
1375 vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
1376
1377 VmaPoolCreateInfo poolCreateInfo = {};
1378 poolCreateInfo.blockSize = BLOCK_SIZE;
1379 poolCreateInfo.memoryTypeIndex = memTypeIndex;
1380
1381 VmaPool pool;
1382 ERR_GUARD_VULKAN( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) );
1383
Adam Sawickie1681912018-11-23 17:50:12 +01001384 // Defragmentation of empty pool.
1385 {
1386 VmaDefragmentationInfo2 defragInfo = {};
1387 defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
1388 defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
1389 defragInfo.poolCount = 1;
1390 defragInfo.pPools = &pool;
1391
1392 VmaDefragmentationStats defragStats = {};
1393 VmaDefragmentationContext defragCtx = nullptr;
1394 VkResult res = vmaDefragmentationBegin(g_hAllocator, &defragInfo, &defragStats, &defragCtx);
1395 TEST(res >= VK_SUCCESS);
1396 vmaDefragmentationEnd(g_hAllocator, defragCtx);
1397 TEST(defragStats.allocationsMoved == 0 && defragStats.bytesFreed == 0 &&
1398 defragStats.bytesMoved == 0 && defragStats.deviceMemoryBlocksFreed == 0);
1399 }
1400
Adam Sawickib8333fb2018-03-13 16:15:53 +01001401 std::vector<AllocInfo> allocations;
1402
1403 // persistentlyMappedOption = 0 - not persistently mapped.
1404 // persistentlyMappedOption = 1 - persistently mapped.
1405 for(uint32_t persistentlyMappedOption = 0; persistentlyMappedOption < 2; ++persistentlyMappedOption)
1406 {
1407 wprintf(L" Persistently mapped option = %u\n", persistentlyMappedOption);
1408 const bool persistentlyMapped = persistentlyMappedOption != 0;
1409
1410 // # Test 1
1411 // Buffers of fixed size.
1412 // Fill 2 blocks. Remove odd buffers. Defragment everything.
1413 // Expected result: at least 1 block freed.
1414 {
1415 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
1416 {
1417 AllocInfo allocInfo;
1418 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
1419 allocations.push_back(allocInfo);
1420 }
1421
1422 for(size_t i = 1; i < allocations.size(); ++i)
1423 {
1424 DestroyAllocation(allocations[i]);
1425 allocations.erase(allocations.begin() + i);
1426 }
1427
1428 VmaDefragmentationStats defragStats;
1429 Defragment(allocations.data(), allocations.size(), nullptr, &defragStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001430 TEST(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
1431 TEST(defragStats.deviceMemoryBlocksFreed >= 1);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001432
1433 ValidateAllocationsData(allocations.data(), allocations.size());
1434
1435 DestroyAllAllocations(allocations);
1436 }
1437
1438 // # Test 2
1439 // Buffers of fixed size.
1440 // Fill 2 blocks. Remove odd buffers. Defragment one buffer at time.
1441 // Expected result: Each of 4 interations makes some progress.
1442 {
1443 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
1444 {
1445 AllocInfo allocInfo;
1446 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
1447 allocations.push_back(allocInfo);
1448 }
1449
1450 for(size_t i = 1; i < allocations.size(); ++i)
1451 {
1452 DestroyAllocation(allocations[i]);
1453 allocations.erase(allocations.begin() + i);
1454 }
1455
1456 VmaDefragmentationInfo defragInfo = {};
1457 defragInfo.maxAllocationsToMove = 1;
1458 defragInfo.maxBytesToMove = BUF_SIZE;
1459
1460 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE / 2; ++i)
1461 {
1462 VmaDefragmentationStats defragStats;
1463 Defragment(allocations.data(), allocations.size(), &defragInfo, &defragStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001464 TEST(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001465 }
1466
1467 ValidateAllocationsData(allocations.data(), allocations.size());
1468
1469 DestroyAllAllocations(allocations);
1470 }
1471
1472 // # Test 3
1473 // Buffers of variable size.
1474 // Create a number of buffers. Remove some percent of them.
1475 // Defragment while having some percent of them unmovable.
1476 // Expected result: Just simple validation.
1477 {
1478 for(size_t i = 0; i < 100; ++i)
1479 {
1480 VkBufferCreateInfo localBufCreateInfo = bufCreateInfo;
1481 localBufCreateInfo.size = RandomBufSize();
1482
1483 AllocInfo allocInfo;
1484 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
1485 allocations.push_back(allocInfo);
1486 }
1487
1488 const uint32_t percentToDelete = 60;
1489 const size_t numberToDelete = allocations.size() * percentToDelete / 100;
1490 for(size_t i = 0; i < numberToDelete; ++i)
1491 {
1492 size_t indexToDelete = rand.Generate() % (uint32_t)allocations.size();
1493 DestroyAllocation(allocations[indexToDelete]);
1494 allocations.erase(allocations.begin() + indexToDelete);
1495 }
1496
1497 // Non-movable allocations will be at the beginning of allocations array.
1498 const uint32_t percentNonMovable = 20;
1499 const size_t numberNonMovable = allocations.size() * percentNonMovable / 100;
1500 for(size_t i = 0; i < numberNonMovable; ++i)
1501 {
1502 size_t indexNonMovable = i + rand.Generate() % (uint32_t)(allocations.size() - i);
1503 if(indexNonMovable != i)
1504 std::swap(allocations[i], allocations[indexNonMovable]);
1505 }
1506
1507 VmaDefragmentationStats defragStats;
1508 Defragment(
1509 allocations.data() + numberNonMovable,
1510 allocations.size() - numberNonMovable,
1511 nullptr, &defragStats);
1512
1513 ValidateAllocationsData(allocations.data(), allocations.size());
1514
1515 DestroyAllAllocations(allocations);
1516 }
1517 }
1518
Adam Sawicki647cf242018-11-23 17:58:00 +01001519 /*
1520 Allocation that must be move to an overlapping place using memmove().
1521 Create 2 buffers, second slightly bigger than the first. Delete first. Then defragment.
1522 */
Adam Sawickibdb89a92018-12-13 11:56:30 +01001523 if(VMA_DEBUG_MARGIN == 0) // FAST algorithm works only when DEBUG_MARGIN disabled.
Adam Sawicki647cf242018-11-23 17:58:00 +01001524 {
1525 AllocInfo allocInfo[2];
1526
1527 bufCreateInfo.size = BUF_SIZE;
1528 CreateBuffer(pool, bufCreateInfo, false, allocInfo[0]);
1529 const VkDeviceSize biggerBufSize = BUF_SIZE + BUF_SIZE / 256;
1530 bufCreateInfo.size = biggerBufSize;
1531 CreateBuffer(pool, bufCreateInfo, false, allocInfo[1]);
1532
1533 DestroyAllocation(allocInfo[0]);
1534
1535 VmaDefragmentationStats defragStats;
1536 Defragment(&allocInfo[1], 1, nullptr, &defragStats);
1537 // If this fails, it means we couldn't do memmove with overlapping regions.
1538 TEST(defragStats.allocationsMoved == 1 && defragStats.bytesMoved > 0);
1539
1540 ValidateAllocationsData(&allocInfo[1], 1);
1541 DestroyAllocation(allocInfo[1]);
1542 }
1543
Adam Sawickib8333fb2018-03-13 16:15:53 +01001544 vmaDestroyPool(g_hAllocator, pool);
1545}
1546
Adam Sawicki52076eb2018-11-22 16:14:50 +01001547void TestDefragmentationWholePool()
1548{
1549 wprintf(L"Test defragmentation whole pool\n");
1550
1551 RandomNumberGenerator rand(668);
1552
1553 const VkDeviceSize BUF_SIZE = 0x10000;
1554 const VkDeviceSize BLOCK_SIZE = BUF_SIZE * 8;
1555
1556 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1557 bufCreateInfo.size = BUF_SIZE;
1558 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1559
1560 VmaAllocationCreateInfo exampleAllocCreateInfo = {};
1561 exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1562
1563 uint32_t memTypeIndex = UINT32_MAX;
1564 vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
1565
1566 VmaPoolCreateInfo poolCreateInfo = {};
1567 poolCreateInfo.blockSize = BLOCK_SIZE;
1568 poolCreateInfo.memoryTypeIndex = memTypeIndex;
1569
1570 VmaDefragmentationStats defragStats[2];
1571 for(size_t caseIndex = 0; caseIndex < 2; ++caseIndex)
1572 {
1573 VmaPool pool;
1574 ERR_GUARD_VULKAN( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) );
1575
1576 std::vector<AllocInfo> allocations;
1577
1578 // Buffers of fixed size.
1579 // Fill 2 blocks. Remove odd buffers. Defragment all of them.
1580 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
1581 {
1582 AllocInfo allocInfo;
1583 CreateBuffer(pool, bufCreateInfo, false, allocInfo);
1584 allocations.push_back(allocInfo);
1585 }
1586
1587 for(size_t i = 1; i < allocations.size(); ++i)
1588 {
1589 DestroyAllocation(allocations[i]);
1590 allocations.erase(allocations.begin() + i);
1591 }
1592
1593 VmaDefragmentationInfo2 defragInfo = {};
1594 defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
1595 defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
1596 std::vector<VmaAllocation> allocationsToDefrag;
1597 if(caseIndex == 0)
1598 {
1599 defragInfo.poolCount = 1;
1600 defragInfo.pPools = &pool;
1601 }
1602 else
1603 {
1604 const size_t allocCount = allocations.size();
1605 allocationsToDefrag.resize(allocCount);
1606 std::transform(
1607 allocations.begin(), allocations.end(),
1608 allocationsToDefrag.begin(),
1609 [](const AllocInfo& allocInfo) { return allocInfo.m_Allocation; });
1610 defragInfo.allocationCount = (uint32_t)allocCount;
1611 defragInfo.pAllocations = allocationsToDefrag.data();
1612 }
1613
1614 VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
1615 VkResult res = vmaDefragmentationBegin(g_hAllocator, &defragInfo, &defragStats[caseIndex], &defragCtx);
1616 TEST(res >= VK_SUCCESS);
1617 vmaDefragmentationEnd(g_hAllocator, defragCtx);
1618
1619 TEST(defragStats[caseIndex].allocationsMoved > 0 && defragStats[caseIndex].bytesMoved > 0);
1620
1621 ValidateAllocationsData(allocations.data(), allocations.size());
1622
1623 DestroyAllAllocations(allocations);
1624
1625 vmaDestroyPool(g_hAllocator, pool);
1626 }
1627
1628 TEST(defragStats[0].bytesMoved == defragStats[1].bytesMoved);
1629 TEST(defragStats[0].allocationsMoved == defragStats[1].allocationsMoved);
1630 TEST(defragStats[0].bytesFreed == defragStats[1].bytesFreed);
1631 TEST(defragStats[0].deviceMemoryBlocksFreed == defragStats[1].deviceMemoryBlocksFreed);
1632}
1633
Adam Sawickib8333fb2018-03-13 16:15:53 +01001634void TestDefragmentationFull()
1635{
1636 std::vector<AllocInfo> allocations;
1637
1638 // Create initial allocations.
1639 for(size_t i = 0; i < 400; ++i)
1640 {
1641 AllocInfo allocation;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001642 CreateAllocation(allocation);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001643 allocations.push_back(allocation);
1644 }
1645
1646 // Delete random allocations
1647 const size_t allocationsToDeletePercent = 80;
1648 size_t allocationsToDelete = allocations.size() * allocationsToDeletePercent / 100;
1649 for(size_t i = 0; i < allocationsToDelete; ++i)
1650 {
1651 size_t index = (size_t)rand() % allocations.size();
1652 DestroyAllocation(allocations[index]);
1653 allocations.erase(allocations.begin() + index);
1654 }
1655
1656 for(size_t i = 0; i < allocations.size(); ++i)
1657 ValidateAllocationData(allocations[i]);
1658
Adam Sawicki0667e332018-08-24 17:26:44 +02001659 //SaveAllocatorStatsToFile(L"Before.csv");
Adam Sawickib8333fb2018-03-13 16:15:53 +01001660
1661 {
1662 std::vector<VmaAllocation> vmaAllocations(allocations.size());
1663 for(size_t i = 0; i < allocations.size(); ++i)
1664 vmaAllocations[i] = allocations[i].m_Allocation;
1665
1666 const size_t nonMovablePercent = 0;
1667 size_t nonMovableCount = vmaAllocations.size() * nonMovablePercent / 100;
1668 for(size_t i = 0; i < nonMovableCount; ++i)
1669 {
1670 size_t index = (size_t)rand() % vmaAllocations.size();
1671 vmaAllocations.erase(vmaAllocations.begin() + index);
1672 }
1673
1674 const uint32_t defragCount = 1;
1675 for(uint32_t defragIndex = 0; defragIndex < defragCount; ++defragIndex)
1676 {
1677 std::vector<VkBool32> allocationsChanged(vmaAllocations.size());
1678
1679 VmaDefragmentationInfo defragmentationInfo;
1680 defragmentationInfo.maxAllocationsToMove = UINT_MAX;
1681 defragmentationInfo.maxBytesToMove = SIZE_MAX;
1682
1683 wprintf(L"Defragmentation #%u\n", defragIndex);
1684
1685 time_point begTime = std::chrono::high_resolution_clock::now();
1686
1687 VmaDefragmentationStats stats;
1688 VkResult res = vmaDefragment(g_hAllocator, vmaAllocations.data(), vmaAllocations.size(), allocationsChanged.data(), &defragmentationInfo, &stats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02001689 TEST(res >= 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001690
1691 float defragmentDuration = ToFloatSeconds(std::chrono::high_resolution_clock::now() - begTime);
1692
1693 wprintf(L"Moved allocations %u, bytes %llu\n", stats.allocationsMoved, stats.bytesMoved);
1694 wprintf(L"Freed blocks %u, bytes %llu\n", stats.deviceMemoryBlocksFreed, stats.bytesFreed);
1695 wprintf(L"Time: %.2f s\n", defragmentDuration);
1696
1697 for(size_t i = 0; i < vmaAllocations.size(); ++i)
1698 {
1699 if(allocationsChanged[i])
1700 {
1701 RecreateAllocationResource(allocations[i]);
1702 }
1703 }
1704
1705 for(size_t i = 0; i < allocations.size(); ++i)
1706 ValidateAllocationData(allocations[i]);
1707
Adam Sawicki0667e332018-08-24 17:26:44 +02001708 //wchar_t fileName[MAX_PATH];
1709 //swprintf(fileName, MAX_PATH, L"After_%02u.csv", defragIndex);
1710 //SaveAllocatorStatsToFile(fileName);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001711 }
1712 }
1713
1714 // Destroy all remaining allocations.
1715 DestroyAllAllocations(allocations);
1716}
1717
Adam Sawicki9a4f5082018-11-23 17:26:05 +01001718static void TestDefragmentationGpu()
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001719{
Adam Sawicki9a4f5082018-11-23 17:26:05 +01001720 wprintf(L"Test defragmentation GPU\n");
Adam Sawicki05704002018-11-08 16:07:29 +01001721 g_MemoryAliasingWarningEnabled = false;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001722
1723 std::vector<AllocInfo> allocations;
1724
1725 // Create that many allocations to surely fill 3 new blocks of 256 MB.
Adam Sawickic6ede152018-11-16 17:04:14 +01001726 const VkDeviceSize bufSizeMin = 5ull * 1024 * 1024;
1727 const VkDeviceSize bufSizeMax = 10ull * 1024 * 1024;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001728 const VkDeviceSize totalSize = 3ull * 256 * 1024 * 1024;
Adam Sawickic6ede152018-11-16 17:04:14 +01001729 const size_t bufCount = (size_t)(totalSize / bufSizeMin);
1730 const size_t percentToLeave = 30;
1731 const size_t percentNonMovable = 3;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001732 RandomNumberGenerator rand = { 234522 };
1733
1734 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001735
1736 VmaAllocationCreateInfo allocCreateInfo = {};
1737 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
Adam Sawickic6ede152018-11-16 17:04:14 +01001738 allocCreateInfo.flags = 0;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001739
1740 // Create all intended buffers.
1741 for(size_t i = 0; i < bufCount; ++i)
1742 {
Adam Sawickic6ede152018-11-16 17:04:14 +01001743 bufCreateInfo.size = align_up(rand.Generate() % (bufSizeMax - bufSizeMin) + bufSizeMin, 32ull);
1744
1745 if(rand.Generate() % 100 < percentNonMovable)
1746 {
1747 bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
1748 VK_BUFFER_USAGE_TRANSFER_DST_BIT |
1749 VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1750 allocCreateInfo.pUserData = (void*)(uintptr_t)2;
1751 }
1752 else
1753 {
1754 // Different usage just to see different color in output from VmaDumpVis.
1755 bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
1756 VK_BUFFER_USAGE_TRANSFER_DST_BIT |
1757 VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1758 // And in JSON dump.
1759 allocCreateInfo.pUserData = (void*)(uintptr_t)1;
1760 }
1761
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001762 AllocInfo alloc;
1763 alloc.CreateBuffer(bufCreateInfo, allocCreateInfo);
1764 alloc.m_StartValue = rand.Generate();
1765 allocations.push_back(alloc);
1766 }
1767
1768 // Destroy some percentage of them.
1769 {
1770 const size_t buffersToDestroy = round_div<size_t>(bufCount * (100 - percentToLeave), 100);
1771 for(size_t i = 0; i < buffersToDestroy; ++i)
1772 {
1773 const size_t index = rand.Generate() % allocations.size();
1774 allocations[index].Destroy();
1775 allocations.erase(allocations.begin() + index);
1776 }
1777 }
1778
1779 // Fill them with meaningful data.
1780 UploadGpuData(allocations.data(), allocations.size());
1781
Adam Sawickic6ede152018-11-16 17:04:14 +01001782 wchar_t fileName[MAX_PATH];
Adam Sawicki9a4f5082018-11-23 17:26:05 +01001783 swprintf_s(fileName, L"GPU_defragmentation_A_before.json");
Adam Sawickic6ede152018-11-16 17:04:14 +01001784 SaveAllocatorStatsToFile(fileName);
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001785
1786 // Defragment using GPU only.
1787 {
1788 const size_t allocCount = allocations.size();
Adam Sawicki440307e2018-10-18 15:05:19 +02001789
Adam Sawickic6ede152018-11-16 17:04:14 +01001790 std::vector<VmaAllocation> allocationPtrs;
1791 std::vector<VkBool32> allocationChanged;
1792 std::vector<size_t> allocationOriginalIndex;
1793
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001794 for(size_t i = 0; i < allocCount; ++i)
1795 {
Adam Sawickic6ede152018-11-16 17:04:14 +01001796 VmaAllocationInfo allocInfo = {};
1797 vmaGetAllocationInfo(g_hAllocator, allocations[i].m_Allocation, &allocInfo);
1798 if((uintptr_t)allocInfo.pUserData == 1) // Movable
1799 {
1800 allocationPtrs.push_back(allocations[i].m_Allocation);
1801 allocationChanged.push_back(VK_FALSE);
1802 allocationOriginalIndex.push_back(i);
1803 }
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001804 }
Adam Sawickic6ede152018-11-16 17:04:14 +01001805
1806 const size_t movableAllocCount = allocationPtrs.size();
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001807
1808 BeginSingleTimeCommands();
1809
1810 VmaDefragmentationInfo2 defragInfo = {};
Adam Sawicki9a4f5082018-11-23 17:26:05 +01001811 defragInfo.flags = 0;
Adam Sawickic6ede152018-11-16 17:04:14 +01001812 defragInfo.allocationCount = (uint32_t)movableAllocCount;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001813 defragInfo.pAllocations = allocationPtrs.data();
Adam Sawicki440307e2018-10-18 15:05:19 +02001814 defragInfo.pAllocationsChanged = allocationChanged.data();
1815 defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001816 defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
1817 defragInfo.commandBuffer = g_hTemporaryCommandBuffer;
1818
1819 VmaDefragmentationStats stats = {};
1820 VmaDefragmentationContext ctx = VK_NULL_HANDLE;
1821 VkResult res = vmaDefragmentationBegin(g_hAllocator, &defragInfo, &stats, &ctx);
1822 TEST(res >= VK_SUCCESS);
1823
1824 EndSingleTimeCommands();
1825
1826 vmaDefragmentationEnd(g_hAllocator, ctx);
1827
Adam Sawickic6ede152018-11-16 17:04:14 +01001828 for(size_t i = 0; i < movableAllocCount; ++i)
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001829 {
1830 if(allocationChanged[i])
1831 {
Adam Sawickic6ede152018-11-16 17:04:14 +01001832 const size_t origAllocIndex = allocationOriginalIndex[i];
1833 RecreateAllocationResource(allocations[origAllocIndex]);
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001834 }
1835 }
1836
Adam Sawicki4d844e22019-01-24 16:21:05 +01001837 // If corruption detection is enabled, GPU defragmentation may not work on
1838 // memory types that have this detection active, e.g. on Intel.
Adam Sawickia1f727c2019-01-24 16:25:11 +01001839 #if !defined(VMA_DEBUG_DETECT_CORRUPTION) || VMA_DEBUG_DETECT_CORRUPTION == 0
Adam Sawicki4d844e22019-01-24 16:21:05 +01001840 TEST(stats.allocationsMoved > 0 && stats.bytesMoved > 0);
1841 TEST(stats.deviceMemoryBlocksFreed > 0 && stats.bytesFreed > 0);
Adam Sawickia1f727c2019-01-24 16:25:11 +01001842 #endif
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001843 }
1844
1845 ValidateGpuData(allocations.data(), allocations.size());
1846
Adam Sawicki9a4f5082018-11-23 17:26:05 +01001847 swprintf_s(fileName, L"GPU_defragmentation_B_after.json");
Adam Sawickic6ede152018-11-16 17:04:14 +01001848 SaveAllocatorStatsToFile(fileName);
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001849
1850 // Destroy all remaining buffers.
1851 for(size_t i = allocations.size(); i--; )
1852 {
1853 allocations[i].Destroy();
1854 }
Adam Sawicki05704002018-11-08 16:07:29 +01001855
1856 g_MemoryAliasingWarningEnabled = true;
Adam Sawickiff0f7b82018-10-18 14:44:05 +02001857}
1858
Adam Sawickic467e282019-12-23 16:38:31 +01001859static void ProcessDefragmentationStepInfo(VmaDefragmentationPassInfo &stepInfo)
Adam Sawickia52012d2019-12-23 15:28:51 +01001860{
1861 std::vector<VkImageMemoryBarrier> beginImageBarriers;
1862 std::vector<VkImageMemoryBarrier> finalizeImageBarriers;
1863
1864 VkPipelineStageFlags beginSrcStageMask = 0;
1865 VkPipelineStageFlags beginDstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
1866
1867 VkPipelineStageFlags finalizeSrcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
1868 VkPipelineStageFlags finalizeDstStageMask = 0;
1869
1870 bool wantsMemoryBarrier = false;
1871
1872 VkMemoryBarrier beginMemoryBarrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER };
1873 VkMemoryBarrier finalizeMemoryBarrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER };
1874
Adam Sawickic467e282019-12-23 16:38:31 +01001875 for(uint32_t i = 0; i < stepInfo.moveCount; ++i)
Adam Sawickia52012d2019-12-23 15:28:51 +01001876 {
1877 VmaAllocationInfo info;
1878 vmaGetAllocationInfo(g_hAllocator, stepInfo.pMoves[i].allocation, &info);
1879
1880 AllocInfo *allocInfo = (AllocInfo *)info.pUserData;
1881
1882 if(allocInfo->m_Image)
1883 {
1884 VkImage newImage;
1885
1886 const VkResult result = vkCreateImage(g_hDevice, &allocInfo->m_ImageInfo, g_Allocs, &newImage);
1887 TEST(result >= VK_SUCCESS);
1888
1889 vkBindImageMemory(g_hDevice, newImage, stepInfo.pMoves[i].memory, stepInfo.pMoves[i].offset);
Adam Sawickic467e282019-12-23 16:38:31 +01001890 allocInfo->m_NewImage = newImage;
Adam Sawickia52012d2019-12-23 15:28:51 +01001891
1892 // Keep track of our pipeline stages that we need to wait/signal on
1893 beginSrcStageMask |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1894 finalizeDstStageMask |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1895
1896 // We need one pipeline barrier and two image layout transitions here
1897 // First we'll have to turn our newly created image into VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
1898 // And the second one is turning the old image into VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
1899
1900 VkImageSubresourceRange subresourceRange = {
1901 VK_IMAGE_ASPECT_COLOR_BIT,
1902 0, VK_REMAINING_MIP_LEVELS,
1903 0, VK_REMAINING_ARRAY_LAYERS
1904 };
1905
1906 VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
1907 barrier.srcAccessMask = 0;
1908 barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1909 barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1910 barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1911 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1912 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1913 barrier.image = newImage;
1914 barrier.subresourceRange = subresourceRange;
1915
1916 beginImageBarriers.push_back(barrier);
1917
1918 // Second barrier to convert the existing image. This one actually needs a real barrier
1919 barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
1920 barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1921 barrier.oldLayout = allocInfo->m_ImageLayout;
1922 barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
1923 barrier.image = allocInfo->m_Image;
1924
1925 beginImageBarriers.push_back(barrier);
1926
1927 // And lastly we need a barrier that turns our new image into the layout of the old one
1928 barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1929 barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
1930 barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1931 barrier.newLayout = allocInfo->m_ImageLayout;
1932 barrier.image = newImage;
1933
1934 finalizeImageBarriers.push_back(barrier);
1935 }
1936 else if(allocInfo->m_Buffer)
1937 {
1938 VkBuffer newBuffer;
1939
1940 const VkResult result = vkCreateBuffer(g_hDevice, &allocInfo->m_BufferInfo, g_Allocs, &newBuffer);
1941 TEST(result >= VK_SUCCESS);
1942
1943 vkBindBufferMemory(g_hDevice, newBuffer, stepInfo.pMoves[i].memory, stepInfo.pMoves[i].offset);
Adam Sawickic467e282019-12-23 16:38:31 +01001944 allocInfo->m_NewBuffer = newBuffer;
Adam Sawickia52012d2019-12-23 15:28:51 +01001945
1946 // Keep track of our pipeline stages that we need to wait/signal on
1947 beginSrcStageMask |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1948 finalizeDstStageMask |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1949
1950 beginMemoryBarrier.srcAccessMask |= VK_ACCESS_MEMORY_WRITE_BIT;
1951 beginMemoryBarrier.dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT;
1952
1953 finalizeMemoryBarrier.srcAccessMask |= VK_ACCESS_TRANSFER_WRITE_BIT;
1954 finalizeMemoryBarrier.dstAccessMask |= VK_ACCESS_MEMORY_READ_BIT;
1955
1956 wantsMemoryBarrier = true;
1957 }
1958 }
1959
1960 if(!beginImageBarriers.empty() || wantsMemoryBarrier)
1961 {
1962 const uint32_t memoryBarrierCount = wantsMemoryBarrier ? 1 : 0;
1963
1964 vkCmdPipelineBarrier(g_hTemporaryCommandBuffer, beginSrcStageMask, beginDstStageMask, 0,
1965 memoryBarrierCount, &beginMemoryBarrier,
1966 0, nullptr,
1967 (uint32_t)beginImageBarriers.size(), beginImageBarriers.data());
1968 }
1969
1970 for(uint32_t i = 0; i < stepInfo.moveCount; ++ i)
1971 {
1972 VmaAllocationInfo info;
1973 vmaGetAllocationInfo(g_hAllocator, stepInfo.pMoves[i].allocation, &info);
1974
1975 AllocInfo *allocInfo = (AllocInfo *)info.pUserData;
1976
1977 if(allocInfo->m_Image)
1978 {
1979 std::vector<VkImageCopy> imageCopies;
1980
1981 // Copy all mips of the source image into the target image
1982 VkOffset3D offset = { 0, 0, 0 };
1983 VkExtent3D extent = allocInfo->m_ImageInfo.extent;
1984
1985 VkImageSubresourceLayers subresourceLayers = {
1986 VK_IMAGE_ASPECT_COLOR_BIT,
1987 0,
1988 0, 1
1989 };
1990
1991 for(uint32_t mip = 0; mip < allocInfo->m_ImageInfo.mipLevels; ++ mip)
1992 {
1993 subresourceLayers.mipLevel = mip;
1994
1995 VkImageCopy imageCopy{
1996 subresourceLayers,
1997 offset,
1998 subresourceLayers,
1999 offset,
2000 extent
2001 };
2002
2003 imageCopies.push_back(imageCopy);
2004
2005 extent.width = std::max(uint32_t(1), extent.width >> 1);
2006 extent.height = std::max(uint32_t(1), extent.height >> 1);
2007 extent.depth = std::max(uint32_t(1), extent.depth >> 1);
2008 }
2009
2010 vkCmdCopyImage(
2011 g_hTemporaryCommandBuffer,
2012 allocInfo->m_Image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
Adam Sawickic467e282019-12-23 16:38:31 +01002013 allocInfo->m_NewImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
Adam Sawickia52012d2019-12-23 15:28:51 +01002014 (uint32_t)imageCopies.size(), imageCopies.data());
Adam Sawickia52012d2019-12-23 15:28:51 +01002015 }
2016 else if(allocInfo->m_Buffer)
2017 {
2018 VkBufferCopy region = {
2019 0,
2020 0,
2021 allocInfo->m_BufferInfo.size };
2022
2023 vkCmdCopyBuffer(g_hTemporaryCommandBuffer,
Adam Sawickic467e282019-12-23 16:38:31 +01002024 allocInfo->m_Buffer, allocInfo->m_NewBuffer,
Adam Sawickia52012d2019-12-23 15:28:51 +01002025 1, &region);
Adam Sawickia52012d2019-12-23 15:28:51 +01002026 }
2027 }
2028
Adam Sawickia52012d2019-12-23 15:28:51 +01002029 if(!finalizeImageBarriers.empty() || wantsMemoryBarrier)
2030 {
2031 const uint32_t memoryBarrierCount = wantsMemoryBarrier ? 1 : 0;
2032
2033 vkCmdPipelineBarrier(g_hTemporaryCommandBuffer, finalizeSrcStageMask, finalizeDstStageMask, 0,
2034 memoryBarrierCount, &finalizeMemoryBarrier,
2035 0, nullptr,
2036 (uint32_t)finalizeImageBarriers.size(), finalizeImageBarriers.data());
2037 }
2038}
2039
2040
2041static void TestDefragmentationIncrementalBasic()
2042{
2043 wprintf(L"Test defragmentation incremental basic\n");
2044 g_MemoryAliasingWarningEnabled = false;
2045
2046 std::vector<AllocInfo> allocations;
2047
2048 // Create that many allocations to surely fill 3 new blocks of 256 MB.
2049 const std::array<uint32_t, 3> imageSizes = { 256, 512, 1024 };
2050 const VkDeviceSize bufSizeMin = 5ull * 1024 * 1024;
2051 const VkDeviceSize bufSizeMax = 10ull * 1024 * 1024;
2052 const VkDeviceSize totalSize = 3ull * 256 * 1024 * 1024;
Adam Sawickic467e282019-12-23 16:38:31 +01002053 const size_t imageCount = totalSize / ((size_t)imageSizes[0] * imageSizes[0] * 4) / 2;
Adam Sawickia52012d2019-12-23 15:28:51 +01002054 const size_t bufCount = (size_t)(totalSize / bufSizeMin) / 2;
2055 const size_t percentToLeave = 30;
2056 RandomNumberGenerator rand = { 234522 };
2057
2058 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
2059 imageInfo.imageType = VK_IMAGE_TYPE_2D;
2060 imageInfo.extent.depth = 1;
2061 imageInfo.mipLevels = 1;
2062 imageInfo.arrayLayers = 1;
2063 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
2064 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
2065 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
2066 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
2067 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
2068
2069 VmaAllocationCreateInfo allocCreateInfo = {};
2070 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
2071 allocCreateInfo.flags = 0;
2072
2073 // Create all intended images.
2074 for(size_t i = 0; i < imageCount; ++i)
2075 {
2076 const uint32_t size = imageSizes[rand.Generate() % 3];
2077
2078 imageInfo.extent.width = size;
2079 imageInfo.extent.height = size;
2080
2081 AllocInfo alloc;
2082 alloc.CreateImage(imageInfo, allocCreateInfo, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
2083 alloc.m_StartValue = 0;
2084
2085 allocations.push_back(alloc);
2086 }
2087
2088 // And all buffers
2089 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2090
2091 for(size_t i = 0; i < bufCount; ++i)
2092 {
2093 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
2094 bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
2095
2096 AllocInfo alloc;
2097 alloc.CreateBuffer(bufCreateInfo, allocCreateInfo);
2098 alloc.m_StartValue = 0;
2099
2100 allocations.push_back(alloc);
2101 }
2102
2103 // Destroy some percentage of them.
2104 {
2105 const size_t allocationsToDestroy = round_div<size_t>((imageCount + bufCount) * (100 - percentToLeave), 100);
2106 for(size_t i = 0; i < allocationsToDestroy; ++i)
2107 {
2108 const size_t index = rand.Generate() % allocations.size();
2109 allocations[index].Destroy();
2110 allocations.erase(allocations.begin() + index);
2111 }
2112 }
2113
2114 {
2115 // Set our user data pointers. A real application should probably be more clever here
2116 const size_t allocationCount = allocations.size();
2117 for(size_t i = 0; i < allocationCount; ++i)
2118 {
2119 AllocInfo &alloc = allocations[i];
2120 vmaSetAllocationUserData(g_hAllocator, alloc.m_Allocation, &alloc);
2121 }
2122 }
2123
2124 // Fill them with meaningful data.
2125 UploadGpuData(allocations.data(), allocations.size());
2126
2127 wchar_t fileName[MAX_PATH];
2128 swprintf_s(fileName, L"GPU_defragmentation_incremental_basic_A_before.json");
2129 SaveAllocatorStatsToFile(fileName);
2130
2131 // Defragment using GPU only.
2132 {
2133 const size_t allocCount = allocations.size();
2134
2135 std::vector<VmaAllocation> allocationPtrs;
2136
2137 for(size_t i = 0; i < allocCount; ++i)
2138 {
Adam Sawickia52012d2019-12-23 15:28:51 +01002139 allocationPtrs.push_back(allocations[i].m_Allocation);
2140 }
2141
2142 const size_t movableAllocCount = allocationPtrs.size();
2143
2144 VmaDefragmentationInfo2 defragInfo = {};
2145 defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
2146 defragInfo.allocationCount = (uint32_t)movableAllocCount;
2147 defragInfo.pAllocations = allocationPtrs.data();
2148 defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;
2149 defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
2150
2151 VmaDefragmentationStats stats = {};
2152 VmaDefragmentationContext ctx = VK_NULL_HANDLE;
2153 VkResult res = vmaDefragmentationBegin(g_hAllocator, &defragInfo, &stats, &ctx);
2154 TEST(res >= VK_SUCCESS);
2155
2156 res = VK_NOT_READY;
2157
Adam Sawickic467e282019-12-23 16:38:31 +01002158 std::vector<VmaDefragmentationPassMoveInfo> moveInfo;
Adam Sawickia52012d2019-12-23 15:28:51 +01002159 moveInfo.resize(movableAllocCount);
2160
2161 while(res == VK_NOT_READY)
2162 {
Adam Sawickic467e282019-12-23 16:38:31 +01002163 VmaDefragmentationPassInfo stepInfo = {};
Adam Sawickia52012d2019-12-23 15:28:51 +01002164 stepInfo.pMoves = moveInfo.data();
2165 stepInfo.moveCount = (uint32_t)moveInfo.size();
2166
Adam Sawickic467e282019-12-23 16:38:31 +01002167 res = vmaBeginDefragmentationPass(g_hAllocator, ctx, &stepInfo);
Adam Sawickia52012d2019-12-23 15:28:51 +01002168 TEST(res >= VK_SUCCESS);
2169
2170 BeginSingleTimeCommands();
Adam Sawickic467e282019-12-23 16:38:31 +01002171 std::vector<void*> newHandles;
Adam Sawickia52012d2019-12-23 15:28:51 +01002172 ProcessDefragmentationStepInfo(stepInfo);
2173 EndSingleTimeCommands();
2174
Adam Sawickic467e282019-12-23 16:38:31 +01002175 res = vmaEndDefragmentationPass(g_hAllocator, ctx);
2176
2177 // Destroy old buffers/images and replace them with new handles.
2178 for(size_t i = 0; i < stepInfo.moveCount; ++i)
2179 {
2180 VmaAllocation const alloc = stepInfo.pMoves[i].allocation;
2181 VmaAllocationInfo vmaAllocInfo;
2182 vmaGetAllocationInfo(g_hAllocator, alloc, &vmaAllocInfo);
2183 AllocInfo* allocInfo = (AllocInfo*)vmaAllocInfo.pUserData;
2184 if(allocInfo->m_Buffer)
2185 {
2186 assert(allocInfo->m_NewBuffer && !allocInfo->m_Image && !allocInfo->m_NewImage);
2187 vkDestroyBuffer(g_hDevice, allocInfo->m_Buffer, g_Allocs);
2188 allocInfo->m_Buffer = allocInfo->m_NewBuffer;
2189 allocInfo->m_NewBuffer = VK_NULL_HANDLE;
2190 }
2191 else if(allocInfo->m_Image)
2192 {
2193 assert(allocInfo->m_NewImage && !allocInfo->m_Buffer && !allocInfo->m_NewBuffer);
2194 vkDestroyImage(g_hDevice, allocInfo->m_Image, g_Allocs);
2195 allocInfo->m_Image = allocInfo->m_NewImage;
2196 allocInfo->m_NewImage = VK_NULL_HANDLE;
2197 }
2198 else
2199 assert(0);
2200 }
Adam Sawickia52012d2019-12-23 15:28:51 +01002201 }
2202
2203 TEST(res >= VK_SUCCESS);
2204 vmaDefragmentationEnd(g_hAllocator, ctx);
2205
2206 // If corruption detection is enabled, GPU defragmentation may not work on
2207 // memory types that have this detection active, e.g. on Intel.
2208#if !defined(VMA_DEBUG_DETECT_CORRUPTION) || VMA_DEBUG_DETECT_CORRUPTION == 0
2209 TEST(stats.allocationsMoved > 0 && stats.bytesMoved > 0);
2210 TEST(stats.deviceMemoryBlocksFreed > 0 && stats.bytesFreed > 0);
2211#endif
2212 }
2213
2214 //ValidateGpuData(allocations.data(), allocations.size());
2215
2216 swprintf_s(fileName, L"GPU_defragmentation_incremental_basic_B_after.json");
2217 SaveAllocatorStatsToFile(fileName);
2218
Adam Sawickic467e282019-12-23 16:38:31 +01002219 // Destroy all remaining buffers and images.
Adam Sawickia52012d2019-12-23 15:28:51 +01002220 for(size_t i = allocations.size(); i--; )
2221 {
2222 allocations[i].Destroy();
2223 }
2224
2225 g_MemoryAliasingWarningEnabled = true;
2226}
2227
2228void TestDefragmentationIncrementalComplex()
2229{
2230 wprintf(L"Test defragmentation incremental complex\n");
2231 g_MemoryAliasingWarningEnabled = false;
2232
2233 std::vector<AllocInfo> allocations;
2234
2235 // Create that many allocations to surely fill 3 new blocks of 256 MB.
2236 const std::array<uint32_t, 3> imageSizes = { 256, 512, 1024 };
2237 const VkDeviceSize bufSizeMin = 5ull * 1024 * 1024;
2238 const VkDeviceSize bufSizeMax = 10ull * 1024 * 1024;
2239 const VkDeviceSize totalSize = 3ull * 256 * 1024 * 1024;
2240 const size_t imageCount = (size_t)(totalSize / (imageSizes[0] * imageSizes[0] * 4)) / 2;
2241 const size_t bufCount = (size_t)(totalSize / bufSizeMin) / 2;
2242 const size_t percentToLeave = 30;
2243 RandomNumberGenerator rand = { 234522 };
2244
2245 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
2246 imageInfo.imageType = VK_IMAGE_TYPE_2D;
2247 imageInfo.extent.depth = 1;
2248 imageInfo.mipLevels = 1;
2249 imageInfo.arrayLayers = 1;
2250 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
2251 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
2252 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
2253 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
2254 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
2255
2256 VmaAllocationCreateInfo allocCreateInfo = {};
2257 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
2258 allocCreateInfo.flags = 0;
2259
2260 // Create all intended images.
2261 for(size_t i = 0; i < imageCount; ++i)
2262 {
2263 const uint32_t size = imageSizes[rand.Generate() % 3];
2264
2265 imageInfo.extent.width = size;
2266 imageInfo.extent.height = size;
2267
2268 AllocInfo alloc;
2269 alloc.CreateImage(imageInfo, allocCreateInfo, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
2270 alloc.m_StartValue = 0;
2271
2272 allocations.push_back(alloc);
2273 }
2274
2275 // And all buffers
2276 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2277
2278 for(size_t i = 0; i < bufCount; ++i)
2279 {
2280 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
2281 bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
2282
2283 AllocInfo alloc;
2284 alloc.CreateBuffer(bufCreateInfo, allocCreateInfo);
2285 alloc.m_StartValue = 0;
2286
2287 allocations.push_back(alloc);
2288 }
2289
2290 // Destroy some percentage of them.
2291 {
2292 const size_t allocationsToDestroy = round_div<size_t>((imageCount + bufCount) * (100 - percentToLeave), 100);
2293 for(size_t i = 0; i < allocationsToDestroy; ++i)
2294 {
2295 const size_t index = rand.Generate() % allocations.size();
2296 allocations[index].Destroy();
2297 allocations.erase(allocations.begin() + index);
2298 }
2299 }
2300
2301 {
2302 // Set our user data pointers. A real application should probably be more clever here
2303 const size_t allocationCount = allocations.size();
2304 for(size_t i = 0; i < allocationCount; ++i)
2305 {
2306 AllocInfo &alloc = allocations[i];
2307 vmaSetAllocationUserData(g_hAllocator, alloc.m_Allocation, &alloc);
2308 }
2309 }
2310
2311 // Fill them with meaningful data.
2312 UploadGpuData(allocations.data(), allocations.size());
2313
2314 wchar_t fileName[MAX_PATH];
2315 swprintf_s(fileName, L"GPU_defragmentation_incremental_complex_A_before.json");
2316 SaveAllocatorStatsToFile(fileName);
2317
2318 std::vector<AllocInfo> additionalAllocations;
2319
2320#define MakeAdditionalAllocation() \
2321 do { \
2322 { \
2323 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16); \
2324 bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT; \
2325 \
2326 AllocInfo alloc; \
2327 alloc.CreateBuffer(bufCreateInfo, allocCreateInfo); \
2328 \
2329 additionalAllocations.push_back(alloc); \
2330 } \
2331 } while(0)
2332
2333 // Defragment using GPU only.
2334 {
2335 const size_t allocCount = allocations.size();
2336
2337 std::vector<VmaAllocation> allocationPtrs;
2338
2339 for(size_t i = 0; i < allocCount; ++i)
2340 {
2341 VmaAllocationInfo allocInfo = {};
2342 vmaGetAllocationInfo(g_hAllocator, allocations[i].m_Allocation, &allocInfo);
2343
2344 allocationPtrs.push_back(allocations[i].m_Allocation);
2345 }
2346
2347 const size_t movableAllocCount = allocationPtrs.size();
2348
2349 VmaDefragmentationInfo2 defragInfo = {};
2350 defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
2351 defragInfo.allocationCount = (uint32_t)movableAllocCount;
2352 defragInfo.pAllocations = allocationPtrs.data();
2353 defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;
2354 defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
2355
2356 VmaDefragmentationStats stats = {};
2357 VmaDefragmentationContext ctx = VK_NULL_HANDLE;
2358 VkResult res = vmaDefragmentationBegin(g_hAllocator, &defragInfo, &stats, &ctx);
2359 TEST(res >= VK_SUCCESS);
2360
2361 res = VK_NOT_READY;
2362
Adam Sawickic467e282019-12-23 16:38:31 +01002363 std::vector<VmaDefragmentationPassMoveInfo> moveInfo;
Adam Sawickia52012d2019-12-23 15:28:51 +01002364 moveInfo.resize(movableAllocCount);
2365
2366 MakeAdditionalAllocation();
2367
2368 while(res == VK_NOT_READY)
2369 {
Adam Sawickic467e282019-12-23 16:38:31 +01002370 VmaDefragmentationPassInfo stepInfo = {};
Adam Sawickia52012d2019-12-23 15:28:51 +01002371 stepInfo.pMoves = moveInfo.data();
2372 stepInfo.moveCount = (uint32_t)moveInfo.size();
2373
Adam Sawickic467e282019-12-23 16:38:31 +01002374 res = vmaBeginDefragmentationPass(g_hAllocator, ctx, &stepInfo);
Adam Sawickia52012d2019-12-23 15:28:51 +01002375 TEST(res >= VK_SUCCESS);
2376
2377 MakeAdditionalAllocation();
2378
2379 BeginSingleTimeCommands();
2380 ProcessDefragmentationStepInfo(stepInfo);
2381 EndSingleTimeCommands();
2382
Adam Sawickic467e282019-12-23 16:38:31 +01002383 res = vmaEndDefragmentationPass(g_hAllocator, ctx);
2384
2385 // Destroy old buffers/images and replace them with new handles.
2386 for(size_t i = 0; i < stepInfo.moveCount; ++i)
2387 {
2388 VmaAllocation const alloc = stepInfo.pMoves[i].allocation;
2389 VmaAllocationInfo vmaAllocInfo;
2390 vmaGetAllocationInfo(g_hAllocator, alloc, &vmaAllocInfo);
2391 AllocInfo* allocInfo = (AllocInfo*)vmaAllocInfo.pUserData;
2392 if(allocInfo->m_Buffer)
2393 {
2394 assert(allocInfo->m_NewBuffer && !allocInfo->m_Image && !allocInfo->m_NewImage);
2395 vkDestroyBuffer(g_hDevice, allocInfo->m_Buffer, g_Allocs);
2396 allocInfo->m_Buffer = allocInfo->m_NewBuffer;
2397 allocInfo->m_NewBuffer = VK_NULL_HANDLE;
2398 }
2399 else if(allocInfo->m_Image)
2400 {
2401 assert(allocInfo->m_NewImage && !allocInfo->m_Buffer && !allocInfo->m_NewBuffer);
2402 vkDestroyImage(g_hDevice, allocInfo->m_Image, g_Allocs);
2403 allocInfo->m_Image = allocInfo->m_NewImage;
2404 allocInfo->m_NewImage = VK_NULL_HANDLE;
2405 }
2406 else
2407 assert(0);
2408 }
Adam Sawickia52012d2019-12-23 15:28:51 +01002409
2410 MakeAdditionalAllocation();
2411 }
2412
2413 TEST(res >= VK_SUCCESS);
2414 vmaDefragmentationEnd(g_hAllocator, ctx);
2415
2416 // If corruption detection is enabled, GPU defragmentation may not work on
2417 // memory types that have this detection active, e.g. on Intel.
2418#if !defined(VMA_DEBUG_DETECT_CORRUPTION) || VMA_DEBUG_DETECT_CORRUPTION == 0
2419 TEST(stats.allocationsMoved > 0 && stats.bytesMoved > 0);
2420 TEST(stats.deviceMemoryBlocksFreed > 0 && stats.bytesFreed > 0);
2421#endif
2422 }
2423
2424 //ValidateGpuData(allocations.data(), allocations.size());
2425
2426 swprintf_s(fileName, L"GPU_defragmentation_incremental_complex_B_after.json");
2427 SaveAllocatorStatsToFile(fileName);
2428
2429 // Destroy all remaining buffers.
2430 for(size_t i = allocations.size(); i--; )
2431 {
2432 allocations[i].Destroy();
2433 }
2434
2435 for(size_t i = additionalAllocations.size(); i--; )
2436 {
2437 additionalAllocations[i].Destroy();
2438 }
2439
2440 g_MemoryAliasingWarningEnabled = true;
2441}
2442
2443
Adam Sawickib8333fb2018-03-13 16:15:53 +01002444static void TestUserData()
2445{
2446 VkResult res;
2447
2448 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2449 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
2450 bufCreateInfo.size = 0x10000;
2451
2452 for(uint32_t testIndex = 0; testIndex < 2; ++testIndex)
2453 {
2454 // Opaque pointer
2455 {
2456
2457 void* numberAsPointer = (void*)(size_t)0xC2501FF3u;
2458 void* pointerToSomething = &res;
2459
2460 VmaAllocationCreateInfo allocCreateInfo = {};
2461 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
2462 allocCreateInfo.pUserData = numberAsPointer;
2463 if(testIndex == 1)
2464 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
2465
2466 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
2467 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002468 TEST(res == VK_SUCCESS);
2469 TEST(allocInfo.pUserData = numberAsPointer);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002470
2471 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002472 TEST(allocInfo.pUserData == numberAsPointer);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002473
2474 vmaSetAllocationUserData(g_hAllocator, alloc, pointerToSomething);
2475 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002476 TEST(allocInfo.pUserData == pointerToSomething);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002477
2478 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2479 }
2480
2481 // String
2482 {
2483 const char* name1 = "Buffer name \\\"\'<>&% \nSecond line .,;=";
2484 const char* name2 = "2";
2485 const size_t name1Len = strlen(name1);
2486
2487 char* name1Buf = new char[name1Len + 1];
2488 strcpy_s(name1Buf, name1Len + 1, name1);
2489
2490 VmaAllocationCreateInfo allocCreateInfo = {};
2491 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
2492 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
2493 allocCreateInfo.pUserData = name1Buf;
2494 if(testIndex == 1)
2495 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
2496
2497 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
2498 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002499 TEST(res == VK_SUCCESS);
2500 TEST(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf);
2501 TEST(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002502
2503 delete[] name1Buf;
2504
2505 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002506 TEST(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002507
2508 vmaSetAllocationUserData(g_hAllocator, alloc, (void*)name2);
2509 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002510 TEST(strcmp(name2, (const char*)allocInfo.pUserData) == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002511
2512 vmaSetAllocationUserData(g_hAllocator, alloc, nullptr);
2513 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002514 TEST(allocInfo.pUserData == nullptr);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002515
2516 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2517 }
2518 }
2519}
2520
Adam Sawicki370ab182018-11-08 16:31:00 +01002521static void TestInvalidAllocations()
2522{
2523 VkResult res;
2524
2525 VmaAllocationCreateInfo allocCreateInfo = {};
2526 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
2527
2528 // Try to allocate 0 bytes.
2529 {
2530 VkMemoryRequirements memReq = {};
2531 memReq.size = 0; // !!!
2532 memReq.alignment = 4;
2533 memReq.memoryTypeBits = UINT32_MAX;
2534 VmaAllocation alloc = VK_NULL_HANDLE;
2535 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
2536 TEST(res == VK_ERROR_VALIDATION_FAILED_EXT && alloc == VK_NULL_HANDLE);
2537 }
2538
2539 // Try to create buffer with size = 0.
2540 {
2541 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2542 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
2543 bufCreateInfo.size = 0; // !!!
2544 VkBuffer buf = VK_NULL_HANDLE;
2545 VmaAllocation alloc = VK_NULL_HANDLE;
2546 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
2547 TEST(res == VK_ERROR_VALIDATION_FAILED_EXT && buf == VK_NULL_HANDLE && alloc == VK_NULL_HANDLE);
2548 }
2549
2550 // Try to create image with one dimension = 0.
2551 {
2552 VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2553 imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
2554 imageCreateInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
2555 imageCreateInfo.extent.width = 128;
2556 imageCreateInfo.extent.height = 0; // !!!
2557 imageCreateInfo.extent.depth = 1;
2558 imageCreateInfo.mipLevels = 1;
2559 imageCreateInfo.arrayLayers = 1;
2560 imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
2561 imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
2562 imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2563 imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
2564 VkImage image = VK_NULL_HANDLE;
2565 VmaAllocation alloc = VK_NULL_HANDLE;
2566 res = vmaCreateImage(g_hAllocator, &imageCreateInfo, &allocCreateInfo, &image, &alloc, nullptr);
2567 TEST(res == VK_ERROR_VALIDATION_FAILED_EXT && image == VK_NULL_HANDLE && alloc == VK_NULL_HANDLE);
2568 }
2569}
2570
Adam Sawickib8333fb2018-03-13 16:15:53 +01002571static void TestMemoryRequirements()
2572{
2573 VkResult res;
2574 VkBuffer buf;
2575 VmaAllocation alloc;
2576 VmaAllocationInfo allocInfo;
2577
2578 const VkPhysicalDeviceMemoryProperties* memProps;
2579 vmaGetMemoryProperties(g_hAllocator, &memProps);
2580
2581 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2582 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
2583 bufInfo.size = 128;
2584
2585 VmaAllocationCreateInfo allocCreateInfo = {};
2586
2587 // No requirements.
2588 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002589 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002590 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2591
2592 // Usage.
2593 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
2594 allocCreateInfo.requiredFlags = 0;
2595 allocCreateInfo.preferredFlags = 0;
2596 allocCreateInfo.memoryTypeBits = UINT32_MAX;
2597
2598 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002599 TEST(res == VK_SUCCESS);
2600 TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002601 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2602
2603 // Required flags, preferred flags.
2604 allocCreateInfo.usage = VMA_MEMORY_USAGE_UNKNOWN;
2605 allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
2606 allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
2607 allocCreateInfo.memoryTypeBits = 0;
2608
2609 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002610 TEST(res == VK_SUCCESS);
2611 TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2612 TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002613 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2614
2615 // memoryTypeBits.
2616 const uint32_t memType = allocInfo.memoryType;
2617 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
2618 allocCreateInfo.requiredFlags = 0;
2619 allocCreateInfo.preferredFlags = 0;
2620 allocCreateInfo.memoryTypeBits = 1u << memType;
2621
2622 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002623 TEST(res == VK_SUCCESS);
2624 TEST(allocInfo.memoryType == memType);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002625 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2626
2627}
2628
2629static void TestBasics()
2630{
2631 VkResult res;
2632
2633 TestMemoryRequirements();
2634
2635 // Lost allocation
2636 {
2637 VmaAllocation alloc = VK_NULL_HANDLE;
2638 vmaCreateLostAllocation(g_hAllocator, &alloc);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002639 TEST(alloc != VK_NULL_HANDLE);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002640
2641 VmaAllocationInfo allocInfo;
2642 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002643 TEST(allocInfo.deviceMemory == VK_NULL_HANDLE);
2644 TEST(allocInfo.size == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002645
2646 vmaFreeMemory(g_hAllocator, alloc);
2647 }
2648
2649 // Allocation that is MAPPED and not necessarily HOST_VISIBLE.
2650 {
2651 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2652 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
2653 bufCreateInfo.size = 128;
2654
2655 VmaAllocationCreateInfo allocCreateInfo = {};
2656 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
2657 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
2658
2659 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
2660 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002661 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002662
2663 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2664
2665 // Same with OWN_MEMORY.
2666 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
2667
2668 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002669 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01002670
2671 vmaDestroyBuffer(g_hAllocator, buf, alloc);
2672 }
2673
2674 TestUserData();
Adam Sawicki370ab182018-11-08 16:31:00 +01002675
2676 TestInvalidAllocations();
Adam Sawickib8333fb2018-03-13 16:15:53 +01002677}
2678
Adam Sawickiddcbf8c2019-11-22 15:22:42 +01002679static void TestPool_MinBlockCount()
2680{
2681#if defined(VMA_DEBUG_MARGIN) && VMA_DEBUG_MARGIN > 0
2682 return;
2683#endif
2684
2685 wprintf(L"Test Pool MinBlockCount\n");
2686 VkResult res;
2687
2688 static const VkDeviceSize ALLOC_SIZE = 512ull * 1024;
2689 static const VkDeviceSize BLOCK_SIZE = ALLOC_SIZE * 2; // Each block can fit 2 allocations.
2690
2691 VmaAllocationCreateInfo allocCreateInfo = {};
2692 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_COPY;
2693
2694 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2695 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
2696 bufCreateInfo.size = ALLOC_SIZE;
2697
2698 VmaPoolCreateInfo poolCreateInfo = {};
2699 poolCreateInfo.blockSize = BLOCK_SIZE;
2700 poolCreateInfo.minBlockCount = 2; // At least 2 blocks always present.
2701 res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &poolCreateInfo.memoryTypeIndex);
2702 TEST(res == VK_SUCCESS);
2703
2704 VmaPool pool = VK_NULL_HANDLE;
2705 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
2706 TEST(res == VK_SUCCESS && pool != VK_NULL_HANDLE);
2707
2708 // Check that there are 2 blocks preallocated as requested.
2709 VmaPoolStats begPoolStats = {};
2710 vmaGetPoolStats(g_hAllocator, pool, &begPoolStats);
2711 TEST(begPoolStats.blockCount == 2 && begPoolStats.allocationCount == 0 && begPoolStats.size == BLOCK_SIZE * 2);
2712
2713 // Allocate 5 buffers to create 3 blocks.
2714 static const uint32_t BUF_COUNT = 5;
2715 allocCreateInfo.pool = pool;
2716 std::vector<AllocInfo> allocs(BUF_COUNT);
2717 for(uint32_t i = 0; i < BUF_COUNT; ++i)
2718 {
2719 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &allocs[i].m_Buffer, &allocs[i].m_Allocation, nullptr);
2720 TEST(res == VK_SUCCESS && allocs[i].m_Buffer != VK_NULL_HANDLE && allocs[i].m_Allocation != VK_NULL_HANDLE);
2721 }
2722
2723 // Check that there are really 3 blocks.
2724 VmaPoolStats poolStats2 = {};
2725 vmaGetPoolStats(g_hAllocator, pool, &poolStats2);
2726 TEST(poolStats2.blockCount == 3 && poolStats2.allocationCount == BUF_COUNT && poolStats2.size == BLOCK_SIZE * 3);
2727
2728 // Free two first allocations to make one block empty.
2729 allocs[0].Destroy();
2730 allocs[1].Destroy();
2731
2732 // Check that there are still 3 blocks due to hysteresis.
2733 VmaPoolStats poolStats3 = {};
2734 vmaGetPoolStats(g_hAllocator, pool, &poolStats3);
2735 TEST(poolStats3.blockCount == 3 && poolStats3.allocationCount == BUF_COUNT - 2 && poolStats2.size == BLOCK_SIZE * 3);
2736
2737 // Free the last allocation to make second block empty.
2738 allocs[BUF_COUNT - 1].Destroy();
2739
2740 // Check that there are now 2 blocks only.
2741 VmaPoolStats poolStats4 = {};
2742 vmaGetPoolStats(g_hAllocator, pool, &poolStats4);
2743 TEST(poolStats4.blockCount == 2 && poolStats4.allocationCount == BUF_COUNT - 3 && poolStats4.size == BLOCK_SIZE * 2);
2744
2745 // Cleanup.
2746 for(size_t i = allocs.size(); i--; )
2747 {
2748 allocs[i].Destroy();
2749 }
2750 vmaDestroyPool(g_hAllocator, pool);
2751}
2752
// Verifies VmaAllocatorCreateInfo::pHeapSizeLimit: a dedicated allocator is
// created with every heap capped at HEAP_SIZE_LIMIT, the limited heap is
// filled completely (dedicated allocations + pool blocks), and one more
// allocation is expected to fail with VK_ERROR_OUT_OF_DEVICE_MEMORY.
void TestHeapSizeLimit()
{
    const VkDeviceSize HEAP_SIZE_LIMIT = 200ull * 1024 * 1024; // 200 MB
    const VkDeviceSize BLOCK_SIZE = 20ull * 1024 * 1024; // 20 MB

    // Apply the same limit to every heap, since we don't know in advance
    // which heap the chosen memory type will come from.
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = HEAP_SIZE_LIMIT;
    }

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.physicalDevice = g_hPhysicalDevice;
    allocatorCreateInfo.device = g_hDevice;
    allocatorCreateInfo.instance = g_hVulkanInstance;
    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;

    // Separate allocator so the limit doesn't affect other tests using g_hAllocator.
    VmaAllocator hAllocator;
    VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &hAllocator);
    TEST(res == VK_SUCCESS);

    struct Item
    {
        VkBuffer hBuf;
        VmaAllocation hAlloc;
    };
    std::vector<Item> items;

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    // 1. Allocate two blocks of dedicated memory, half the size of BLOCK_SIZE.
    // dedicatedAllocInfo is overwritten on each iteration; after the loop it
    // holds the memory type that the rest of the test pins its pool to.
    VmaAllocationInfo dedicatedAllocInfo;
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        bufCreateInfo.size = BLOCK_SIZE / 2;

        for(size_t i = 0; i < 2; ++i)
        {
            Item item;
            res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, &dedicatedAllocInfo);
            TEST(res == VK_SUCCESS);
            items.push_back(item);
        }
    }

    // Create pool to make sure allocations must be out of this memory type.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = dedicatedAllocInfo.memoryType;
    poolCreateInfo.blockSize = BLOCK_SIZE;

    VmaPool hPool;
    res = vmaCreatePool(hAllocator, &poolCreateInfo, &hPool);
    TEST(res == VK_SUCCESS);

    // 2. Allocate normal buffers from all the remaining memory.
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.pool = hPool;

        bufCreateInfo.size = BLOCK_SIZE / 2;

        // Two buffers per block; one block's worth is already consumed by the
        // dedicated allocations above, hence the "- 1".
        const size_t bufCount = ((HEAP_SIZE_LIMIT / BLOCK_SIZE) - 1) * 2;
        for(size_t i = 0; i < bufCount; ++i)
        {
            Item item;
            res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, nullptr);
            TEST(res == VK_SUCCESS);
            items.push_back(item);
        }
    }

    // 3. Allocation of one more (even small) buffer should fail.
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.pool = hPool;

        bufCreateInfo.size = 128;

        VkBuffer hBuf;
        VmaAllocation hAlloc;
        res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &hBuf, &hAlloc, nullptr);
        TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }

    // Destroy everything.
    for(size_t i = items.size(); i--; )
    {
        vmaDestroyBuffer(hAllocator, items[i].hBuf, items[i].hAlloc);
    }

    vmaDestroyPool(hAllocator, hPool);

    vmaDestroyAllocator(hAllocator);
}
2851
Adam Sawicki212a4a62018-06-14 15:44:45 +02002852#if VMA_DEBUG_MARGIN
Adam Sawicki73b16652018-06-11 16:39:25 +02002853static void TestDebugMargin()
2854{
2855 if(VMA_DEBUG_MARGIN == 0)
2856 {
2857 return;
2858 }
2859
2860 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
Adam Sawicki212a4a62018-06-14 15:44:45 +02002861 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
Adam Sawicki73b16652018-06-11 16:39:25 +02002862
2863 VmaAllocationCreateInfo allocCreateInfo = {};
Adam Sawicki212a4a62018-06-14 15:44:45 +02002864 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
Adam Sawicki73b16652018-06-11 16:39:25 +02002865
2866 // Create few buffers of different size.
2867 const size_t BUF_COUNT = 10;
2868 BufferInfo buffers[BUF_COUNT];
2869 VmaAllocationInfo allocInfo[BUF_COUNT];
2870 for(size_t i = 0; i < 10; ++i)
2871 {
2872 bufInfo.size = (VkDeviceSize)(i + 1) * 64;
Adam Sawicki212a4a62018-06-14 15:44:45 +02002873 // Last one will be mapped.
2874 allocCreateInfo.flags = (i == BUF_COUNT - 1) ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
Adam Sawicki73b16652018-06-11 16:39:25 +02002875
2876 VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buffers[i].Buffer, &buffers[i].Allocation, &allocInfo[i]);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002877 TEST(res == VK_SUCCESS);
Adam Sawicki73b16652018-06-11 16:39:25 +02002878 // Margin is preserved also at the beginning of a block.
Adam Sawickib8d34d52018-10-03 17:41:20 +02002879 TEST(allocInfo[i].offset >= VMA_DEBUG_MARGIN);
Adam Sawicki212a4a62018-06-14 15:44:45 +02002880
2881 if(i == BUF_COUNT - 1)
2882 {
2883 // Fill with data.
Adam Sawickib8d34d52018-10-03 17:41:20 +02002884 TEST(allocInfo[i].pMappedData != nullptr);
Adam Sawicki212a4a62018-06-14 15:44:45 +02002885 // Uncomment this "+ 1" to overwrite past end of allocation and check corruption detection.
2886 memset(allocInfo[i].pMappedData, 0xFF, bufInfo.size /* + 1 */);
2887 }
Adam Sawicki73b16652018-06-11 16:39:25 +02002888 }
2889
2890 // Check if their offsets preserve margin between them.
2891 std::sort(allocInfo, allocInfo + BUF_COUNT, [](const VmaAllocationInfo& lhs, const VmaAllocationInfo& rhs) -> bool
2892 {
2893 if(lhs.deviceMemory != rhs.deviceMemory)
2894 {
2895 return lhs.deviceMemory < rhs.deviceMemory;
2896 }
2897 return lhs.offset < rhs.offset;
2898 });
2899 for(size_t i = 1; i < BUF_COUNT; ++i)
2900 {
2901 if(allocInfo[i].deviceMemory == allocInfo[i - 1].deviceMemory)
2902 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02002903 TEST(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN);
Adam Sawicki73b16652018-06-11 16:39:25 +02002904 }
2905 }
2906
Adam Sawicki212a4a62018-06-14 15:44:45 +02002907 VkResult res = vmaCheckCorruption(g_hAllocator, UINT32_MAX);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002908 TEST(res == VK_SUCCESS);
Adam Sawicki212a4a62018-06-14 15:44:45 +02002909
Adam Sawicki73b16652018-06-11 16:39:25 +02002910 // Destroy all buffers.
2911 for(size_t i = BUF_COUNT; i--; )
2912 {
2913 vmaDestroyBuffer(g_hAllocator, buffers[i].Buffer, buffers[i].Allocation);
2914 }
2915}
Adam Sawicki212a4a62018-06-14 15:44:45 +02002916#endif
Adam Sawicki73b16652018-06-11 16:39:25 +02002917
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002918static void TestLinearAllocator()
2919{
2920 wprintf(L"Test linear allocator\n");
2921
2922 RandomNumberGenerator rand{645332};
2923
2924 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2925 sampleBufCreateInfo.size = 1024; // Whatever.
2926 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
2927
2928 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
2929 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
2930
2931 VmaPoolCreateInfo poolCreateInfo = {};
2932 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002933 TEST(res == VK_SUCCESS);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002934
Adam Sawickiee082772018-06-20 17:45:49 +02002935 poolCreateInfo.blockSize = 1024 * 300;
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002936 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
2937 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
2938
2939 VmaPool pool = nullptr;
2940 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002941 TEST(res == VK_SUCCESS);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002942
2943 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
2944
2945 VmaAllocationCreateInfo allocCreateInfo = {};
2946 allocCreateInfo.pool = pool;
2947
2948 constexpr size_t maxBufCount = 100;
2949 std::vector<BufferInfo> bufInfo;
2950
2951 constexpr VkDeviceSize bufSizeMin = 16;
2952 constexpr VkDeviceSize bufSizeMax = 1024;
2953 VmaAllocationInfo allocInfo;
2954 VkDeviceSize prevOffset = 0;
2955
2956 // Test one-time free.
2957 for(size_t i = 0; i < 2; ++i)
2958 {
2959 // Allocate number of buffers of varying size that surely fit into this block.
2960 VkDeviceSize bufSumSize = 0;
2961 for(size_t i = 0; i < maxBufCount; ++i)
2962 {
Adam Sawickifd366b62019-01-24 15:26:43 +01002963 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002964 BufferInfo newBufInfo;
2965 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2966 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002967 TEST(res == VK_SUCCESS);
2968 TEST(i == 0 || allocInfo.offset > prevOffset);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002969 bufInfo.push_back(newBufInfo);
2970 prevOffset = allocInfo.offset;
2971 bufSumSize += bufCreateInfo.size;
2972 }
2973
2974 // Validate pool stats.
2975 VmaPoolStats stats;
2976 vmaGetPoolStats(g_hAllocator, pool, &stats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02002977 TEST(stats.size == poolCreateInfo.blockSize);
2978 TEST(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize);
2979 TEST(stats.allocationCount == bufInfo.size());
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002980
2981 // Destroy the buffers in random order.
2982 while(!bufInfo.empty())
2983 {
2984 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
2985 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
2986 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
2987 bufInfo.erase(bufInfo.begin() + indexToDestroy);
2988 }
2989 }
2990
2991 // Test stack.
2992 {
2993 // Allocate number of buffers of varying size that surely fit into this block.
2994 for(size_t i = 0; i < maxBufCount; ++i)
2995 {
Adam Sawickifd366b62019-01-24 15:26:43 +01002996 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02002997 BufferInfo newBufInfo;
2998 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2999 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003000 TEST(res == VK_SUCCESS);
3001 TEST(i == 0 || allocInfo.offset > prevOffset);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003002 bufInfo.push_back(newBufInfo);
3003 prevOffset = allocInfo.offset;
3004 }
3005
3006 // Destroy few buffers from top of the stack.
3007 for(size_t i = 0; i < maxBufCount / 5; ++i)
3008 {
3009 const BufferInfo& currBufInfo = bufInfo.back();
3010 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3011 bufInfo.pop_back();
3012 }
3013
3014 // Create some more
3015 for(size_t i = 0; i < maxBufCount / 5; ++i)
3016 {
Adam Sawickifd366b62019-01-24 15:26:43 +01003017 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003018 BufferInfo newBufInfo;
3019 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3020 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003021 TEST(res == VK_SUCCESS);
3022 TEST(i == 0 || allocInfo.offset > prevOffset);
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003023 bufInfo.push_back(newBufInfo);
3024 prevOffset = allocInfo.offset;
3025 }
3026
3027 // Destroy the buffers in reverse order.
3028 while(!bufInfo.empty())
3029 {
3030 const BufferInfo& currBufInfo = bufInfo.back();
3031 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3032 bufInfo.pop_back();
3033 }
3034 }
3035
Adam Sawickiee082772018-06-20 17:45:49 +02003036 // Test ring buffer.
3037 {
3038 // Allocate number of buffers that surely fit into this block.
3039 bufCreateInfo.size = bufSizeMax;
3040 for(size_t i = 0; i < maxBufCount; ++i)
3041 {
3042 BufferInfo newBufInfo;
3043 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3044 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003045 TEST(res == VK_SUCCESS);
3046 TEST(i == 0 || allocInfo.offset > prevOffset);
Adam Sawickiee082772018-06-20 17:45:49 +02003047 bufInfo.push_back(newBufInfo);
3048 prevOffset = allocInfo.offset;
3049 }
3050
3051 // Free and allocate new buffers so many times that we make sure we wrap-around at least once.
3052 const size_t buffersPerIter = maxBufCount / 10 - 1;
3053 const size_t iterCount = poolCreateInfo.blockSize / bufCreateInfo.size / buffersPerIter * 2;
3054 for(size_t iter = 0; iter < iterCount; ++iter)
3055 {
3056 for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)
3057 {
3058 const BufferInfo& currBufInfo = bufInfo.front();
3059 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3060 bufInfo.erase(bufInfo.begin());
3061 }
3062 for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)
3063 {
3064 BufferInfo newBufInfo;
3065 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3066 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003067 TEST(res == VK_SUCCESS);
Adam Sawickiee082772018-06-20 17:45:49 +02003068 bufInfo.push_back(newBufInfo);
3069 }
3070 }
3071
3072 // Allocate buffers until we reach out-of-memory.
3073 uint32_t debugIndex = 0;
3074 while(res == VK_SUCCESS)
3075 {
3076 BufferInfo newBufInfo;
3077 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3078 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
3079 if(res == VK_SUCCESS)
3080 {
3081 bufInfo.push_back(newBufInfo);
3082 }
3083 else
3084 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02003085 TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
Adam Sawickiee082772018-06-20 17:45:49 +02003086 }
3087 ++debugIndex;
3088 }
3089
3090 // Destroy the buffers in random order.
3091 while(!bufInfo.empty())
3092 {
3093 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
3094 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
3095 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3096 bufInfo.erase(bufInfo.begin() + indexToDestroy);
3097 }
3098 }
3099
Adam Sawicki680b2252018-08-22 14:47:32 +02003100 // Test double stack.
3101 {
3102 // Allocate number of buffers of varying size that surely fit into this block, alternate from bottom/top.
3103 VkDeviceSize prevOffsetLower = 0;
3104 VkDeviceSize prevOffsetUpper = poolCreateInfo.blockSize;
3105 for(size_t i = 0; i < maxBufCount; ++i)
3106 {
3107 const bool upperAddress = (i % 2) != 0;
3108 if(upperAddress)
3109 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
3110 else
3111 allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
Adam Sawickifd366b62019-01-24 15:26:43 +01003112 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki680b2252018-08-22 14:47:32 +02003113 BufferInfo newBufInfo;
3114 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3115 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003116 TEST(res == VK_SUCCESS);
Adam Sawicki680b2252018-08-22 14:47:32 +02003117 if(upperAddress)
3118 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02003119 TEST(allocInfo.offset < prevOffsetUpper);
Adam Sawicki680b2252018-08-22 14:47:32 +02003120 prevOffsetUpper = allocInfo.offset;
3121 }
3122 else
3123 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02003124 TEST(allocInfo.offset >= prevOffsetLower);
Adam Sawicki680b2252018-08-22 14:47:32 +02003125 prevOffsetLower = allocInfo.offset;
3126 }
Adam Sawickib8d34d52018-10-03 17:41:20 +02003127 TEST(prevOffsetLower < prevOffsetUpper);
Adam Sawicki680b2252018-08-22 14:47:32 +02003128 bufInfo.push_back(newBufInfo);
3129 }
3130
3131 // Destroy few buffers from top of the stack.
3132 for(size_t i = 0; i < maxBufCount / 5; ++i)
3133 {
3134 const BufferInfo& currBufInfo = bufInfo.back();
3135 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3136 bufInfo.pop_back();
3137 }
3138
3139 // Create some more
3140 for(size_t i = 0; i < maxBufCount / 5; ++i)
3141 {
3142 const bool upperAddress = (i % 2) != 0;
3143 if(upperAddress)
3144 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
3145 else
3146 allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
Adam Sawickifd366b62019-01-24 15:26:43 +01003147 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki680b2252018-08-22 14:47:32 +02003148 BufferInfo newBufInfo;
3149 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3150 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003151 TEST(res == VK_SUCCESS);
Adam Sawicki680b2252018-08-22 14:47:32 +02003152 bufInfo.push_back(newBufInfo);
3153 }
3154
3155 // Destroy the buffers in reverse order.
3156 while(!bufInfo.empty())
3157 {
3158 const BufferInfo& currBufInfo = bufInfo.back();
3159 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3160 bufInfo.pop_back();
3161 }
3162
3163 // Create buffers on both sides until we reach out of memory.
3164 prevOffsetLower = 0;
3165 prevOffsetUpper = poolCreateInfo.blockSize;
3166 res = VK_SUCCESS;
3167 for(size_t i = 0; res == VK_SUCCESS; ++i)
3168 {
3169 const bool upperAddress = (i % 2) != 0;
3170 if(upperAddress)
3171 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
3172 else
3173 allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
Adam Sawickifd366b62019-01-24 15:26:43 +01003174 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki680b2252018-08-22 14:47:32 +02003175 BufferInfo newBufInfo;
3176 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3177 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
3178 if(res == VK_SUCCESS)
3179 {
3180 if(upperAddress)
3181 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02003182 TEST(allocInfo.offset < prevOffsetUpper);
Adam Sawicki680b2252018-08-22 14:47:32 +02003183 prevOffsetUpper = allocInfo.offset;
3184 }
3185 else
3186 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02003187 TEST(allocInfo.offset >= prevOffsetLower);
Adam Sawicki680b2252018-08-22 14:47:32 +02003188 prevOffsetLower = allocInfo.offset;
3189 }
Adam Sawickib8d34d52018-10-03 17:41:20 +02003190 TEST(prevOffsetLower < prevOffsetUpper);
Adam Sawicki680b2252018-08-22 14:47:32 +02003191 bufInfo.push_back(newBufInfo);
3192 }
3193 }
3194
3195 // Destroy the buffers in random order.
3196 while(!bufInfo.empty())
3197 {
3198 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
3199 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
3200 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3201 bufInfo.erase(bufInfo.begin() + indexToDestroy);
3202 }
3203
3204 // Create buffers on upper side only, constant size, until we reach out of memory.
3205 prevOffsetUpper = poolCreateInfo.blockSize;
3206 res = VK_SUCCESS;
3207 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
3208 bufCreateInfo.size = bufSizeMax;
3209 for(size_t i = 0; res == VK_SUCCESS; ++i)
3210 {
3211 BufferInfo newBufInfo;
3212 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3213 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
3214 if(res == VK_SUCCESS)
3215 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02003216 TEST(allocInfo.offset < prevOffsetUpper);
Adam Sawicki680b2252018-08-22 14:47:32 +02003217 prevOffsetUpper = allocInfo.offset;
3218 bufInfo.push_back(newBufInfo);
3219 }
3220 }
3221
3222 // Destroy the buffers in reverse order.
3223 while(!bufInfo.empty())
3224 {
3225 const BufferInfo& currBufInfo = bufInfo.back();
3226 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3227 bufInfo.pop_back();
3228 }
3229 }
3230
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003231 // Test ring buffer with lost allocations.
3232 {
3233 // Allocate number of buffers until pool is full.
3234 // Notice CAN_BECOME_LOST flag and call to vmaSetCurrentFrameIndex.
3235 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
3236 res = VK_SUCCESS;
3237 for(size_t i = 0; res == VK_SUCCESS; ++i)
3238 {
3239 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
3240
Adam Sawickifd366b62019-01-24 15:26:43 +01003241 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003242
3243 BufferInfo newBufInfo;
3244 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3245 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
3246 if(res == VK_SUCCESS)
3247 bufInfo.push_back(newBufInfo);
3248 }
3249
3250 // Free first half of it.
3251 {
3252 const size_t buffersToDelete = bufInfo.size() / 2;
3253 for(size_t i = 0; i < buffersToDelete; ++i)
3254 {
3255 vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
3256 }
3257 bufInfo.erase(bufInfo.begin(), bufInfo.begin() + buffersToDelete);
3258 }
3259
3260 // Allocate number of buffers until pool is full again.
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02003261 // This way we make sure ring buffers wraps around, front in in the middle.
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003262 res = VK_SUCCESS;
3263 for(size_t i = 0; res == VK_SUCCESS; ++i)
3264 {
3265 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
3266
Adam Sawickifd366b62019-01-24 15:26:43 +01003267 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003268
3269 BufferInfo newBufInfo;
3270 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3271 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
3272 if(res == VK_SUCCESS)
3273 bufInfo.push_back(newBufInfo);
3274 }
3275
3276 VkDeviceSize firstNewOffset;
3277 {
3278 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
3279
3280 // Allocate a large buffer with CAN_MAKE_OTHER_LOST.
3281 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
3282 bufCreateInfo.size = bufSizeMax;
3283
3284 BufferInfo newBufInfo;
3285 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3286 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003287 TEST(res == VK_SUCCESS);
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003288 bufInfo.push_back(newBufInfo);
3289 firstNewOffset = allocInfo.offset;
3290
3291 // Make sure at least one buffer from the beginning became lost.
3292 vmaGetAllocationInfo(g_hAllocator, bufInfo[0].Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003293 TEST(allocInfo.deviceMemory == VK_NULL_HANDLE);
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003294 }
3295
Adam Sawickifd366b62019-01-24 15:26:43 +01003296#if 0 // TODO Fix and uncomment. Failing on Intel.
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003297 // Allocate more buffers that CAN_MAKE_OTHER_LOST until we wrap-around with this.
3298 size_t newCount = 1;
3299 for(;;)
3300 {
3301 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
3302
Adam Sawickifd366b62019-01-24 15:26:43 +01003303 bufCreateInfo.size = align_up<VkDeviceSize>(bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin), 16);
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003304
3305 BufferInfo newBufInfo;
3306 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3307 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickifd366b62019-01-24 15:26:43 +01003308
Adam Sawickib8d34d52018-10-03 17:41:20 +02003309 TEST(res == VK_SUCCESS);
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003310 bufInfo.push_back(newBufInfo);
3311 ++newCount;
3312 if(allocInfo.offset < firstNewOffset)
3313 break;
3314 }
Adam Sawickifd366b62019-01-24 15:26:43 +01003315#endif
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003316
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02003317 // Delete buffers that are lost.
3318 for(size_t i = bufInfo.size(); i--; )
3319 {
3320 vmaGetAllocationInfo(g_hAllocator, bufInfo[i].Allocation, &allocInfo);
3321 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
3322 {
3323 vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
3324 bufInfo.erase(bufInfo.begin() + i);
3325 }
3326 }
3327
3328 // Test vmaMakePoolAllocationsLost
3329 {
3330 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
3331
Adam Sawicki4d35a5d2019-01-24 15:51:59 +01003332 size_t lostAllocCount = 0;
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02003333 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostAllocCount);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003334 TEST(lostAllocCount > 0);
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02003335
3336 size_t realLostAllocCount = 0;
3337 for(size_t i = 0; i < bufInfo.size(); ++i)
3338 {
3339 vmaGetAllocationInfo(g_hAllocator, bufInfo[i].Allocation, &allocInfo);
3340 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
3341 ++realLostAllocCount;
3342 }
Adam Sawickib8d34d52018-10-03 17:41:20 +02003343 TEST(realLostAllocCount == lostAllocCount);
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02003344 }
3345
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02003346 // Destroy all the buffers in forward order.
3347 for(size_t i = 0; i < bufInfo.size(); ++i)
3348 vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
3349 bufInfo.clear();
3350 }
3351
Adam Sawicki70a683e2018-08-24 15:36:32 +02003352 vmaDestroyPool(g_hAllocator, pool);
3353}
Adam Sawickif799c4f2018-08-23 10:40:30 +02003354
Adam Sawicki70a683e2018-08-24 15:36:32 +02003355static void TestLinearAllocatorMultiBlock()
3356{
3357 wprintf(L"Test linear allocator multi block\n");
3358
3359 RandomNumberGenerator rand{345673};
3360
3361 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
3362 sampleBufCreateInfo.size = 1024 * 1024;
3363 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
3364
3365 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
3366 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
3367
3368 VmaPoolCreateInfo poolCreateInfo = {};
3369 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
3370 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003371 TEST(res == VK_SUCCESS);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003372
3373 VmaPool pool = nullptr;
3374 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003375 TEST(res == VK_SUCCESS);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003376
3377 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
3378
3379 VmaAllocationCreateInfo allocCreateInfo = {};
3380 allocCreateInfo.pool = pool;
3381
3382 std::vector<BufferInfo> bufInfo;
3383 VmaAllocationInfo allocInfo;
3384
3385 // Test one-time free.
3386 {
3387 // Allocate buffers until we move to a second block.
3388 VkDeviceMemory lastMem = VK_NULL_HANDLE;
3389 for(uint32_t i = 0; ; ++i)
3390 {
3391 BufferInfo newBufInfo;
3392 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3393 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003394 TEST(res == VK_SUCCESS);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003395 bufInfo.push_back(newBufInfo);
3396 if(lastMem && allocInfo.deviceMemory != lastMem)
3397 {
3398 break;
3399 }
3400 lastMem = allocInfo.deviceMemory;
3401 }
3402
Adam Sawickib8d34d52018-10-03 17:41:20 +02003403 TEST(bufInfo.size() > 2);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003404
3405 // Make sure that pool has now two blocks.
3406 VmaPoolStats poolStats = {};
3407 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003408 TEST(poolStats.blockCount == 2);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003409
3410 // Destroy all the buffers in random order.
3411 while(!bufInfo.empty())
3412 {
3413 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
3414 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
3415 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3416 bufInfo.erase(bufInfo.begin() + indexToDestroy);
3417 }
3418
3419 // Make sure that pool has now at most one block.
3420 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003421 TEST(poolStats.blockCount <= 1);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003422 }
3423
3424 // Test stack.
3425 {
3426 // Allocate buffers until we move to a second block.
3427 VkDeviceMemory lastMem = VK_NULL_HANDLE;
3428 for(uint32_t i = 0; ; ++i)
3429 {
3430 BufferInfo newBufInfo;
3431 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3432 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003433 TEST(res == VK_SUCCESS);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003434 bufInfo.push_back(newBufInfo);
3435 if(lastMem && allocInfo.deviceMemory != lastMem)
3436 {
3437 break;
3438 }
3439 lastMem = allocInfo.deviceMemory;
3440 }
3441
Adam Sawickib8d34d52018-10-03 17:41:20 +02003442 TEST(bufInfo.size() > 2);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003443
3444 // Add few more buffers.
3445 for(uint32_t i = 0; i < 5; ++i)
3446 {
3447 BufferInfo newBufInfo;
3448 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3449 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003450 TEST(res == VK_SUCCESS);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003451 bufInfo.push_back(newBufInfo);
3452 }
3453
3454 // Make sure that pool has now two blocks.
3455 VmaPoolStats poolStats = {};
3456 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003457 TEST(poolStats.blockCount == 2);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003458
3459 // Delete half of buffers, LIFO.
3460 for(size_t i = 0, countToDelete = bufInfo.size() / 2; i < countToDelete; ++i)
3461 {
3462 const BufferInfo& currBufInfo = bufInfo.back();
3463 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3464 bufInfo.pop_back();
3465 }
3466
3467 // Add one more buffer.
3468 BufferInfo newBufInfo;
3469 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3470 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003471 TEST(res == VK_SUCCESS);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003472 bufInfo.push_back(newBufInfo);
3473
3474 // Make sure that pool has now one block.
3475 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003476 TEST(poolStats.blockCount == 1);
Adam Sawicki70a683e2018-08-24 15:36:32 +02003477
3478 // Delete all the remaining buffers, LIFO.
3479 while(!bufInfo.empty())
3480 {
3481 const BufferInfo& currBufInfo = bufInfo.back();
3482 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3483 bufInfo.pop_back();
3484 }
Adam Sawickif799c4f2018-08-23 10:40:30 +02003485 }
3486
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003487 vmaDestroyPool(g_hAllocator, pool);
3488}
3489
Adam Sawickifd11d752018-08-22 15:02:10 +02003490static void ManuallyTestLinearAllocator()
3491{
3492 VmaStats origStats;
3493 vmaCalculateStats(g_hAllocator, &origStats);
3494
3495 wprintf(L"Manually test linear allocator\n");
3496
3497 RandomNumberGenerator rand{645332};
3498
3499 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
3500 sampleBufCreateInfo.size = 1024; // Whatever.
3501 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
3502
3503 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
3504 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
3505
3506 VmaPoolCreateInfo poolCreateInfo = {};
3507 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003508 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003509
3510 poolCreateInfo.blockSize = 10 * 1024;
3511 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
3512 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
3513
3514 VmaPool pool = nullptr;
3515 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003516 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003517
3518 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
3519
3520 VmaAllocationCreateInfo allocCreateInfo = {};
3521 allocCreateInfo.pool = pool;
3522
3523 std::vector<BufferInfo> bufInfo;
3524 VmaAllocationInfo allocInfo;
3525 BufferInfo newBufInfo;
3526
3527 // Test double stack.
3528 {
3529 /*
3530 Lower: Buffer 32 B, Buffer 1024 B, Buffer 32 B
3531 Upper: Buffer 16 B, Buffer 1024 B, Buffer 128 B
3532
3533 Totally:
3534 1 block allocated
3535 10240 Vulkan bytes
3536 6 new allocations
3537 2256 bytes in allocations
3538 */
3539
3540 bufCreateInfo.size = 32;
3541 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3542 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003543 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003544 bufInfo.push_back(newBufInfo);
3545
3546 bufCreateInfo.size = 1024;
3547 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3548 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003549 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003550 bufInfo.push_back(newBufInfo);
3551
3552 bufCreateInfo.size = 32;
3553 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3554 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003555 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003556 bufInfo.push_back(newBufInfo);
3557
3558 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
3559
3560 bufCreateInfo.size = 128;
3561 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3562 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003563 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003564 bufInfo.push_back(newBufInfo);
3565
3566 bufCreateInfo.size = 1024;
3567 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3568 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003569 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003570 bufInfo.push_back(newBufInfo);
3571
3572 bufCreateInfo.size = 16;
3573 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
3574 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003575 TEST(res == VK_SUCCESS);
Adam Sawickifd11d752018-08-22 15:02:10 +02003576 bufInfo.push_back(newBufInfo);
3577
3578 VmaStats currStats;
3579 vmaCalculateStats(g_hAllocator, &currStats);
3580 VmaPoolStats poolStats;
3581 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
3582
3583 char* statsStr = nullptr;
3584 vmaBuildStatsString(g_hAllocator, &statsStr, VK_TRUE);
3585
3586 // PUT BREAKPOINT HERE TO CHECK.
3587 // Inspect: currStats versus origStats, poolStats, statsStr.
3588 int I = 0;
3589
3590 vmaFreeStatsString(g_hAllocator, statsStr);
3591
3592 // Destroy the buffers in reverse order.
3593 while(!bufInfo.empty())
3594 {
3595 const BufferInfo& currBufInfo = bufInfo.back();
3596 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
3597 bufInfo.pop_back();
3598 }
3599 }
3600
3601 vmaDestroyPool(g_hAllocator, pool);
3602}
3603
Adam Sawicki80927152018-09-07 17:27:23 +02003604static void BenchmarkAlgorithmsCase(FILE* file,
3605 uint32_t algorithm,
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003606 bool empty,
3607 VmaAllocationCreateFlags allocStrategy,
3608 FREE_ORDER freeOrder)
Adam Sawicki0a607132018-08-24 11:18:41 +02003609{
3610 RandomNumberGenerator rand{16223};
3611
3612 const VkDeviceSize bufSizeMin = 32;
3613 const VkDeviceSize bufSizeMax = 1024;
3614 const size_t maxBufCapacity = 10000;
3615 const uint32_t iterationCount = 10;
3616
3617 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
3618 sampleBufCreateInfo.size = bufSizeMax;
3619 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
3620
3621 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
3622 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
3623
3624 VmaPoolCreateInfo poolCreateInfo = {};
3625 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003626 TEST(res == VK_SUCCESS);
Adam Sawicki0a607132018-08-24 11:18:41 +02003627
3628 poolCreateInfo.blockSize = bufSizeMax * maxBufCapacity;
Adam Sawicki80927152018-09-07 17:27:23 +02003629 poolCreateInfo.flags |= algorithm;
Adam Sawicki0a607132018-08-24 11:18:41 +02003630 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
3631
3632 VmaPool pool = nullptr;
3633 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003634 TEST(res == VK_SUCCESS);
Adam Sawicki0a607132018-08-24 11:18:41 +02003635
3636 // Buffer created just to get memory requirements. Never bound to any memory.
3637 VkBuffer dummyBuffer = VK_NULL_HANDLE;
Adam Sawicki1f84f622019-07-02 13:40:01 +02003638 res = vkCreateBuffer(g_hDevice, &sampleBufCreateInfo, g_Allocs, &dummyBuffer);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003639 TEST(res == VK_SUCCESS && dummyBuffer);
Adam Sawicki0a607132018-08-24 11:18:41 +02003640
3641 VkMemoryRequirements memReq = {};
3642 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
3643
Adam Sawicki1f84f622019-07-02 13:40:01 +02003644 vkDestroyBuffer(g_hDevice, dummyBuffer, g_Allocs);
Adam Sawicki0a607132018-08-24 11:18:41 +02003645
3646 VmaAllocationCreateInfo allocCreateInfo = {};
3647 allocCreateInfo.pool = pool;
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003648 allocCreateInfo.flags = allocStrategy;
Adam Sawicki0a607132018-08-24 11:18:41 +02003649
3650 VmaAllocation alloc;
3651 std::vector<VmaAllocation> baseAllocations;
3652
3653 if(!empty)
3654 {
Adam Sawicki1f7f8af2018-10-03 17:37:55 +02003655 // Make allocations up to 1/3 of pool size.
Adam Sawicki0a607132018-08-24 11:18:41 +02003656 VkDeviceSize totalSize = 0;
Adam Sawicki1f7f8af2018-10-03 17:37:55 +02003657 while(totalSize < poolCreateInfo.blockSize / 3)
Adam Sawicki0a607132018-08-24 11:18:41 +02003658 {
Adam Sawicki4d844e22019-01-24 16:21:05 +01003659 // This test intentionally allows sizes that are aligned to 4 or 16 bytes.
3660 // This is theoretically allowed and already uncovered one bug.
Adam Sawicki0a607132018-08-24 11:18:41 +02003661 memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
3662 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003663 TEST(res == VK_SUCCESS);
Adam Sawicki0a607132018-08-24 11:18:41 +02003664 baseAllocations.push_back(alloc);
3665 totalSize += memReq.size;
3666 }
3667
3668 // Delete half of them, choose randomly.
3669 size_t allocsToDelete = baseAllocations.size() / 2;
3670 for(size_t i = 0; i < allocsToDelete; ++i)
3671 {
3672 const size_t index = (size_t)rand.Generate() % baseAllocations.size();
3673 vmaFreeMemory(g_hAllocator, baseAllocations[index]);
3674 baseAllocations.erase(baseAllocations.begin() + index);
3675 }
3676 }
3677
3678 // BENCHMARK
Adam Sawicki1f7f8af2018-10-03 17:37:55 +02003679 const size_t allocCount = maxBufCapacity / 3;
Adam Sawicki0a607132018-08-24 11:18:41 +02003680 std::vector<VmaAllocation> testAllocations;
3681 testAllocations.reserve(allocCount);
3682 duration allocTotalDuration = duration::zero();
3683 duration freeTotalDuration = duration::zero();
3684 for(uint32_t iterationIndex = 0; iterationIndex < iterationCount; ++iterationIndex)
3685 {
3686 // Allocations
3687 time_point allocTimeBeg = std::chrono::high_resolution_clock::now();
3688 for(size_t i = 0; i < allocCount; ++i)
3689 {
3690 memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
3691 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003692 TEST(res == VK_SUCCESS);
Adam Sawicki0a607132018-08-24 11:18:41 +02003693 testAllocations.push_back(alloc);
3694 }
3695 allocTotalDuration += std::chrono::high_resolution_clock::now() - allocTimeBeg;
3696
3697 // Deallocations
3698 switch(freeOrder)
3699 {
3700 case FREE_ORDER::FORWARD:
3701 // Leave testAllocations unchanged.
3702 break;
3703 case FREE_ORDER::BACKWARD:
3704 std::reverse(testAllocations.begin(), testAllocations.end());
3705 break;
3706 case FREE_ORDER::RANDOM:
3707 std::shuffle(testAllocations.begin(), testAllocations.end(), MyUniformRandomNumberGenerator(rand));
3708 break;
3709 default: assert(0);
3710 }
3711
3712 time_point freeTimeBeg = std::chrono::high_resolution_clock::now();
3713 for(size_t i = 0; i < allocCount; ++i)
3714 vmaFreeMemory(g_hAllocator, testAllocations[i]);
3715 freeTotalDuration += std::chrono::high_resolution_clock::now() - freeTimeBeg;
3716
3717 testAllocations.clear();
3718 }
3719
3720 // Delete baseAllocations
3721 while(!baseAllocations.empty())
3722 {
3723 vmaFreeMemory(g_hAllocator, baseAllocations.back());
3724 baseAllocations.pop_back();
3725 }
3726
3727 vmaDestroyPool(g_hAllocator, pool);
3728
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003729 const float allocTotalSeconds = ToFloatSeconds(allocTotalDuration);
3730 const float freeTotalSeconds = ToFloatSeconds(freeTotalDuration);
3731
Adam Sawicki80927152018-09-07 17:27:23 +02003732 printf(" Algorithm=%s %s Allocation=%s FreeOrder=%s: allocations %g s, free %g s\n",
3733 AlgorithmToStr(algorithm),
Adam Sawicki0667e332018-08-24 17:26:44 +02003734 empty ? "Empty" : "Not empty",
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003735 GetAllocationStrategyName(allocStrategy),
Adam Sawicki0a607132018-08-24 11:18:41 +02003736 FREE_ORDER_NAMES[(size_t)freeOrder],
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003737 allocTotalSeconds,
3738 freeTotalSeconds);
3739
3740 if(file)
3741 {
3742 std::string currTime;
3743 CurrentTimeToStr(currTime);
3744
Adam Sawicki80927152018-09-07 17:27:23 +02003745 fprintf(file, "%s,%s,%s,%u,%s,%s,%g,%g\n",
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003746 CODE_DESCRIPTION, currTime.c_str(),
Adam Sawicki80927152018-09-07 17:27:23 +02003747 AlgorithmToStr(algorithm),
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003748 empty ? 1 : 0,
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003749 GetAllocationStrategyName(allocStrategy),
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003750 FREE_ORDER_NAMES[(uint32_t)freeOrder],
3751 allocTotalSeconds,
3752 freeTotalSeconds);
3753 }
Adam Sawicki0a607132018-08-24 11:18:41 +02003754}
3755
Adam Sawicki80927152018-09-07 17:27:23 +02003756static void BenchmarkAlgorithms(FILE* file)
Adam Sawicki0a607132018-08-24 11:18:41 +02003757{
Adam Sawicki80927152018-09-07 17:27:23 +02003758 wprintf(L"Benchmark algorithms\n");
Adam Sawicki0a607132018-08-24 11:18:41 +02003759
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003760 if(file)
3761 {
3762 fprintf(file,
3763 "Code,Time,"
Adam Sawicki80927152018-09-07 17:27:23 +02003764 "Algorithm,Empty,Allocation strategy,Free order,"
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003765 "Allocation time (s),Deallocation time (s)\n");
3766 }
3767
Adam Sawicki0a607132018-08-24 11:18:41 +02003768 uint32_t freeOrderCount = 1;
3769 if(ConfigType >= CONFIG_TYPE::CONFIG_TYPE_LARGE)
3770 freeOrderCount = 3;
3771 else if(ConfigType >= CONFIG_TYPE::CONFIG_TYPE_SMALL)
3772 freeOrderCount = 2;
3773
3774 const uint32_t emptyCount = ConfigType >= CONFIG_TYPE::CONFIG_TYPE_SMALL ? 2 : 1;
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003775 const uint32_t allocStrategyCount = GetAllocationStrategyCount();
Adam Sawicki0a607132018-08-24 11:18:41 +02003776
3777 for(uint32_t freeOrderIndex = 0; freeOrderIndex < freeOrderCount; ++freeOrderIndex)
3778 {
3779 FREE_ORDER freeOrder = FREE_ORDER::COUNT;
3780 switch(freeOrderIndex)
3781 {
3782 case 0: freeOrder = FREE_ORDER::BACKWARD; break;
3783 case 1: freeOrder = FREE_ORDER::FORWARD; break;
3784 case 2: freeOrder = FREE_ORDER::RANDOM; break;
3785 default: assert(0);
3786 }
3787
3788 for(uint32_t emptyIndex = 0; emptyIndex < emptyCount; ++emptyIndex)
3789 {
Adam Sawicki80927152018-09-07 17:27:23 +02003790 for(uint32_t algorithmIndex = 0; algorithmIndex < 3; ++algorithmIndex)
Adam Sawicki0a607132018-08-24 11:18:41 +02003791 {
Adam Sawicki80927152018-09-07 17:27:23 +02003792 uint32_t algorithm = 0;
3793 switch(algorithmIndex)
3794 {
3795 case 0:
3796 break;
3797 case 1:
3798 algorithm = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
3799 break;
3800 case 2:
3801 algorithm = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
3802 break;
3803 default:
3804 assert(0);
3805 }
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003806
Adam Sawicki80927152018-09-07 17:27:23 +02003807 uint32_t currAllocStrategyCount = algorithm != 0 ? 1 : allocStrategyCount;
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003808 for(uint32_t allocStrategyIndex = 0; allocStrategyIndex < currAllocStrategyCount; ++allocStrategyIndex)
3809 {
3810 VmaAllocatorCreateFlags strategy = 0;
Adam Sawicki80927152018-09-07 17:27:23 +02003811 if(currAllocStrategyCount > 1)
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003812 {
3813 switch(allocStrategyIndex)
3814 {
3815 case 0: strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; break;
3816 case 1: strategy = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT; break;
3817 case 2: strategy = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT; break;
3818 default: assert(0);
3819 }
3820 }
3821
Adam Sawicki80927152018-09-07 17:27:23 +02003822 BenchmarkAlgorithmsCase(
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003823 file,
Adam Sawicki80927152018-09-07 17:27:23 +02003824 algorithm,
Adam Sawicki1f7f8af2018-10-03 17:37:55 +02003825 (emptyIndex == 0), // empty
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003826 strategy,
3827 freeOrder); // freeOrder
3828 }
Adam Sawicki0a607132018-08-24 11:18:41 +02003829 }
3830 }
3831 }
3832}
3833
Adam Sawickib8333fb2018-03-13 16:15:53 +01003834static void TestPool_SameSize()
3835{
3836 const VkDeviceSize BUF_SIZE = 1024 * 1024;
3837 const size_t BUF_COUNT = 100;
3838 VkResult res;
3839
3840 RandomNumberGenerator rand{123};
3841
3842 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
3843 bufferInfo.size = BUF_SIZE;
3844 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3845
3846 uint32_t memoryTypeBits = UINT32_MAX;
3847 {
3848 VkBuffer dummyBuffer;
Adam Sawicki1f84f622019-07-02 13:40:01 +02003849 res = vkCreateBuffer(g_hDevice, &bufferInfo, g_Allocs, &dummyBuffer);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003850 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003851
3852 VkMemoryRequirements memReq;
3853 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
3854 memoryTypeBits = memReq.memoryTypeBits;
3855
Adam Sawicki1f84f622019-07-02 13:40:01 +02003856 vkDestroyBuffer(g_hDevice, dummyBuffer, g_Allocs);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003857 }
3858
3859 VmaAllocationCreateInfo poolAllocInfo = {};
3860 poolAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
3861 uint32_t memTypeIndex;
3862 res = vmaFindMemoryTypeIndex(
3863 g_hAllocator,
3864 memoryTypeBits,
3865 &poolAllocInfo,
3866 &memTypeIndex);
3867
3868 VmaPoolCreateInfo poolCreateInfo = {};
3869 poolCreateInfo.memoryTypeIndex = memTypeIndex;
3870 poolCreateInfo.blockSize = BUF_SIZE * BUF_COUNT / 4;
3871 poolCreateInfo.minBlockCount = 1;
3872 poolCreateInfo.maxBlockCount = 4;
3873 poolCreateInfo.frameInUseCount = 0;
3874
3875 VmaPool pool;
3876 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003877 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003878
Adam Sawickia020fb82019-11-02 14:43:06 +01003879 // Test pool name
3880 {
3881 static const char* const POOL_NAME = "Pool name";
3882 vmaSetPoolName(g_hAllocator, pool, POOL_NAME);
3883
3884 const char* fetchedPoolName = nullptr;
3885 vmaGetPoolName(g_hAllocator, pool, &fetchedPoolName);
3886 TEST(strcmp(fetchedPoolName, POOL_NAME) == 0);
3887
Adam Sawickia020fb82019-11-02 14:43:06 +01003888 vmaSetPoolName(g_hAllocator, pool, nullptr);
3889 }
3890
Adam Sawickib8333fb2018-03-13 16:15:53 +01003891 vmaSetCurrentFrameIndex(g_hAllocator, 1);
3892
3893 VmaAllocationCreateInfo allocInfo = {};
3894 allocInfo.pool = pool;
3895 allocInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
3896 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
3897
3898 struct BufItem
3899 {
3900 VkBuffer Buf;
3901 VmaAllocation Alloc;
3902 };
3903 std::vector<BufItem> items;
3904
3905 // Fill entire pool.
3906 for(size_t i = 0; i < BUF_COUNT; ++i)
3907 {
3908 BufItem item;
3909 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003910 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003911 items.push_back(item);
3912 }
3913
3914 // Make sure that another allocation would fail.
3915 {
3916 BufItem item;
3917 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003918 TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003919 }
3920
3921 // Validate that no buffer is lost. Also check that they are not mapped.
3922 for(size_t i = 0; i < items.size(); ++i)
3923 {
3924 VmaAllocationInfo allocInfo;
3925 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003926 TEST(allocInfo.deviceMemory != VK_NULL_HANDLE);
3927 TEST(allocInfo.pMappedData == nullptr);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003928 }
3929
3930 // Free some percent of random items.
3931 {
3932 const size_t PERCENT_TO_FREE = 10;
3933 size_t itemsToFree = items.size() * PERCENT_TO_FREE / 100;
3934 for(size_t i = 0; i < itemsToFree; ++i)
3935 {
3936 size_t index = (size_t)rand.Generate() % items.size();
3937 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
3938 items.erase(items.begin() + index);
3939 }
3940 }
3941
3942 // Randomly allocate and free items.
3943 {
3944 const size_t OPERATION_COUNT = BUF_COUNT;
3945 for(size_t i = 0; i < OPERATION_COUNT; ++i)
3946 {
3947 bool allocate = rand.Generate() % 2 != 0;
3948 if(allocate)
3949 {
3950 if(items.size() < BUF_COUNT)
3951 {
3952 BufItem item;
3953 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003954 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003955 items.push_back(item);
3956 }
3957 }
3958 else // Free
3959 {
3960 if(!items.empty())
3961 {
3962 size_t index = (size_t)rand.Generate() % items.size();
3963 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
3964 items.erase(items.begin() + index);
3965 }
3966 }
3967 }
3968 }
3969
3970 // Allocate up to maximum.
3971 while(items.size() < BUF_COUNT)
3972 {
3973 BufItem item;
3974 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003975 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003976 items.push_back(item);
3977 }
3978
3979 // Validate that no buffer is lost.
3980 for(size_t i = 0; i < items.size(); ++i)
3981 {
3982 VmaAllocationInfo allocInfo;
3983 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003984 TEST(allocInfo.deviceMemory != VK_NULL_HANDLE);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003985 }
3986
3987 // Next frame.
3988 vmaSetCurrentFrameIndex(g_hAllocator, 2);
3989
3990 // Allocate another BUF_COUNT buffers.
3991 for(size_t i = 0; i < BUF_COUNT; ++i)
3992 {
3993 BufItem item;
3994 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02003995 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01003996 items.push_back(item);
3997 }
3998
3999 // Make sure the first BUF_COUNT is lost. Delete them.
4000 for(size_t i = 0; i < BUF_COUNT; ++i)
4001 {
4002 VmaAllocationInfo allocInfo;
4003 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004004 TEST(allocInfo.deviceMemory == VK_NULL_HANDLE);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004005 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
4006 }
4007 items.erase(items.begin(), items.begin() + BUF_COUNT);
4008
4009 // Validate that no buffer is lost.
4010 for(size_t i = 0; i < items.size(); ++i)
4011 {
4012 VmaAllocationInfo allocInfo;
4013 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004014 TEST(allocInfo.deviceMemory != VK_NULL_HANDLE);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004015 }
4016
4017 // Free one item.
4018 vmaDestroyBuffer(g_hAllocator, items.back().Buf, items.back().Alloc);
4019 items.pop_back();
4020
4021 // Validate statistics.
4022 {
4023 VmaPoolStats poolStats = {};
4024 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004025 TEST(poolStats.allocationCount == items.size());
4026 TEST(poolStats.size = BUF_COUNT * BUF_SIZE);
4027 TEST(poolStats.unusedRangeCount == 1);
4028 TEST(poolStats.unusedRangeSizeMax == BUF_SIZE);
4029 TEST(poolStats.unusedSize == BUF_SIZE);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004030 }
4031
4032 // Free all remaining items.
4033 for(size_t i = items.size(); i--; )
4034 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
4035 items.clear();
4036
4037 // Allocate maximum items again.
4038 for(size_t i = 0; i < BUF_COUNT; ++i)
4039 {
4040 BufItem item;
4041 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004042 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004043 items.push_back(item);
4044 }
4045
4046 // Delete every other item.
4047 for(size_t i = 0; i < BUF_COUNT / 2; ++i)
4048 {
4049 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
4050 items.erase(items.begin() + i);
4051 }
4052
4053 // Defragment!
4054 {
4055 std::vector<VmaAllocation> allocationsToDefragment(items.size());
4056 for(size_t i = 0; i < items.size(); ++i)
4057 allocationsToDefragment[i] = items[i].Alloc;
4058
4059 VmaDefragmentationStats defragmentationStats;
4060 res = vmaDefragment(g_hAllocator, allocationsToDefragment.data(), items.size(), nullptr, nullptr, &defragmentationStats);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004061 TEST(res == VK_SUCCESS);
4062 TEST(defragmentationStats.deviceMemoryBlocksFreed == 2);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004063 }
4064
4065 // Free all remaining items.
4066 for(size_t i = items.size(); i--; )
4067 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
4068 items.clear();
4069
4070 ////////////////////////////////////////////////////////////////////////////////
4071 // Test for vmaMakePoolAllocationsLost
4072
4073 // Allocate 4 buffers on frame 10.
4074 vmaSetCurrentFrameIndex(g_hAllocator, 10);
4075 for(size_t i = 0; i < 4; ++i)
4076 {
4077 BufItem item;
4078 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004079 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004080 items.push_back(item);
4081 }
4082
4083 // Touch first 2 of them on frame 11.
4084 vmaSetCurrentFrameIndex(g_hAllocator, 11);
4085 for(size_t i = 0; i < 2; ++i)
4086 {
4087 VmaAllocationInfo allocInfo;
4088 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
4089 }
4090
4091 // vmaMakePoolAllocationsLost. Only remaining 2 should be lost.
4092 size_t lostCount = 0xDEADC0DE;
4093 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004094 TEST(lostCount == 2);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004095
4096 // Make another call. Now 0 should be lost.
4097 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004098 TEST(lostCount == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004099
4100 // Make another call, with null count. Should not crash.
4101 vmaMakePoolAllocationsLost(g_hAllocator, pool, nullptr);
4102
4103 // END: Free all remaining items.
4104 for(size_t i = items.size(); i--; )
4105 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
4106
4107 items.clear();
4108
Adam Sawickid2924172018-06-11 12:48:46 +02004109 ////////////////////////////////////////////////////////////////////////////////
4110 // Test for allocation too large for pool
4111
4112 {
4113 VmaAllocationCreateInfo allocCreateInfo = {};
4114 allocCreateInfo.pool = pool;
4115
4116 VkMemoryRequirements memReq;
4117 memReq.memoryTypeBits = UINT32_MAX;
4118 memReq.alignment = 1;
4119 memReq.size = poolCreateInfo.blockSize + 4;
4120
4121 VmaAllocation alloc = nullptr;
4122 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004123 TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr);
Adam Sawickid2924172018-06-11 12:48:46 +02004124 }
4125
Adam Sawickib8333fb2018-03-13 16:15:53 +01004126 vmaDestroyPool(g_hAllocator, pool);
4127}
4128
Adam Sawickie44c6262018-06-15 14:30:39 +02004129static bool ValidatePattern(const void* pMemory, size_t size, uint8_t pattern)
4130{
4131 const uint8_t* pBytes = (const uint8_t*)pMemory;
4132 for(size_t i = 0; i < size; ++i)
4133 {
4134 if(pBytes[i] != pattern)
4135 {
4136 return false;
4137 }
4138 }
4139 return true;
4140}
4141
// Verifies that allocations are filled with VMA's debug byte patterns:
// 0xDC right after creation and 0xEF right after destruction.
// NOTE(review): these constants presumably correspond to VMA's
// VMA_DEBUG_INITIALIZE_ALLOCATIONS fill values - confirm that the build enables
// that option, otherwise both ValidatePattern checks would fail.
static void TestAllocationsInitialization()
{
    VkResult res;

    const size_t BUF_SIZE = 1024;

    // Create pool.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = BUF_SIZE;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    // CPU_ONLY so the memory is host-visible and can be mapped and inspected.
    VmaAllocationCreateInfo dummyBufAllocCreateInfo = {};
    dummyBufAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    // Single fixed block: all test buffers land in the same VkDeviceMemory,
    // so one persistently mapped buffer keeps the whole block mapped.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.blockSize = BUF_SIZE * 10;
    poolCreateInfo.minBlockCount = 1; // To keep memory alive while pool exists.
    poolCreateInfo.maxBlockCount = 1;
    res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufInfo, &dummyBufAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    TEST(res == VK_SUCCESS);

    VmaAllocationCreateInfo bufAllocCreateInfo = {};
    res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &bufAllocCreateInfo.pool);
    TEST(res == VK_SUCCESS);

    // Create one persistently mapped buffer to keep memory of this block mapped,
    // so that pointer to mapped data will remain (more or less...) valid even
    // after destruction of other allocations.

    bufAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
    VkBuffer firstBuf;
    VmaAllocation firstAlloc;
    res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &firstBuf, &firstAlloc, nullptr);
    TEST(res == VK_SUCCESS);

    // Test buffers.
    // Iteration 0 exercises a persistently mapped allocation, iteration 1 an
    // explicitly mapped/unmapped one - both must observe the same fill patterns.

    for(uint32_t i = 0; i < 2; ++i)
    {
        const bool persistentlyMapped = i == 0;
        bufAllocCreateInfo.flags = persistentlyMapped ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
        VkBuffer buf;
        VmaAllocation alloc;
        VmaAllocationInfo allocInfo;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &buf, &alloc, &allocInfo);
        TEST(res == VK_SUCCESS);

        void* pMappedData;
        if(!persistentlyMapped)
        {
            res = vmaMapMemory(g_hAllocator, alloc, &pMappedData);
            TEST(res == VK_SUCCESS);
        }
        else
        {
            pMappedData = allocInfo.pMappedData;
        }

        // Validate initialized content
        bool valid = ValidatePattern(pMappedData, BUF_SIZE, 0xDC);
        TEST(valid);

        if(!persistentlyMapped)
        {
            vmaUnmapMemory(g_hAllocator, alloc);
        }

        vmaDestroyBuffer(g_hAllocator, buf, alloc);

        // Validate freed content.
        // Deliberately reads through pMappedData AFTER vmaDestroyBuffer: the
        // block stays mapped thanks to firstBuf, so the pointer still targets
        // the (now freed and debug-filled) region inside the block.
        valid = ValidatePattern(pMappedData, BUF_SIZE, 0xEF);
        TEST(valid);
    }

    vmaDestroyBuffer(g_hAllocator, firstBuf, firstAlloc);
    vmaDestroyPool(g_hAllocator, bufAllocCreateInfo.pool);
}
4220
Adam Sawickib8333fb2018-03-13 16:15:53 +01004221static void TestPool_Benchmark(
4222 PoolTestResult& outResult,
4223 const PoolTestConfig& config)
4224{
Adam Sawickib8d34d52018-10-03 17:41:20 +02004225 TEST(config.ThreadCount > 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004226
4227 RandomNumberGenerator mainRand{config.RandSeed};
4228
4229 uint32_t allocationSizeProbabilitySum = std::accumulate(
4230 config.AllocationSizes.begin(),
4231 config.AllocationSizes.end(),
4232 0u,
4233 [](uint32_t sum, const AllocationSize& allocSize) {
4234 return sum + allocSize.Probability;
4235 });
4236
4237 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
4238 bufferInfo.size = 256; // Whatever.
4239 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4240
4241 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
4242 imageInfo.imageType = VK_IMAGE_TYPE_2D;
4243 imageInfo.extent.width = 256; // Whatever.
4244 imageInfo.extent.height = 256; // Whatever.
4245 imageInfo.extent.depth = 1;
4246 imageInfo.mipLevels = 1;
4247 imageInfo.arrayLayers = 1;
4248 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
4249 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // LINEAR if CPU memory.
4250 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
4251 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // TRANSFER_SRC if CPU memory.
4252 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
4253
4254 uint32_t bufferMemoryTypeBits = UINT32_MAX;
4255 {
4256 VkBuffer dummyBuffer;
Adam Sawicki1f84f622019-07-02 13:40:01 +02004257 VkResult res = vkCreateBuffer(g_hDevice, &bufferInfo, g_Allocs, &dummyBuffer);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004258 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004259
4260 VkMemoryRequirements memReq;
4261 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
4262 bufferMemoryTypeBits = memReq.memoryTypeBits;
4263
Adam Sawicki1f84f622019-07-02 13:40:01 +02004264 vkDestroyBuffer(g_hDevice, dummyBuffer, g_Allocs);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004265 }
4266
4267 uint32_t imageMemoryTypeBits = UINT32_MAX;
4268 {
4269 VkImage dummyImage;
Adam Sawicki1f84f622019-07-02 13:40:01 +02004270 VkResult res = vkCreateImage(g_hDevice, &imageInfo, g_Allocs, &dummyImage);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004271 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004272
4273 VkMemoryRequirements memReq;
4274 vkGetImageMemoryRequirements(g_hDevice, dummyImage, &memReq);
4275 imageMemoryTypeBits = memReq.memoryTypeBits;
4276
Adam Sawicki1f84f622019-07-02 13:40:01 +02004277 vkDestroyImage(g_hDevice, dummyImage, g_Allocs);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004278 }
4279
4280 uint32_t memoryTypeBits = 0;
4281 if(config.UsesBuffers() && config.UsesImages())
4282 {
4283 memoryTypeBits = bufferMemoryTypeBits & imageMemoryTypeBits;
4284 if(memoryTypeBits == 0)
4285 {
4286 PrintWarning(L"Cannot test buffers + images in the same memory pool on this GPU.");
4287 return;
4288 }
4289 }
4290 else if(config.UsesBuffers())
4291 memoryTypeBits = bufferMemoryTypeBits;
4292 else if(config.UsesImages())
4293 memoryTypeBits = imageMemoryTypeBits;
4294 else
Adam Sawickib8d34d52018-10-03 17:41:20 +02004295 TEST(0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004296
4297 VmaPoolCreateInfo poolCreateInfo = {};
4298 poolCreateInfo.memoryTypeIndex = 0;
4299 poolCreateInfo.minBlockCount = 1;
4300 poolCreateInfo.maxBlockCount = 1;
4301 poolCreateInfo.blockSize = config.PoolSize;
4302 poolCreateInfo.frameInUseCount = 1;
4303
4304 VmaAllocationCreateInfo dummyAllocCreateInfo = {};
4305 dummyAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
4306 vmaFindMemoryTypeIndex(g_hAllocator, memoryTypeBits, &dummyAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
4307
4308 VmaPool pool;
4309 VkResult res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02004310 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004311
4312 // Start time measurement - after creating pool and initializing data structures.
4313 time_point timeBeg = std::chrono::high_resolution_clock::now();
4314
4315 ////////////////////////////////////////////////////////////////////////////////
4316 // ThreadProc
4317 auto ThreadProc = [&](
4318 PoolTestThreadResult* outThreadResult,
4319 uint32_t randSeed,
4320 HANDLE frameStartEvent,
4321 HANDLE frameEndEvent) -> void
4322 {
4323 RandomNumberGenerator threadRand{randSeed};
4324
4325 outThreadResult->AllocationTimeMin = duration::max();
4326 outThreadResult->AllocationTimeSum = duration::zero();
4327 outThreadResult->AllocationTimeMax = duration::min();
4328 outThreadResult->DeallocationTimeMin = duration::max();
4329 outThreadResult->DeallocationTimeSum = duration::zero();
4330 outThreadResult->DeallocationTimeMax = duration::min();
4331 outThreadResult->AllocationCount = 0;
4332 outThreadResult->DeallocationCount = 0;
4333 outThreadResult->LostAllocationCount = 0;
4334 outThreadResult->LostAllocationTotalSize = 0;
4335 outThreadResult->FailedAllocationCount = 0;
4336 outThreadResult->FailedAllocationTotalSize = 0;
4337
4338 struct Item
4339 {
4340 VkDeviceSize BufferSize;
4341 VkExtent2D ImageSize;
4342 VkBuffer Buf;
4343 VkImage Image;
4344 VmaAllocation Alloc;
4345
4346 VkDeviceSize CalcSizeBytes() const
4347 {
4348 return BufferSize +
4349 ImageSize.width * ImageSize.height * 4;
4350 }
4351 };
4352 std::vector<Item> unusedItems, usedItems;
4353
4354 const size_t threadTotalItemCount = config.TotalItemCount / config.ThreadCount;
4355
4356 // Create all items - all unused, not yet allocated.
4357 for(size_t i = 0; i < threadTotalItemCount; ++i)
4358 {
4359 Item item = {};
4360
4361 uint32_t allocSizeIndex = 0;
4362 uint32_t r = threadRand.Generate() % allocationSizeProbabilitySum;
4363 while(r >= config.AllocationSizes[allocSizeIndex].Probability)
4364 r -= config.AllocationSizes[allocSizeIndex++].Probability;
4365
4366 const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
4367 if(allocSize.BufferSizeMax > 0)
4368 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02004369 TEST(allocSize.BufferSizeMin > 0);
4370 TEST(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004371 if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
4372 item.BufferSize = allocSize.BufferSizeMin;
4373 else
4374 {
4375 item.BufferSize = allocSize.BufferSizeMin + threadRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
4376 item.BufferSize = item.BufferSize / 16 * 16;
4377 }
4378 }
4379 else
4380 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02004381 TEST(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004382 if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
4383 item.ImageSize.width = item.ImageSize.height = allocSize.ImageSizeMax;
4384 else
4385 {
4386 item.ImageSize.width = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
4387 item.ImageSize.height = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
4388 }
4389 }
4390
4391 unusedItems.push_back(item);
4392 }
4393
4394 auto Allocate = [&](Item& item) -> VkResult
4395 {
4396 VmaAllocationCreateInfo allocCreateInfo = {};
4397 allocCreateInfo.pool = pool;
4398 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
4399 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
4400
4401 if(item.BufferSize)
4402 {
4403 bufferInfo.size = item.BufferSize;
4404 PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
4405 return vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocCreateInfo, &item.Buf, &item.Alloc, nullptr);
4406 }
4407 else
4408 {
Adam Sawickib8d34d52018-10-03 17:41:20 +02004409 TEST(item.ImageSize.width && item.ImageSize.height);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004410
4411 imageInfo.extent.width = item.ImageSize.width;
4412 imageInfo.extent.height = item.ImageSize.height;
4413 PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
4414 return vmaCreateImage(g_hAllocator, &imageInfo, &allocCreateInfo, &item.Image, &item.Alloc, nullptr);
4415 }
4416 };
4417
4418 ////////////////////////////////////////////////////////////////////////////////
4419 // Frames
4420 for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
4421 {
4422 WaitForSingleObject(frameStartEvent, INFINITE);
4423
4424 // Always make some percent of used bufs unused, to choose different used ones.
4425 const size_t bufsToMakeUnused = usedItems.size() * config.ItemsToMakeUnusedPercent / 100;
4426 for(size_t i = 0; i < bufsToMakeUnused; ++i)
4427 {
4428 size_t index = threadRand.Generate() % usedItems.size();
4429 unusedItems.push_back(usedItems[index]);
4430 usedItems.erase(usedItems.begin() + index);
4431 }
4432
4433 // Determine which bufs we want to use in this frame.
4434 const size_t usedBufCount = (threadRand.Generate() % (config.UsedItemCountMax - config.UsedItemCountMin) + config.UsedItemCountMin)
4435 / config.ThreadCount;
Adam Sawickib8d34d52018-10-03 17:41:20 +02004436 TEST(usedBufCount < usedItems.size() + unusedItems.size());
Adam Sawickib8333fb2018-03-13 16:15:53 +01004437 // Move some used to unused.
4438 while(usedBufCount < usedItems.size())
4439 {
4440 size_t index = threadRand.Generate() % usedItems.size();
4441 unusedItems.push_back(usedItems[index]);
4442 usedItems.erase(usedItems.begin() + index);
4443 }
4444 // Move some unused to used.
4445 while(usedBufCount > usedItems.size())
4446 {
4447 size_t index = threadRand.Generate() % unusedItems.size();
4448 usedItems.push_back(unusedItems[index]);
4449 unusedItems.erase(unusedItems.begin() + index);
4450 }
4451
4452 uint32_t touchExistingCount = 0;
4453 uint32_t touchLostCount = 0;
4454 uint32_t createSucceededCount = 0;
4455 uint32_t createFailedCount = 0;
4456
4457 // Touch all used bufs. If not created or lost, allocate.
4458 for(size_t i = 0; i < usedItems.size(); ++i)
4459 {
4460 Item& item = usedItems[i];
4461 // Not yet created.
4462 if(item.Alloc == VK_NULL_HANDLE)
4463 {
4464 res = Allocate(item);
4465 ++outThreadResult->AllocationCount;
4466 if(res != VK_SUCCESS)
4467 {
4468 item.Alloc = VK_NULL_HANDLE;
4469 item.Buf = VK_NULL_HANDLE;
4470 ++outThreadResult->FailedAllocationCount;
4471 outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
4472 ++createFailedCount;
4473 }
4474 else
4475 ++createSucceededCount;
4476 }
4477 else
4478 {
4479 // Touch.
4480 VmaAllocationInfo allocInfo;
4481 vmaGetAllocationInfo(g_hAllocator, item.Alloc, &allocInfo);
4482 // Lost.
4483 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
4484 {
4485 ++touchLostCount;
4486
4487 // Destroy.
4488 {
4489 PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
4490 if(item.Buf)
4491 vmaDestroyBuffer(g_hAllocator, item.Buf, item.Alloc);
4492 else
4493 vmaDestroyImage(g_hAllocator, item.Image, item.Alloc);
4494 ++outThreadResult->DeallocationCount;
4495 }
4496 item.Alloc = VK_NULL_HANDLE;
4497 item.Buf = VK_NULL_HANDLE;
4498
4499 ++outThreadResult->LostAllocationCount;
4500 outThreadResult->LostAllocationTotalSize += item.CalcSizeBytes();
4501
4502 // Recreate.
4503 res = Allocate(item);
4504 ++outThreadResult->AllocationCount;
4505 // Creation failed.
4506 if(res != VK_SUCCESS)
4507 {
4508 ++outThreadResult->FailedAllocationCount;
4509 outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
4510 ++createFailedCount;
4511 }
4512 else
4513 ++createSucceededCount;
4514 }
4515 else
4516 ++touchExistingCount;
4517 }
4518 }
4519
4520 /*
4521 printf("Thread %u frame %u: Touch existing %u lost %u, create succeeded %u failed %u\n",
4522 randSeed, frameIndex,
4523 touchExistingCount, touchLostCount,
4524 createSucceededCount, createFailedCount);
4525 */
4526
4527 SetEvent(frameEndEvent);
4528 }
4529
4530 // Free all remaining items.
4531 for(size_t i = usedItems.size(); i--; )
4532 {
4533 PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
4534 if(usedItems[i].Buf)
4535 vmaDestroyBuffer(g_hAllocator, usedItems[i].Buf, usedItems[i].Alloc);
4536 else
4537 vmaDestroyImage(g_hAllocator, usedItems[i].Image, usedItems[i].Alloc);
4538 ++outThreadResult->DeallocationCount;
4539 }
4540 for(size_t i = unusedItems.size(); i--; )
4541 {
4542 PoolDeallocationTimeRegisterObj timeRegisterOb(*outThreadResult);
4543 if(unusedItems[i].Buf)
4544 vmaDestroyBuffer(g_hAllocator, unusedItems[i].Buf, unusedItems[i].Alloc);
4545 else
4546 vmaDestroyImage(g_hAllocator, unusedItems[i].Image, unusedItems[i].Alloc);
4547 ++outThreadResult->DeallocationCount;
4548 }
4549 };
4550
4551 // Launch threads.
4552 uint32_t threadRandSeed = mainRand.Generate();
4553 std::vector<HANDLE> frameStartEvents{config.ThreadCount};
4554 std::vector<HANDLE> frameEndEvents{config.ThreadCount};
4555 std::vector<std::thread> bkgThreads;
4556 std::vector<PoolTestThreadResult> threadResults{config.ThreadCount};
4557 for(uint32_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
4558 {
4559 frameStartEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
4560 frameEndEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
4561 bkgThreads.emplace_back(std::bind(
4562 ThreadProc,
4563 &threadResults[threadIndex],
4564 threadRandSeed + threadIndex,
4565 frameStartEvents[threadIndex],
4566 frameEndEvents[threadIndex]));
4567 }
4568
4569 // Execute frames.
Adam Sawickib8d34d52018-10-03 17:41:20 +02004570 TEST(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004571 for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
4572 {
4573 vmaSetCurrentFrameIndex(g_hAllocator, frameIndex);
4574 for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
4575 SetEvent(frameStartEvents[threadIndex]);
4576 WaitForMultipleObjects(config.ThreadCount, &frameEndEvents[0], TRUE, INFINITE);
4577 }
4578
4579 // Wait for threads finished
4580 for(size_t i = 0; i < bkgThreads.size(); ++i)
4581 {
4582 bkgThreads[i].join();
4583 CloseHandle(frameEndEvents[i]);
4584 CloseHandle(frameStartEvents[i]);
4585 }
4586 bkgThreads.clear();
4587
4588 // Finish time measurement - before destroying pool.
4589 outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;
4590
4591 vmaDestroyPool(g_hAllocator, pool);
4592
4593 outResult.AllocationTimeMin = duration::max();
4594 outResult.AllocationTimeAvg = duration::zero();
4595 outResult.AllocationTimeMax = duration::min();
4596 outResult.DeallocationTimeMin = duration::max();
4597 outResult.DeallocationTimeAvg = duration::zero();
4598 outResult.DeallocationTimeMax = duration::min();
4599 outResult.LostAllocationCount = 0;
4600 outResult.LostAllocationTotalSize = 0;
4601 outResult.FailedAllocationCount = 0;
4602 outResult.FailedAllocationTotalSize = 0;
4603 size_t allocationCount = 0;
4604 size_t deallocationCount = 0;
4605 for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
4606 {
4607 const PoolTestThreadResult& threadResult = threadResults[threadIndex];
4608 outResult.AllocationTimeMin = std::min(outResult.AllocationTimeMin, threadResult.AllocationTimeMin);
4609 outResult.AllocationTimeMax = std::max(outResult.AllocationTimeMax, threadResult.AllocationTimeMax);
4610 outResult.AllocationTimeAvg += threadResult.AllocationTimeSum;
4611 outResult.DeallocationTimeMin = std::min(outResult.DeallocationTimeMin, threadResult.DeallocationTimeMin);
4612 outResult.DeallocationTimeMax = std::max(outResult.DeallocationTimeMax, threadResult.DeallocationTimeMax);
4613 outResult.DeallocationTimeAvg += threadResult.DeallocationTimeSum;
4614 allocationCount += threadResult.AllocationCount;
4615 deallocationCount += threadResult.DeallocationCount;
4616 outResult.FailedAllocationCount += threadResult.FailedAllocationCount;
4617 outResult.FailedAllocationTotalSize += threadResult.FailedAllocationTotalSize;
4618 outResult.LostAllocationCount += threadResult.LostAllocationCount;
4619 outResult.LostAllocationTotalSize += threadResult.LostAllocationTotalSize;
4620 }
4621 if(allocationCount)
4622 outResult.AllocationTimeAvg /= allocationCount;
4623 if(deallocationCount)
4624 outResult.DeallocationTimeAvg /= deallocationCount;
4625}
4626
// Returns true if the byte ranges [ptr1, ptr1+size1) and [ptr2, ptr2+size2)
// overlap. Regions starting at the same address are always considered
// overlapping, even when a size is zero (matches original semantics).
static inline bool MemoryRegionsOverlap(char* ptr1, size_t size1, char* ptr2, size_t size2)
{
    if(ptr1 == ptr2)
        return true;
    // Order the regions by start address, then check whether the lower one
    // extends past the start of the higher one.
    const bool firstIsLower = ptr1 < ptr2;
    char* const loBegin = firstIsLower ? ptr1 : ptr2;
    const size_t loSize = firstIsLower ? size1 : size2;
    char* const hiBegin = firstIsLower ? ptr2 : ptr1;
    return loBegin + loSize > hiBegin;
}
4636
Adam Sawickiefa88c42019-11-18 16:33:56 +01004637static void TestMemoryUsage()
4638{
4639 wprintf(L"Testing memory usage:\n");
4640
Adam Sawicki69185552019-11-18 17:03:34 +01004641 static const VmaMemoryUsage lastUsage = VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED;
Adam Sawickiefa88c42019-11-18 16:33:56 +01004642 for(uint32_t usage = 0; usage <= lastUsage; ++usage)
4643 {
4644 switch(usage)
4645 {
4646 case VMA_MEMORY_USAGE_UNKNOWN: printf(" VMA_MEMORY_USAGE_UNKNOWN:\n"); break;
4647 case VMA_MEMORY_USAGE_GPU_ONLY: printf(" VMA_MEMORY_USAGE_GPU_ONLY:\n"); break;
4648 case VMA_MEMORY_USAGE_CPU_ONLY: printf(" VMA_MEMORY_USAGE_CPU_ONLY:\n"); break;
4649 case VMA_MEMORY_USAGE_CPU_TO_GPU: printf(" VMA_MEMORY_USAGE_CPU_TO_GPU:\n"); break;
4650 case VMA_MEMORY_USAGE_GPU_TO_CPU: printf(" VMA_MEMORY_USAGE_GPU_TO_CPU:\n"); break;
4651 case VMA_MEMORY_USAGE_CPU_COPY: printf(" VMA_MEMORY_USAGE_CPU_COPY:\n"); break;
Adam Sawicki69185552019-11-18 17:03:34 +01004652 case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: printf(" VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:\n"); break;
Adam Sawickiefa88c42019-11-18 16:33:56 +01004653 default: assert(0);
4654 }
4655
4656 auto printResult = [](const char* testName, VkResult res, uint32_t memoryTypeBits, uint32_t memoryTypeIndex)
4657 {
4658 if(res == VK_SUCCESS)
4659 printf(" %s: memoryTypeBits=0x%X, memoryTypeIndex=%u\n", testName, memoryTypeBits, memoryTypeIndex);
4660 else
4661 printf(" %s: memoryTypeBits=0x%X, FAILED with res=%d\n", testName, memoryTypeBits, (int32_t)res);
4662 };
4663
4664 // 1: Buffer for copy
4665 {
4666 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
4667 bufCreateInfo.size = 65536;
4668 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
4669
4670 VkBuffer buf = VK_NULL_HANDLE;
4671 VkResult res = vkCreateBuffer(g_hDevice, &bufCreateInfo, g_Allocs, &buf);
4672 TEST(res == VK_SUCCESS && buf != VK_NULL_HANDLE);
4673
4674 VkMemoryRequirements memReq = {};
4675 vkGetBufferMemoryRequirements(g_hDevice, buf, &memReq);
4676
4677 VmaAllocationCreateInfo allocCreateInfo = {};
4678 allocCreateInfo.usage = (VmaMemoryUsage)usage;
4679 VmaAllocation alloc = VK_NULL_HANDLE;
4680 VmaAllocationInfo allocInfo = {};
4681 res = vmaAllocateMemoryForBuffer(g_hAllocator, buf, &allocCreateInfo, &alloc, &allocInfo);
4682 if(res == VK_SUCCESS)
4683 {
4684 TEST((memReq.memoryTypeBits & (1u << allocInfo.memoryType)) != 0);
4685 res = vkBindBufferMemory(g_hDevice, buf, allocInfo.deviceMemory, allocInfo.offset);
4686 TEST(res == VK_SUCCESS);
4687 }
4688 printResult("Buffer TRANSFER_DST + TRANSFER_SRC", res, memReq.memoryTypeBits, allocInfo.memoryType);
4689 vmaDestroyBuffer(g_hAllocator, buf, alloc);
4690 }
4691
4692 // 2: Vertex buffer
4693 {
4694 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
4695 bufCreateInfo.size = 65536;
4696 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
4697
4698 VkBuffer buf = VK_NULL_HANDLE;
4699 VkResult res = vkCreateBuffer(g_hDevice, &bufCreateInfo, g_Allocs, &buf);
4700 TEST(res == VK_SUCCESS && buf != VK_NULL_HANDLE);
4701
4702 VkMemoryRequirements memReq = {};
4703 vkGetBufferMemoryRequirements(g_hDevice, buf, &memReq);
4704
4705 VmaAllocationCreateInfo allocCreateInfo = {};
4706 allocCreateInfo.usage = (VmaMemoryUsage)usage;
4707 VmaAllocation alloc = VK_NULL_HANDLE;
4708 VmaAllocationInfo allocInfo = {};
4709 res = vmaAllocateMemoryForBuffer(g_hAllocator, buf, &allocCreateInfo, &alloc, &allocInfo);
4710 if(res == VK_SUCCESS)
4711 {
4712 TEST((memReq.memoryTypeBits & (1u << allocInfo.memoryType)) != 0);
4713 res = vkBindBufferMemory(g_hDevice, buf, allocInfo.deviceMemory, allocInfo.offset);
4714 TEST(res == VK_SUCCESS);
4715 }
4716 printResult("Buffer TRANSFER_DST + VERTEX_BUFFER", res, memReq.memoryTypeBits, allocInfo.memoryType);
4717 vmaDestroyBuffer(g_hAllocator, buf, alloc);
4718 }
4719
4720 // 3: Image for copy, OPTIMAL
4721 {
4722 VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
4723 imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
4724 imgCreateInfo.extent.width = 256;
4725 imgCreateInfo.extent.height = 256;
4726 imgCreateInfo.extent.depth = 1;
4727 imgCreateInfo.mipLevels = 1;
4728 imgCreateInfo.arrayLayers = 1;
4729 imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
4730 imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
4731 imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
4732 imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
4733 imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
4734
4735 VkImage img = VK_NULL_HANDLE;
4736 VkResult res = vkCreateImage(g_hDevice, &imgCreateInfo, g_Allocs, &img);
4737 TEST(res == VK_SUCCESS && img != VK_NULL_HANDLE);
4738
4739 VkMemoryRequirements memReq = {};
4740 vkGetImageMemoryRequirements(g_hDevice, img, &memReq);
4741
4742 VmaAllocationCreateInfo allocCreateInfo = {};
4743 allocCreateInfo.usage = (VmaMemoryUsage)usage;
4744 VmaAllocation alloc = VK_NULL_HANDLE;
4745 VmaAllocationInfo allocInfo = {};
4746 res = vmaAllocateMemoryForImage(g_hAllocator, img, &allocCreateInfo, &alloc, &allocInfo);
4747 if(res == VK_SUCCESS)
4748 {
4749 TEST((memReq.memoryTypeBits & (1u << allocInfo.memoryType)) != 0);
4750 res = vkBindImageMemory(g_hDevice, img, allocInfo.deviceMemory, allocInfo.offset);
4751 TEST(res == VK_SUCCESS);
4752 }
4753 printResult("Image OPTIMAL TRANSFER_DST + TRANSFER_SRC", res, memReq.memoryTypeBits, allocInfo.memoryType);
4754
4755 vmaDestroyImage(g_hAllocator, img, alloc);
4756 }
4757
4758 // 4: Image SAMPLED, OPTIMAL
4759 {
4760 VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
4761 imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
4762 imgCreateInfo.extent.width = 256;
4763 imgCreateInfo.extent.height = 256;
4764 imgCreateInfo.extent.depth = 1;
4765 imgCreateInfo.mipLevels = 1;
4766 imgCreateInfo.arrayLayers = 1;
4767 imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
4768 imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
4769 imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
4770 imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
4771 imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
4772
4773 VkImage img = VK_NULL_HANDLE;
4774 VkResult res = vkCreateImage(g_hDevice, &imgCreateInfo, g_Allocs, &img);
4775 TEST(res == VK_SUCCESS && img != VK_NULL_HANDLE);
4776
4777 VkMemoryRequirements memReq = {};
4778 vkGetImageMemoryRequirements(g_hDevice, img, &memReq);
4779
4780 VmaAllocationCreateInfo allocCreateInfo = {};
4781 allocCreateInfo.usage = (VmaMemoryUsage)usage;
4782 VmaAllocation alloc = VK_NULL_HANDLE;
4783 VmaAllocationInfo allocInfo = {};
4784 res = vmaAllocateMemoryForImage(g_hAllocator, img, &allocCreateInfo, &alloc, &allocInfo);
4785 if(res == VK_SUCCESS)
4786 {
4787 TEST((memReq.memoryTypeBits & (1u << allocInfo.memoryType)) != 0);
4788 res = vkBindImageMemory(g_hDevice, img, allocInfo.deviceMemory, allocInfo.offset);
4789 TEST(res == VK_SUCCESS);
4790 }
4791 printResult("Image OPTIMAL TRANSFER_DST + SAMPLED", res, memReq.memoryTypeBits, allocInfo.memoryType);
4792 vmaDestroyImage(g_hAllocator, img, alloc);
4793 }
4794
4795 // 5: Image COLOR_ATTACHMENT, OPTIMAL
4796 {
4797 VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
4798 imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
4799 imgCreateInfo.extent.width = 256;
4800 imgCreateInfo.extent.height = 256;
4801 imgCreateInfo.extent.depth = 1;
4802 imgCreateInfo.mipLevels = 1;
4803 imgCreateInfo.arrayLayers = 1;
4804 imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
4805 imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
4806 imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
4807 imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
4808 imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
4809
4810 VkImage img = VK_NULL_HANDLE;
4811 VkResult res = vkCreateImage(g_hDevice, &imgCreateInfo, g_Allocs, &img);
4812 TEST(res == VK_SUCCESS && img != VK_NULL_HANDLE);
4813
4814 VkMemoryRequirements memReq = {};
4815 vkGetImageMemoryRequirements(g_hDevice, img, &memReq);
4816
4817 VmaAllocationCreateInfo allocCreateInfo = {};
4818 allocCreateInfo.usage = (VmaMemoryUsage)usage;
4819 VmaAllocation alloc = VK_NULL_HANDLE;
4820 VmaAllocationInfo allocInfo = {};
4821 res = vmaAllocateMemoryForImage(g_hAllocator, img, &allocCreateInfo, &alloc, &allocInfo);
4822 if(res == VK_SUCCESS)
4823 {
4824 TEST((memReq.memoryTypeBits & (1u << allocInfo.memoryType)) != 0);
4825 res = vkBindImageMemory(g_hDevice, img, allocInfo.deviceMemory, allocInfo.offset);
4826 TEST(res == VK_SUCCESS);
4827 }
4828 printResult("Image OPTIMAL SAMPLED + COLOR_ATTACHMENT", res, memReq.memoryTypeBits, allocInfo.memoryType);
4829 vmaDestroyImage(g_hAllocator, img, alloc);
4830 }
4831 }
4832}
4833
Adam Sawicki40ffe982019-10-11 15:56:02 +02004834static void TestBudget()
4835{
4836 wprintf(L"Testing budget...\n");
4837
Adam Sawicki353e3672019-11-02 14:12:05 +01004838 static const VkDeviceSize BUF_SIZE = 100ull * 1024 * 1024;
4839 static const uint32_t BUF_COUNT = 4;
Adam Sawicki40ffe982019-10-11 15:56:02 +02004840
4841 for(uint32_t testIndex = 0; testIndex < 2; ++testIndex)
4842 {
Adam Sawicki353e3672019-11-02 14:12:05 +01004843 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
4844
4845 VmaBudget budgetBeg[VK_MAX_MEMORY_HEAPS] = {};
4846 vmaGetBudget(g_hAllocator, budgetBeg);
Adam Sawicki40ffe982019-10-11 15:56:02 +02004847
Adam Sawicki4ac8ff82019-11-18 14:47:33 +01004848 for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
4849 {
4850 TEST(budgetBeg[i].allocationBytes <= budgetBeg[i].blockBytes);
4851 }
4852
Adam Sawicki40ffe982019-10-11 15:56:02 +02004853 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
4854 bufInfo.size = BUF_SIZE;
4855 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4856
4857 VmaAllocationCreateInfo allocCreateInfo = {};
4858 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
4859 if(testIndex == 0)
4860 {
4861 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
4862 }
4863
4864 // CREATE BUFFERS
4865 uint32_t heapIndex = 0;
4866 BufferInfo bufInfos[BUF_COUNT] = {};
4867 for(uint32_t bufIndex = 0; bufIndex < BUF_COUNT; ++bufIndex)
4868 {
4869 VmaAllocationInfo allocInfo;
4870 VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
4871 &bufInfos[bufIndex].Buffer, &bufInfos[bufIndex].Allocation, &allocInfo);
4872 TEST(res == VK_SUCCESS);
4873 if(bufIndex == 0)
4874 {
4875 heapIndex = MemoryTypeToHeap(allocInfo.memoryType);
4876 }
4877 else
4878 {
4879 // All buffers need to fall into the same heap.
4880 TEST(MemoryTypeToHeap(allocInfo.memoryType) == heapIndex);
4881 }
4882 }
4883
Adam Sawicki353e3672019-11-02 14:12:05 +01004884 VmaBudget budgetWithBufs[VK_MAX_MEMORY_HEAPS] = {};
4885 vmaGetBudget(g_hAllocator, budgetWithBufs);
Adam Sawicki40ffe982019-10-11 15:56:02 +02004886
4887 // DESTROY BUFFERS
4888 for(size_t bufIndex = BUF_COUNT; bufIndex--; )
4889 {
4890 vmaDestroyBuffer(g_hAllocator, bufInfos[bufIndex].Buffer, bufInfos[bufIndex].Allocation);
4891 }
4892
Adam Sawicki353e3672019-11-02 14:12:05 +01004893 VmaBudget budgetEnd[VK_MAX_MEMORY_HEAPS] = {};
4894 vmaGetBudget(g_hAllocator, budgetEnd);
Adam Sawicki40ffe982019-10-11 15:56:02 +02004895
4896 // CHECK
4897 for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
4898 {
Adam Sawicki353e3672019-11-02 14:12:05 +01004899 TEST(budgetEnd[i].allocationBytes <= budgetEnd[i].blockBytes);
Adam Sawicki40ffe982019-10-11 15:56:02 +02004900 if(i == heapIndex)
4901 {
Adam Sawicki353e3672019-11-02 14:12:05 +01004902 TEST(budgetEnd[i].allocationBytes == budgetBeg[i].allocationBytes);
4903 TEST(budgetWithBufs[i].allocationBytes == budgetBeg[i].allocationBytes + BUF_SIZE * BUF_COUNT);
4904 TEST(budgetWithBufs[i].blockBytes >= budgetEnd[i].blockBytes);
Adam Sawicki40ffe982019-10-11 15:56:02 +02004905 }
4906 else
4907 {
Adam Sawicki353e3672019-11-02 14:12:05 +01004908 TEST(budgetEnd[i].allocationBytes == budgetEnd[i].allocationBytes &&
4909 budgetEnd[i].allocationBytes == budgetWithBufs[i].allocationBytes);
4910 TEST(budgetEnd[i].blockBytes == budgetEnd[i].blockBytes &&
4911 budgetEnd[i].blockBytes == budgetWithBufs[i].blockBytes);
Adam Sawicki40ffe982019-10-11 15:56:02 +02004912 }
4913 }
4914 }
4915}
4916
// Tests vmaMapMemory/vmaUnmapMemory reference counting and the
// VMA_ALLOCATION_CREATE_MAPPED_BIT (persistently mapped) path.
// Runs the same scenario in three variants: default allocations, allocations
// from a custom pool, and dedicated allocations.
static void TestMapping()
{
    wprintf(L"Testing mapping...\n");

    VkResult res;
    // Memory type observed in the first (TEST_NORMAL) iteration; required by the
    // TEST_POOL iteration to create a pool in the same memory type.
    uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            // Relies on memTypeIndex captured during the previous iteration.
            TEST(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            TEST(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufInfo.size = 0x10000;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // Host-visible so mapping is possible.
        allocCreateInfo.pool = pool; // nullptr in non-pool variants.
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        VmaAllocationInfo allocInfo;

        // Mapped manually

        // Create 2 buffers.
        BufferInfo bufferInfos[3];
        for(size_t i = 0; i < 2; ++i)
        {
            res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
                &bufferInfos[i].Buffer, &bufferInfos[i].Allocation, &allocInfo);
            TEST(res == VK_SUCCESS);
            // Not created with MAPPED_BIT, so no persistent mapping yet.
            TEST(allocInfo.pMappedData == nullptr);
            memTypeIndex = allocInfo.memoryType;
        }

        // Map buffer 0.
        char* data00 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data00);
        TEST(res == VK_SUCCESS && data00 != nullptr);
        // Touch first and last byte of the mapped range - must not crash.
        data00[0xFFFF] = data00[0];

        // Map buffer 0 second time. A second map of the same allocation must
        // return the same pointer (mapping is reference-counted).
        char* data01 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data01);
        TEST(res == VK_SUCCESS && data01 == data00);

        // Map buffer 1. Its mapped range must not overlap buffer 0's.
        char* data1 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[1].Allocation, (void**)&data1);
        TEST(res == VK_SUCCESS && data1 != nullptr);
        TEST(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size));
        data1[0xFFFF] = data1[0];

        // Unmap buffer 0 two times - must balance the two maps, after which the
        // allocation reports no mapped pointer.
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[0].Allocation, &allocInfo);
        TEST(allocInfo.pMappedData == nullptr);

        // Unmap buffer 1.
        vmaUnmapMemory(g_hAllocator, bufferInfos[1].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[1].Allocation, &allocInfo);
        TEST(allocInfo.pMappedData == nullptr);

        // Create 3rd buffer - persistently mapped.
        allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
            &bufferInfos[2].Buffer, &bufferInfos[2].Allocation, &allocInfo);
        TEST(res == VK_SUCCESS && allocInfo.pMappedData != nullptr);

        // Map buffer 2. Manual map of a persistently mapped allocation must
        // return the persistent pointer.
        char* data2 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[2].Allocation, (void**)&data2);
        TEST(res == VK_SUCCESS && data2 == allocInfo.pMappedData);
        data2[0xFFFF] = data2[0];

        // Unmap buffer 2. Only balances the manual map; the persistent mapping
        // from MAPPED_BIT remains in place.
        vmaUnmapMemory(g_hAllocator, bufferInfos[2].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[2].Allocation, &allocInfo);
        TEST(allocInfo.pMappedData == data2);

        // Destroy all buffers.
        for(size_t i = 3; i--; )
            vmaDestroyBuffer(g_hAllocator, bufferInfos[i].Buffer, bufferInfos[i].Allocation);

        vmaDestroyPool(g_hAllocator, pool); // Safe with null pool.
    }
}
5021
Adam Sawickidaa6a552019-06-25 15:26:37 +02005022// Test CREATE_MAPPED with required DEVICE_LOCAL. There was a bug with it.
5023static void TestDeviceLocalMapped()
5024{
5025 VkResult res;
5026
5027 for(uint32_t testIndex = 0; testIndex < 3; ++testIndex)
5028 {
5029 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
5030 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
5031 bufCreateInfo.size = 4096;
5032
5033 VmaPool pool = VK_NULL_HANDLE;
5034 VmaAllocationCreateInfo allocCreateInfo = {};
5035 allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5036 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
5037 if(testIndex == 2)
5038 {
5039 VmaPoolCreateInfo poolCreateInfo = {};
5040 res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &poolCreateInfo.memoryTypeIndex);
5041 TEST(res == VK_SUCCESS);
5042 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
5043 TEST(res == VK_SUCCESS);
5044 allocCreateInfo.pool = pool;
5045 }
5046 else if(testIndex == 1)
5047 {
5048 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
5049 }
5050
5051 VkBuffer buf = VK_NULL_HANDLE;
5052 VmaAllocation alloc = VK_NULL_HANDLE;
5053 VmaAllocationInfo allocInfo = {};
5054 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
5055 TEST(res == VK_SUCCESS && alloc);
5056
5057 VkMemoryPropertyFlags memTypeFlags = 0;
5058 vmaGetMemoryTypeProperties(g_hAllocator, allocInfo.memoryType, &memTypeFlags);
5059 const bool shouldBeMapped = (memTypeFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
5060 TEST((allocInfo.pMappedData != nullptr) == shouldBeMapped);
5061
5062 vmaDestroyBuffer(g_hAllocator, buf, alloc);
5063 vmaDestroyPool(g_hAllocator, pool);
5064 }
5065}
5066
// Stress-tests vmaMapMemory/vmaUnmapMemory concurrently: 16 threads each
// create/map/unmap/destroy their own set of buffers with randomly chosen
// mapping modes, in three variants (default, custom pool, dedicated).
static void TestMappingMultithreaded()
{
    wprintf(L"Testing mapping multithreaded...\n");

    static const uint32_t threadCount = 16;
    static const uint32_t bufferCount = 1024;
    static const uint32_t threadBufferCount = bufferCount / threadCount;

    VkResult res;
    // Shared across threads (captured by reference below); first thread to
    // allocate publishes the memory type used by the TEST_POOL iteration.
    volatile uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            // memTypeIndex was filled in by the previous iteration's threads.
            TEST(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            TEST(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 0x10000;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // Host-visible, mappable.
        allocCreateInfo.pool = pool;
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        std::thread threads[threadCount];
        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
        {
            // Capture everything by copy except memTypeIndex, which is shared output.
            threads[threadIndex] = std::thread([=, &memTypeIndex](){
                // ======== THREAD FUNCTION ========

                // Deterministic per-thread RNG seeded with the thread index.
                RandomNumberGenerator rand{threadIndex};

                enum class MODE
                {
                    // Don't map this buffer at all.
                    DONT_MAP,
                    // Map and quickly unmap.
                    MAP_FOR_MOMENT,
                    // Map and unmap before destruction.
                    MAP_FOR_LONGER,
                    // Map two times. Quickly unmap, second unmap before destruction.
                    MAP_TWO_TIMES,
                    // Create this buffer as persistently mapped.
                    PERSISTENTLY_MAPPED,
                    COUNT
                };
                std::vector<BufferInfo> bufInfos{threadBufferCount};
                std::vector<MODE> bufModes{threadBufferCount};

                for(uint32_t bufferIndex = 0; bufferIndex < threadBufferCount; ++bufferIndex)
                {
                    BufferInfo& bufInfo = bufInfos[bufferIndex];
                    // Pick a random mapping mode for this buffer; remember it for teardown.
                    const MODE mode = (MODE)(rand.Generate() % (uint32_t)MODE::COUNT);
                    bufModes[bufferIndex] = mode;

                    VmaAllocationCreateInfo localAllocCreateInfo = allocCreateInfo;
                    if(mode == MODE::PERSISTENTLY_MAPPED)
                        localAllocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;

                    VmaAllocationInfo allocInfo;
                    VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &localAllocCreateInfo,
                        &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo);
                    TEST(res == VK_SUCCESS);

                    // Publish the memory type for the TEST_POOL iteration
                    // (benign race: all threads should observe the same type).
                    if(memTypeIndex == UINT32_MAX)
                        memTypeIndex = allocInfo.memoryType;

                    char* data = nullptr;

                    if(mode == MODE::PERSISTENTLY_MAPPED)
                    {
                        // MAPPED_BIT must yield a valid persistent pointer.
                        data = (char*)allocInfo.pMappedData;
                        TEST(data != nullptr);
                    }
                    else if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_FOR_LONGER ||
                        mode == MODE::MAP_TWO_TIMES)
                    {
                        TEST(data == nullptr);
                        res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data);
                        TEST(res == VK_SUCCESS && data != nullptr);

                        if(mode == MODE::MAP_TWO_TIMES)
                        {
                            // Second map of the same allocation returns the same pointer.
                            char* data2 = nullptr;
                            res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data2);
                            TEST(res == VK_SUCCESS && data2 == data);
                        }
                    }
                    else if(mode == MODE::DONT_MAP)
                    {
                        TEST(allocInfo.pMappedData == nullptr);
                    }
                    else
                        TEST(0); // Unreachable - all MODE values handled above.

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];

                    if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_TWO_TIMES)
                    {
                        // One unmap now; MAP_TWO_TIMES keeps one map alive until teardown.
                        vmaUnmapMemory(g_hAllocator, bufInfo.Allocation);

                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfo.Allocation, &allocInfo);
                        if(mode == MODE::MAP_FOR_MOMENT)
                            TEST(allocInfo.pMappedData == nullptr);
                        else
                            TEST(allocInfo.pMappedData == data);
                    }

                    // Randomly yield/sleep to shuffle thread interleavings.
                    switch(rand.Generate() % 3)
                    {
                    case 0: Sleep(0); break; // Yield.
                    case 1: Sleep(10); break; // 10 ms
                    // default: No sleep.
                    }

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];
                }

                // Teardown in reverse order: balance outstanding maps, then destroy.
                for(size_t bufferIndex = threadBufferCount; bufferIndex--; )
                {
                    if(bufModes[bufferIndex] == MODE::MAP_FOR_LONGER ||
                        bufModes[bufferIndex] == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfos[bufferIndex].Allocation);

                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfos[bufferIndex].Allocation, &allocInfo);
                        TEST(allocInfo.pMappedData == nullptr);
                    }

                    vmaDestroyBuffer(g_hAllocator, bufInfos[bufferIndex].Buffer, bufInfos[bufferIndex].Allocation);
                }
            });
        }

        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
            threads[threadIndex].join();

        vmaDestroyPool(g_hAllocator, pool); // Safe with null pool.
    }
}
5229
// Emits the CSV column-header row for main-test results into `file`.
// Must stay in sync with the column order produced by WriteMainTestResult().
static void WriteMainTestResultHeader(FILE* file)
{
    static const char* const header =
        "Code,Time,"
        "Threads,Buffers and images,Sizes,Operations,Allocation strategy,Free order,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Total Memory Allocated (B),"
        "Free Range Size Avg (B),"
        "Free Range Size Max (B)\n";
    fputs(header, file);
}
5246
5247static void WriteMainTestResult(
5248 FILE* file,
5249 const char* codeDescription,
5250 const char* testDescription,
5251 const Config& config, const Result& result)
5252{
5253 float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
5254 float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
5255 float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
5256 float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
5257 float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
5258 float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
5259 float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);
5260
Adam Sawicki33d2ce72018-08-27 13:59:13 +02005261 std::string currTime;
5262 CurrentTimeToStr(currTime);
Adam Sawickib8333fb2018-03-13 16:15:53 +01005263
5264 fprintf(file,
5265 "%s,%s,%s,"
Adam Sawickib8333fb2018-03-13 16:15:53 +01005266 "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u\n",
5267 codeDescription,
Adam Sawicki33d2ce72018-08-27 13:59:13 +02005268 currTime.c_str(),
Adam Sawicki740b08f2018-08-27 13:42:07 +02005269 testDescription,
Adam Sawickib8333fb2018-03-13 16:15:53 +01005270 totalTimeSeconds * 1e6f,
5271 allocationTimeMinSeconds * 1e6f,
5272 allocationTimeAvgSeconds * 1e6f,
5273 allocationTimeMaxSeconds * 1e6f,
5274 deallocationTimeMinSeconds * 1e6f,
5275 deallocationTimeAvgSeconds * 1e6f,
5276 deallocationTimeMaxSeconds * 1e6f,
5277 result.TotalMemoryAllocated,
5278 result.FreeRangeSizeAvg,
5279 result.FreeRangeSizeMax);
5280}
5281
// Emits the CSV column-header row for pool-test results into `file`.
// Must stay in sync with the column order produced by WritePoolTestResult().
static void WritePoolTestResultHeader(FILE* file)
{
    static const char* const header =
        "Code,Test,Time,"
        "Config,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Lost Allocation Count,"
        "Lost Allocation Total Size (B),"
        "Failed Allocation Count,"
        "Failed Allocation Total Size (B)\n";
    fputs(header, file);
}
5299
5300static void WritePoolTestResult(
5301 FILE* file,
5302 const char* codeDescription,
5303 const char* testDescription,
5304 const PoolTestConfig& config,
5305 const PoolTestResult& result)
5306{
5307 float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
5308 float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
5309 float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
5310 float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
5311 float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
5312 float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
5313 float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);
5314
Adam Sawicki33d2ce72018-08-27 13:59:13 +02005315 std::string currTime;
5316 CurrentTimeToStr(currTime);
Adam Sawickib8333fb2018-03-13 16:15:53 +01005317
5318 fprintf(file,
5319 "%s,%s,%s,"
5320 "ThreadCount=%u PoolSize=%llu FrameCount=%u TotalItemCount=%u UsedItemCount=%u...%u ItemsToMakeUnusedPercent=%u,"
5321 "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u,%I64u\n",
5322 // General
5323 codeDescription,
5324 testDescription,
Adam Sawicki33d2ce72018-08-27 13:59:13 +02005325 currTime.c_str(),
Adam Sawickib8333fb2018-03-13 16:15:53 +01005326 // Config
5327 config.ThreadCount,
5328 (unsigned long long)config.PoolSize,
5329 config.FrameCount,
5330 config.TotalItemCount,
5331 config.UsedItemCountMin,
5332 config.UsedItemCountMax,
5333 config.ItemsToMakeUnusedPercent,
5334 // Results
5335 totalTimeSeconds * 1e6f,
5336 allocationTimeMinSeconds * 1e6f,
5337 allocationTimeAvgSeconds * 1e6f,
5338 allocationTimeMaxSeconds * 1e6f,
5339 deallocationTimeMinSeconds * 1e6f,
5340 deallocationTimeAvgSeconds * 1e6f,
5341 deallocationTimeMaxSeconds * 1e6f,
5342 result.LostAllocationCount,
5343 result.LostAllocationTotalSize,
5344 result.FailedAllocationCount,
5345 result.FailedAllocationTotalSize);
5346}
5347
5348static void PerformCustomMainTest(FILE* file)
5349{
5350 Config config{};
5351 config.RandSeed = 65735476;
5352 //config.MaxBytesToAllocate = 4ull * 1024 * 1024; // 4 MB
5353 config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
5354 config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
5355 config.FreeOrder = FREE_ORDER::FORWARD;
5356 config.ThreadCount = 16;
5357 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
Adam Sawicki0667e332018-08-24 17:26:44 +02005358 config.AllocationStrategy = 0;
Adam Sawickib8333fb2018-03-13 16:15:53 +01005359
5360 // Buffers
5361 //config.AllocationSizes.push_back({4, 16, 1024});
5362 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
5363
5364 // Images
5365 //config.AllocationSizes.push_back({4, 0, 0, 4, 32});
5366 //config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
5367
5368 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
5369 config.AdditionalOperationCount = 1024;
5370
5371 Result result{};
5372 VkResult res = MainTest(result, config);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005373 TEST(res == VK_SUCCESS);
Adam Sawickib8333fb2018-03-13 16:15:53 +01005374 WriteMainTestResult(file, "Foo", "CustomTest", config, result);
5375}
5376
5377static void PerformCustomPoolTest(FILE* file)
5378{
5379 PoolTestConfig config;
5380 config.PoolSize = 100 * 1024 * 1024;
5381 config.RandSeed = 2345764;
5382 config.ThreadCount = 1;
5383 config.FrameCount = 200;
5384 config.ItemsToMakeUnusedPercent = 2;
5385
5386 AllocationSize allocSize = {};
5387 allocSize.BufferSizeMin = 1024;
5388 allocSize.BufferSizeMax = 1024 * 1024;
5389 allocSize.Probability = 1;
5390 config.AllocationSizes.push_back(allocSize);
5391
5392 allocSize.BufferSizeMin = 0;
5393 allocSize.BufferSizeMax = 0;
5394 allocSize.ImageSizeMin = 128;
5395 allocSize.ImageSizeMax = 1024;
5396 allocSize.Probability = 1;
5397 config.AllocationSizes.push_back(allocSize);
5398
5399 config.PoolSize = config.CalcAvgResourceSize() * 200;
5400 config.UsedItemCountMax = 160;
5401 config.TotalItemCount = config.UsedItemCountMax * 10;
5402 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
5403
5404 g_MemoryAliasingWarningEnabled = false;
5405 PoolTestResult result = {};
5406 TestPool_Benchmark(result, config);
5407 g_MemoryAliasingWarningEnabled = true;
5408
5409 WritePoolTestResult(file, "Code desc", "Test desc", config, result);
5410}
5411
Adam Sawickib8333fb2018-03-13 16:15:53 +01005412static void PerformMainTests(FILE* file)
5413{
5414 uint32_t repeatCount = 1;
5415 if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;
5416
5417 Config config{};
5418 config.RandSeed = 65735476;
5419 config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
5420 config.FreeOrder = FREE_ORDER::FORWARD;
5421
5422 size_t threadCountCount = 1;
5423 switch(ConfigType)
5424 {
5425 case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
5426 case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
5427 case CONFIG_TYPE_AVERAGE: threadCountCount = 3; break;
5428 case CONFIG_TYPE_LARGE: threadCountCount = 5; break;
5429 case CONFIG_TYPE_MAXIMUM: threadCountCount = 7; break;
5430 default: assert(0);
5431 }
Adam Sawicki0667e332018-08-24 17:26:44 +02005432
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02005433 const size_t strategyCount = GetAllocationStrategyCount();
Adam Sawicki0667e332018-08-24 17:26:44 +02005434
Adam Sawickib8333fb2018-03-13 16:15:53 +01005435 for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
5436 {
5437 std::string desc1;
5438
5439 switch(threadCountIndex)
5440 {
5441 case 0:
5442 desc1 += "1_thread";
5443 config.ThreadCount = 1;
5444 config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
5445 break;
5446 case 1:
5447 desc1 += "16_threads+0%_common";
5448 config.ThreadCount = 16;
5449 config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
5450 break;
5451 case 2:
5452 desc1 += "16_threads+50%_common";
5453 config.ThreadCount = 16;
5454 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
5455 break;
5456 case 3:
5457 desc1 += "16_threads+100%_common";
5458 config.ThreadCount = 16;
5459 config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
5460 break;
5461 case 4:
5462 desc1 += "2_threads+0%_common";
5463 config.ThreadCount = 2;
5464 config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
5465 break;
5466 case 5:
5467 desc1 += "2_threads+50%_common";
5468 config.ThreadCount = 2;
5469 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
5470 break;
5471 case 6:
5472 desc1 += "2_threads+100%_common";
5473 config.ThreadCount = 2;
5474 config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
5475 break;
5476 default:
5477 assert(0);
5478 }
5479
5480 // 0 = buffers, 1 = images, 2 = buffers and images
5481 size_t buffersVsImagesCount = 2;
5482 if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
5483 for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
5484 {
5485 std::string desc2 = desc1;
5486 switch(buffersVsImagesIndex)
5487 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02005488 case 0: desc2 += ",Buffers"; break;
5489 case 1: desc2 += ",Images"; break;
5490 case 2: desc2 += ",Buffers+Images"; break;
Adam Sawickib8333fb2018-03-13 16:15:53 +01005491 default: assert(0);
5492 }
5493
5494 // 0 = small, 1 = large, 2 = small and large
5495 size_t smallVsLargeCount = 2;
5496 if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
5497 for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
5498 {
5499 std::string desc3 = desc2;
5500 switch(smallVsLargeIndex)
5501 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02005502 case 0: desc3 += ",Small"; break;
5503 case 1: desc3 += ",Large"; break;
5504 case 2: desc3 += ",Small+Large"; break;
Adam Sawickib8333fb2018-03-13 16:15:53 +01005505 default: assert(0);
5506 }
5507
5508 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
5509 config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
5510 else
5511 config.MaxBytesToAllocate = 4ull * 1024 * 1024;
5512
5513 // 0 = varying sizes min...max, 1 = set of constant sizes
5514 size_t constantSizesCount = 1;
5515 if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
5516 for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
5517 {
5518 std::string desc4 = desc3;
5519 switch(constantSizesIndex)
5520 {
5521 case 0: desc4 += " Varying_sizes"; break;
5522 case 1: desc4 += " Constant_sizes"; break;
5523 default: assert(0);
5524 }
5525
5526 config.AllocationSizes.clear();
5527 // Buffers present
5528 if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
5529 {
5530 // Small
5531 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
5532 {
5533 // Varying size
5534 if(constantSizesIndex == 0)
5535 config.AllocationSizes.push_back({4, 16, 1024});
5536 // Constant sizes
5537 else
5538 {
5539 config.AllocationSizes.push_back({1, 16, 16});
5540 config.AllocationSizes.push_back({1, 64, 64});
5541 config.AllocationSizes.push_back({1, 256, 256});
5542 config.AllocationSizes.push_back({1, 1024, 1024});
5543 }
5544 }
5545 // Large
5546 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
5547 {
5548 // Varying size
5549 if(constantSizesIndex == 0)
5550 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
5551 // Constant sizes
5552 else
5553 {
5554 config.AllocationSizes.push_back({1, 0x10000, 0x10000});
5555 config.AllocationSizes.push_back({1, 0x80000, 0x80000});
5556 config.AllocationSizes.push_back({1, 0x200000, 0x200000});
5557 config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
5558 }
5559 }
5560 }
5561 // Images present
5562 if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
5563 {
5564 // Small
5565 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
5566 {
5567 // Varying size
5568 if(constantSizesIndex == 0)
5569 config.AllocationSizes.push_back({4, 0, 0, 4, 32});
5570 // Constant sizes
5571 else
5572 {
5573 config.AllocationSizes.push_back({1, 0, 0, 4, 4});
5574 config.AllocationSizes.push_back({1, 0, 0, 8, 8});
5575 config.AllocationSizes.push_back({1, 0, 0, 16, 16});
5576 config.AllocationSizes.push_back({1, 0, 0, 32, 32});
5577 }
5578 }
5579 // Large
5580 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
5581 {
5582 // Varying size
5583 if(constantSizesIndex == 0)
5584 config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
5585 // Constant sizes
5586 else
5587 {
5588 config.AllocationSizes.push_back({1, 0, 0, 256, 256});
5589 config.AllocationSizes.push_back({1, 0, 0, 512, 512});
5590 config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
5591 config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
5592 }
5593 }
5594 }
5595
5596 // 0 = 100%, additional_operations = 0, 1 = 50%, 2 = 5%, 3 = 95% additional_operations = a lot
5597 size_t beginBytesToAllocateCount = 1;
5598 if(ConfigType >= CONFIG_TYPE_SMALL) ++beginBytesToAllocateCount;
5599 if(ConfigType >= CONFIG_TYPE_AVERAGE) ++beginBytesToAllocateCount;
5600 if(ConfigType >= CONFIG_TYPE_LARGE) ++beginBytesToAllocateCount;
5601 for(size_t beginBytesToAllocateIndex = 0; beginBytesToAllocateIndex < beginBytesToAllocateCount; ++beginBytesToAllocateIndex)
5602 {
5603 std::string desc5 = desc4;
5604
5605 switch(beginBytesToAllocateIndex)
5606 {
5607 case 0:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005608 desc5 += ",Allocate_100%";
Adam Sawickib8333fb2018-03-13 16:15:53 +01005609 config.BeginBytesToAllocate = config.MaxBytesToAllocate;
5610 config.AdditionalOperationCount = 0;
5611 break;
5612 case 1:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005613 desc5 += ",Allocate_50%+Operations";
Adam Sawickib8333fb2018-03-13 16:15:53 +01005614 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 50 / 100;
5615 config.AdditionalOperationCount = 1024;
5616 break;
5617 case 2:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005618 desc5 += ",Allocate_5%+Operations";
Adam Sawickib8333fb2018-03-13 16:15:53 +01005619 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
5620 config.AdditionalOperationCount = 1024;
5621 break;
5622 case 3:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005623 desc5 += ",Allocate_95%+Operations";
Adam Sawickib8333fb2018-03-13 16:15:53 +01005624 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 95 / 100;
5625 config.AdditionalOperationCount = 1024;
5626 break;
5627 default:
5628 assert(0);
5629 }
5630
Adam Sawicki0667e332018-08-24 17:26:44 +02005631 for(size_t strategyIndex = 0; strategyIndex < strategyCount; ++strategyIndex)
Adam Sawickib8333fb2018-03-13 16:15:53 +01005632 {
Adam Sawicki0667e332018-08-24 17:26:44 +02005633 std::string desc6 = desc5;
5634 switch(strategyIndex)
5635 {
5636 case 0:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005637 desc6 += ",BestFit";
Adam Sawicki0667e332018-08-24 17:26:44 +02005638 config.AllocationStrategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
5639 break;
5640 case 1:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005641 desc6 += ",WorstFit";
Adam Sawicki0667e332018-08-24 17:26:44 +02005642 config.AllocationStrategy = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT;
5643 break;
5644 case 2:
Adam Sawicki740b08f2018-08-27 13:42:07 +02005645 desc6 += ",FirstFit";
Adam Sawicki0667e332018-08-24 17:26:44 +02005646 config.AllocationStrategy = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT;
5647 break;
5648 default:
5649 assert(0);
5650 }
Adam Sawickib8333fb2018-03-13 16:15:53 +01005651
Adam Sawicki33d2ce72018-08-27 13:59:13 +02005652 desc6 += ',';
5653 desc6 += FREE_ORDER_NAMES[(uint32_t)config.FreeOrder];
Adam Sawicki740b08f2018-08-27 13:42:07 +02005654
5655 const char* testDescription = desc6.c_str();
Adam Sawicki0667e332018-08-24 17:26:44 +02005656
5657 for(size_t repeat = 0; repeat < repeatCount; ++repeat)
5658 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02005659 printf("%s #%u\n", testDescription, (uint32_t)repeat);
Adam Sawicki0667e332018-08-24 17:26:44 +02005660
5661 Result result{};
5662 VkResult res = MainTest(result, config);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005663 TEST(res == VK_SUCCESS);
Adam Sawicki740b08f2018-08-27 13:42:07 +02005664 if(file)
5665 {
5666 WriteMainTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
5667 }
Adam Sawicki0667e332018-08-24 17:26:44 +02005668 }
Adam Sawickib8333fb2018-03-13 16:15:53 +01005669 }
5670 }
5671 }
5672 }
5673 }
5674 }
5675}
5676
5677static void PerformPoolTests(FILE* file)
5678{
5679 const size_t AVG_RESOURCES_PER_POOL = 300;
5680
5681 uint32_t repeatCount = 1;
5682 if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;
5683
5684 PoolTestConfig config{};
5685 config.RandSeed = 2346343;
5686 config.FrameCount = 200;
5687 config.ItemsToMakeUnusedPercent = 2;
5688
5689 size_t threadCountCount = 1;
5690 switch(ConfigType)
5691 {
5692 case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
5693 case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
5694 case CONFIG_TYPE_AVERAGE: threadCountCount = 2; break;
5695 case CONFIG_TYPE_LARGE: threadCountCount = 3; break;
5696 case CONFIG_TYPE_MAXIMUM: threadCountCount = 3; break;
5697 default: assert(0);
5698 }
5699 for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
5700 {
5701 std::string desc1;
5702
5703 switch(threadCountIndex)
5704 {
5705 case 0:
5706 desc1 += "1_thread";
5707 config.ThreadCount = 1;
5708 break;
5709 case 1:
5710 desc1 += "16_threads";
5711 config.ThreadCount = 16;
5712 break;
5713 case 2:
5714 desc1 += "2_threads";
5715 config.ThreadCount = 2;
5716 break;
5717 default:
5718 assert(0);
5719 }
5720
5721 // 0 = buffers, 1 = images, 2 = buffers and images
5722 size_t buffersVsImagesCount = 2;
5723 if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
5724 for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
5725 {
5726 std::string desc2 = desc1;
5727 switch(buffersVsImagesIndex)
5728 {
5729 case 0: desc2 += " Buffers"; break;
5730 case 1: desc2 += " Images"; break;
5731 case 2: desc2 += " Buffers+Images"; break;
5732 default: assert(0);
5733 }
5734
5735 // 0 = small, 1 = large, 2 = small and large
5736 size_t smallVsLargeCount = 2;
5737 if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
5738 for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
5739 {
5740 std::string desc3 = desc2;
5741 switch(smallVsLargeIndex)
5742 {
5743 case 0: desc3 += " Small"; break;
5744 case 1: desc3 += " Large"; break;
5745 case 2: desc3 += " Small+Large"; break;
5746 default: assert(0);
5747 }
5748
5749 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
5750 config.PoolSize = 6ull * 1024 * 1024 * 1024; // 6 GB
5751 else
5752 config.PoolSize = 4ull * 1024 * 1024;
5753
5754 // 0 = varying sizes min...max, 1 = set of constant sizes
5755 size_t constantSizesCount = 1;
5756 if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
5757 for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
5758 {
5759 std::string desc4 = desc3;
5760 switch(constantSizesIndex)
5761 {
5762 case 0: desc4 += " Varying_sizes"; break;
5763 case 1: desc4 += " Constant_sizes"; break;
5764 default: assert(0);
5765 }
5766
5767 config.AllocationSizes.clear();
5768 // Buffers present
5769 if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
5770 {
5771 // Small
5772 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
5773 {
5774 // Varying size
5775 if(constantSizesIndex == 0)
5776 config.AllocationSizes.push_back({4, 16, 1024});
5777 // Constant sizes
5778 else
5779 {
5780 config.AllocationSizes.push_back({1, 16, 16});
5781 config.AllocationSizes.push_back({1, 64, 64});
5782 config.AllocationSizes.push_back({1, 256, 256});
5783 config.AllocationSizes.push_back({1, 1024, 1024});
5784 }
5785 }
5786 // Large
5787 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
5788 {
5789 // Varying size
5790 if(constantSizesIndex == 0)
5791 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
5792 // Constant sizes
5793 else
5794 {
5795 config.AllocationSizes.push_back({1, 0x10000, 0x10000});
5796 config.AllocationSizes.push_back({1, 0x80000, 0x80000});
5797 config.AllocationSizes.push_back({1, 0x200000, 0x200000});
5798 config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
5799 }
5800 }
5801 }
5802 // Images present
5803 if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
5804 {
5805 // Small
5806 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
5807 {
5808 // Varying size
5809 if(constantSizesIndex == 0)
5810 config.AllocationSizes.push_back({4, 0, 0, 4, 32});
5811 // Constant sizes
5812 else
5813 {
5814 config.AllocationSizes.push_back({1, 0, 0, 4, 4});
5815 config.AllocationSizes.push_back({1, 0, 0, 8, 8});
5816 config.AllocationSizes.push_back({1, 0, 0, 16, 16});
5817 config.AllocationSizes.push_back({1, 0, 0, 32, 32});
5818 }
5819 }
5820 // Large
5821 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
5822 {
5823 // Varying size
5824 if(constantSizesIndex == 0)
5825 config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
5826 // Constant sizes
5827 else
5828 {
5829 config.AllocationSizes.push_back({1, 0, 0, 256, 256});
5830 config.AllocationSizes.push_back({1, 0, 0, 512, 512});
5831 config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
5832 config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
5833 }
5834 }
5835 }
5836
5837 const VkDeviceSize avgResourceSize = config.CalcAvgResourceSize();
5838 config.PoolSize = avgResourceSize * AVG_RESOURCES_PER_POOL;
5839
5840 // 0 = 66%, 1 = 133%, 2 = 100%, 3 = 33%, 4 = 166%
5841 size_t subscriptionModeCount;
5842 switch(ConfigType)
5843 {
5844 case CONFIG_TYPE_MINIMUM: subscriptionModeCount = 2; break;
5845 case CONFIG_TYPE_SMALL: subscriptionModeCount = 2; break;
5846 case CONFIG_TYPE_AVERAGE: subscriptionModeCount = 3; break;
5847 case CONFIG_TYPE_LARGE: subscriptionModeCount = 5; break;
5848 case CONFIG_TYPE_MAXIMUM: subscriptionModeCount = 5; break;
5849 default: assert(0);
5850 }
5851 for(size_t subscriptionModeIndex = 0; subscriptionModeIndex < subscriptionModeCount; ++subscriptionModeIndex)
5852 {
5853 std::string desc5 = desc4;
5854
5855 switch(subscriptionModeIndex)
5856 {
5857 case 0:
5858 desc5 += " Subscription_66%";
5859 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 66 / 100;
5860 break;
5861 case 1:
5862 desc5 += " Subscription_133%";
5863 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 133 / 100;
5864 break;
5865 case 2:
5866 desc5 += " Subscription_100%";
5867 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL;
5868 break;
5869 case 3:
5870 desc5 += " Subscription_33%";
5871 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 33 / 100;
5872 break;
5873 case 4:
5874 desc5 += " Subscription_166%";
5875 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 166 / 100;
5876 break;
5877 default:
5878 assert(0);
5879 }
5880
5881 config.TotalItemCount = config.UsedItemCountMax * 5;
5882 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
5883
5884 const char* testDescription = desc5.c_str();
5885
5886 for(size_t repeat = 0; repeat < repeatCount; ++repeat)
5887 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02005888 printf("%s #%u\n", testDescription, (uint32_t)repeat);
Adam Sawickib8333fb2018-03-13 16:15:53 +01005889
5890 PoolTestResult result{};
5891 g_MemoryAliasingWarningEnabled = false;
5892 TestPool_Benchmark(result, config);
5893 g_MemoryAliasingWarningEnabled = true;
5894 WritePoolTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
5895 }
5896 }
5897 }
5898 }
5899 }
5900 }
5901}
5902
Adam Sawickia83793a2018-09-03 13:40:42 +02005903static void BasicTestBuddyAllocator()
5904{
5905 wprintf(L"Basic test buddy allocator\n");
5906
5907 RandomNumberGenerator rand{76543};
5908
5909 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
5910 sampleBufCreateInfo.size = 1024; // Whatever.
5911 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
5912
5913 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
5914 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
5915
5916 VmaPoolCreateInfo poolCreateInfo = {};
5917 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005918 TEST(res == VK_SUCCESS);
Adam Sawickia83793a2018-09-03 13:40:42 +02005919
Adam Sawickid6e6d6b2018-09-21 14:07:02 +02005920 // Deliberately adding 1023 to test usable size smaller than memory block size.
5921 poolCreateInfo.blockSize = 1024 * 1024 + 1023;
Adam Sawickia83793a2018-09-03 13:40:42 +02005922 poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
Adam Sawicki80927152018-09-07 17:27:23 +02005923 //poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
Adam Sawickia83793a2018-09-03 13:40:42 +02005924
5925 VmaPool pool = nullptr;
5926 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005927 TEST(res == VK_SUCCESS);
Adam Sawickia83793a2018-09-03 13:40:42 +02005928
5929 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
5930
5931 VmaAllocationCreateInfo allocCreateInfo = {};
5932 allocCreateInfo.pool = pool;
5933
5934 std::vector<BufferInfo> bufInfo;
5935 BufferInfo newBufInfo;
5936 VmaAllocationInfo allocInfo;
5937
5938 bufCreateInfo.size = 1024 * 256;
5939 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
5940 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005941 TEST(res == VK_SUCCESS);
Adam Sawickia83793a2018-09-03 13:40:42 +02005942 bufInfo.push_back(newBufInfo);
5943
5944 bufCreateInfo.size = 1024 * 512;
5945 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
5946 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005947 TEST(res == VK_SUCCESS);
Adam Sawickia83793a2018-09-03 13:40:42 +02005948 bufInfo.push_back(newBufInfo);
5949
5950 bufCreateInfo.size = 1024 * 128;
5951 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
5952 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005953 TEST(res == VK_SUCCESS);
Adam Sawickia83793a2018-09-03 13:40:42 +02005954 bufInfo.push_back(newBufInfo);
Adam Sawickia01d4582018-09-21 14:22:35 +02005955
5956 // Test very small allocation, smaller than minimum node size.
5957 bufCreateInfo.size = 1;
5958 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
5959 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005960 TEST(res == VK_SUCCESS);
Adam Sawickia01d4582018-09-21 14:22:35 +02005961 bufInfo.push_back(newBufInfo);
Adam Sawickia83793a2018-09-03 13:40:42 +02005962
Adam Sawicki9933c5c2018-09-21 14:57:24 +02005963 // Test some small allocation with alignment requirement.
5964 {
5965 VkMemoryRequirements memReq;
5966 memReq.alignment = 256;
5967 memReq.memoryTypeBits = UINT32_MAX;
5968 memReq.size = 32;
5969
5970 newBufInfo.Buffer = VK_NULL_HANDLE;
5971 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo,
5972 &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005973 TEST(res == VK_SUCCESS);
5974 TEST(allocInfo.offset % memReq.alignment == 0);
Adam Sawicki9933c5c2018-09-21 14:57:24 +02005975 bufInfo.push_back(newBufInfo);
5976 }
5977
5978 //SaveAllocatorStatsToFile(L"TEST.json");
5979
Adam Sawicki21017c62018-09-07 15:26:59 +02005980 VmaPoolStats stats = {};
5981 vmaGetPoolStats(g_hAllocator, pool, &stats);
5982 int DBG = 0; // Set breakpoint here to inspect `stats`.
5983
Adam Sawicki80927152018-09-07 17:27:23 +02005984 // Allocate enough new buffers to surely fall into second block.
5985 for(uint32_t i = 0; i < 32; ++i)
5986 {
5987 bufCreateInfo.size = 1024 * (rand.Generate() % 32 + 1);
5988 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
5989 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
Adam Sawickib8d34d52018-10-03 17:41:20 +02005990 TEST(res == VK_SUCCESS);
Adam Sawicki80927152018-09-07 17:27:23 +02005991 bufInfo.push_back(newBufInfo);
5992 }
5993
5994 SaveAllocatorStatsToFile(L"BuddyTest01.json");
5995
Adam Sawickia83793a2018-09-03 13:40:42 +02005996 // Destroy the buffers in random order.
5997 while(!bufInfo.empty())
5998 {
5999 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
6000 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
6001 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
6002 bufInfo.erase(bufInfo.begin() + indexToDestroy);
6003 }
6004
6005 vmaDestroyPool(g_hAllocator, pool);
6006}
6007
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006008static void BasicTestAllocatePages()
6009{
6010 wprintf(L"Basic test allocate pages\n");
6011
6012 RandomNumberGenerator rand{765461};
6013
6014 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
6015 sampleBufCreateInfo.size = 1024; // Whatever.
6016 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
6017
6018 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
6019 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
6020
6021 VmaPoolCreateInfo poolCreateInfo = {};
6022 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
Adam Sawickia7d77692018-10-03 16:15:27 +02006023 TEST(res == VK_SUCCESS);
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006024
6025 // 1 block of 1 MB.
6026 poolCreateInfo.blockSize = 1024 * 1024;
6027 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
6028
6029 // Create pool.
6030 VmaPool pool = nullptr;
6031 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
Adam Sawickia7d77692018-10-03 16:15:27 +02006032 TEST(res == VK_SUCCESS);
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006033
6034 // Make 100 allocations of 4 KB - they should fit into the pool.
6035 VkMemoryRequirements memReq;
6036 memReq.memoryTypeBits = UINT32_MAX;
6037 memReq.alignment = 4 * 1024;
6038 memReq.size = 4 * 1024;
6039
6040 VmaAllocationCreateInfo allocCreateInfo = {};
6041 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
6042 allocCreateInfo.pool = pool;
6043
6044 constexpr uint32_t allocCount = 100;
6045
6046 std::vector<VmaAllocation> alloc{allocCount};
6047 std::vector<VmaAllocationInfo> allocInfo{allocCount};
6048 res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), allocInfo.data());
Adam Sawickia7d77692018-10-03 16:15:27 +02006049 TEST(res == VK_SUCCESS);
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006050 for(uint32_t i = 0; i < allocCount; ++i)
6051 {
Adam Sawickia7d77692018-10-03 16:15:27 +02006052 TEST(alloc[i] != VK_NULL_HANDLE &&
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006053 allocInfo[i].pMappedData != nullptr &&
6054 allocInfo[i].deviceMemory == allocInfo[0].deviceMemory &&
6055 allocInfo[i].memoryType == allocInfo[0].memoryType);
6056 }
6057
6058 // Free the allocations.
6059 vmaFreeMemoryPages(g_hAllocator, allocCount, alloc.data());
6060 std::fill(alloc.begin(), alloc.end(), nullptr);
6061 std::fill(allocInfo.begin(), allocInfo.end(), VmaAllocationInfo{});
6062
6063 // Try to make 100 allocations of 100 KB. This call should fail due to not enough memory.
6064 // Also test optional allocationInfo = null.
6065 memReq.size = 100 * 1024;
6066 res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), nullptr);
Adam Sawickia7d77692018-10-03 16:15:27 +02006067 TEST(res != VK_SUCCESS);
6068 TEST(std::find_if(alloc.begin(), alloc.end(), [](VmaAllocation alloc){ return alloc != VK_NULL_HANDLE; }) == alloc.end());
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006069
6070 // Make 100 allocations of 4 KB, but with required alignment of 128 KB. This should also fail.
6071 memReq.size = 4 * 1024;
6072 memReq.alignment = 128 * 1024;
6073 res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &allocCreateInfo, allocCount, alloc.data(), allocInfo.data());
Adam Sawickia7d77692018-10-03 16:15:27 +02006074 TEST(res != VK_SUCCESS);
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006075
6076 // Make 100 dedicated allocations of 4 KB.
6077 memReq.alignment = 4 * 1024;
6078 memReq.size = 4 * 1024;
6079
6080 VmaAllocationCreateInfo dedicatedAllocCreateInfo = {};
6081 dedicatedAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
6082 dedicatedAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6083 res = vmaAllocateMemoryPages(g_hAllocator, &memReq, &dedicatedAllocCreateInfo, allocCount, alloc.data(), allocInfo.data());
Adam Sawickia7d77692018-10-03 16:15:27 +02006084 TEST(res == VK_SUCCESS);
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006085 for(uint32_t i = 0; i < allocCount; ++i)
6086 {
Adam Sawickia7d77692018-10-03 16:15:27 +02006087 TEST(alloc[i] != VK_NULL_HANDLE &&
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006088 allocInfo[i].pMappedData != nullptr &&
6089 allocInfo[i].memoryType == allocInfo[0].memoryType &&
6090 allocInfo[i].offset == 0);
6091 if(i > 0)
6092 {
Adam Sawickia7d77692018-10-03 16:15:27 +02006093 TEST(allocInfo[i].deviceMemory != allocInfo[0].deviceMemory);
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006094 }
6095 }
6096
6097 // Free the allocations.
6098 vmaFreeMemoryPages(g_hAllocator, allocCount, alloc.data());
6099 std::fill(alloc.begin(), alloc.end(), nullptr);
6100 std::fill(allocInfo.begin(), allocInfo.end(), VmaAllocationInfo{});
6101
6102 vmaDestroyPool(g_hAllocator, pool);
6103}
6104
Adam Sawickif2975342018-10-16 13:49:02 +02006105// Test the testing environment.
6106static void TestGpuData()
6107{
6108 RandomNumberGenerator rand = { 53434 };
6109
6110 std::vector<AllocInfo> allocInfo;
6111
6112 for(size_t i = 0; i < 100; ++i)
6113 {
6114 AllocInfo info = {};
6115
6116 info.m_BufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
6117 info.m_BufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT |
6118 VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
6119 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
6120 info.m_BufferInfo.size = 1024 * 1024 * (rand.Generate() % 9 + 1);
6121
6122 VmaAllocationCreateInfo allocCreateInfo = {};
6123 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
6124
6125 VkResult res = vmaCreateBuffer(g_hAllocator, &info.m_BufferInfo, &allocCreateInfo, &info.m_Buffer, &info.m_Allocation, nullptr);
6126 TEST(res == VK_SUCCESS);
6127
6128 info.m_StartValue = rand.Generate();
6129
6130 allocInfo.push_back(std::move(info));
6131 }
6132
6133 UploadGpuData(allocInfo.data(), allocInfo.size());
6134
6135 ValidateGpuData(allocInfo.data(), allocInfo.size());
6136
6137 DestroyAllAllocations(allocInfo);
6138}
6139
Adam Sawickib8333fb2018-03-13 16:15:53 +01006140void Test()
6141{
6142 wprintf(L"TESTING:\n");
6143
Adam Sawicki48b8a332019-11-02 15:24:33 +01006144 if(false)
Adam Sawicki70a683e2018-08-24 15:36:32 +02006145 {
Adam Sawicki1a8424f2018-12-13 11:01:16 +01006146 ////////////////////////////////////////////////////////////////////////////////
6147 // Temporarily insert custom tests here:
Adam Sawicki70a683e2018-08-24 15:36:32 +02006148 return;
6149 }
6150
Adam Sawickib8333fb2018-03-13 16:15:53 +01006151 // # Simple tests
6152
6153 TestBasics();
Adam Sawickif2975342018-10-16 13:49:02 +02006154 //TestGpuData(); // Not calling this because it's just testing the testing environment.
Adam Sawicki212a4a62018-06-14 15:44:45 +02006155#if VMA_DEBUG_MARGIN
6156 TestDebugMargin();
6157#else
6158 TestPool_SameSize();
Adam Sawickiddcbf8c2019-11-22 15:22:42 +01006159 TestPool_MinBlockCount();
Adam Sawicki212a4a62018-06-14 15:44:45 +02006160 TestHeapSizeLimit();
6161#endif
Adam Sawickie44c6262018-06-15 14:30:39 +02006162#if VMA_DEBUG_INITIALIZE_ALLOCATIONS
6163 TestAllocationsInitialization();
6164#endif
Adam Sawickiefa88c42019-11-18 16:33:56 +01006165 TestMemoryUsage();
Adam Sawicki40ffe982019-10-11 15:56:02 +02006166 TestBudget();
Adam Sawickib8333fb2018-03-13 16:15:53 +01006167 TestMapping();
Adam Sawickidaa6a552019-06-25 15:26:37 +02006168 TestDeviceLocalMapped();
Adam Sawickib8333fb2018-03-13 16:15:53 +01006169 TestMappingMultithreaded();
Adam Sawicki0876c0d2018-06-20 15:18:11 +02006170 TestLinearAllocator();
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02006171 ManuallyTestLinearAllocator();
Adam Sawicki70a683e2018-08-24 15:36:32 +02006172 TestLinearAllocatorMultiBlock();
Adam Sawicki33d2ce72018-08-27 13:59:13 +02006173
Adam Sawicki4338f662018-09-07 14:12:37 +02006174 BasicTestBuddyAllocator();
Adam Sawicki2e4d3ef2018-10-03 15:48:17 +02006175 BasicTestAllocatePages();
Adam Sawicki4338f662018-09-07 14:12:37 +02006176
Adam Sawicki33d2ce72018-08-27 13:59:13 +02006177 {
6178 FILE* file;
Adam Sawickic6432d12018-09-21 16:44:16 +02006179 fopen_s(&file, "Algorithms.csv", "w");
Adam Sawicki33d2ce72018-08-27 13:59:13 +02006180 assert(file != NULL);
Adam Sawicki80927152018-09-07 17:27:23 +02006181 BenchmarkAlgorithms(file);
Adam Sawicki33d2ce72018-08-27 13:59:13 +02006182 fclose(file);
6183 }
6184
Adam Sawickib8333fb2018-03-13 16:15:53 +01006185 TestDefragmentationSimple();
6186 TestDefragmentationFull();
Adam Sawicki52076eb2018-11-22 16:14:50 +01006187 TestDefragmentationWholePool();
Adam Sawicki9a4f5082018-11-23 17:26:05 +01006188 TestDefragmentationGpu();
Adam Sawickia52012d2019-12-23 15:28:51 +01006189 TestDefragmentationIncrementalBasic();
6190 TestDefragmentationIncrementalComplex();
Adam Sawickib8333fb2018-03-13 16:15:53 +01006191
6192 // # Detailed tests
6193 FILE* file;
6194 fopen_s(&file, "Results.csv", "w");
6195 assert(file != NULL);
6196
6197 WriteMainTestResultHeader(file);
6198 PerformMainTests(file);
6199 //PerformCustomMainTest(file);
6200
6201 WritePoolTestResultHeader(file);
6202 PerformPoolTests(file);
6203 //PerformCustomPoolTest(file);
6204
6205 fclose(file);
Adam Sawicki4ac8ff82019-11-18 14:47:33 +01006206
Adam Sawickib8333fb2018-03-13 16:15:53 +01006207 wprintf(L"Done.\n");
6208}
6209
Adam Sawickif1a793c2018-03-13 15:42:22 +01006210#endif // #ifdef _WIN32