blob: 3942f33bc375b87487818c02fd04194921155bf3 [file] [log] [blame]
Adam Sawickif1a793c2018-03-13 15:42:22 +01001#include "Tests.h"
2#include "VmaUsage.h"
3#include "Common.h"
Adam Sawickib8333fb2018-03-13 16:15:53 +01004#include <atomic>
5#include <thread>
6#include <mutex>
Adam Sawickif1a793c2018-03-13 15:42:22 +01007
8#ifdef _WIN32
9
// Order in which allocations are destroyed at the end of a test run.
// COUNT is the number of valid values (useful for random selection).
enum class FREE_ORDER { FORWARD, BACKWARD, RANDOM, COUNT };
11
// Describes one weighted class of resource sizes used by the tests.
// Exactly one of the two ranges is active: BufferSizeMax > 0 means this entry
// describes buffers; otherwise the Image* range is used (enforced by asserts
// in MainTest's GetNextAllocationSize).
struct AllocationSize
{
    uint32_t Probability;                    // Relative weight among all entries.
    VkDeviceSize BufferSizeMin, BufferSizeMax; // Buffer size range in bytes, inclusive min.
    uint32_t ImageSizeMin, ImageSizeMax;       // Square-ish image dimension range in pixels.
};
18
// Parameters of MainTest() below.
struct Config
{
    uint32_t RandSeed;
    // Bytes to allocate up-front, split evenly among threads.
    VkDeviceSize BeginBytesToAllocate;
    // Number of random allocate/free operations after the initial fill, split among threads.
    uint32_t AdditionalOperationCount;
    // Upper bound on bytes held at any time, split evenly among threads.
    VkDeviceSize MaxBytesToAllocate;
    uint32_t MemUsageProbability[4]; // For VMA_MEMORY_USAGE_* (GPU_ONLY..GPU_TO_CPU, see MainTest).
    std::vector<AllocationSize> AllocationSizes;
    uint32_t ThreadCount;
    // Chance (0..100) that an allocation goes to / is freed from the shared,
    // mutex-protected commonAllocations list instead of the thread-local one.
    uint32_t ThreadsUsingCommonAllocationsProbabilityPercent;
    FREE_ORDER FreeOrder;
};
31
// Results of MainTest(). The *Avg fields are used as running sums during the
// test and divided by the allocation count at the end of MainTest().
struct Result
{
    duration TotalTime;
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax;
    VkDeviceSize TotalMemoryAllocated;            // usedBytes + unusedBytes from vmaCalculateStats.
    VkDeviceSize FreeRangeSizeAvg, FreeRangeSizeMax; // Free-range stats from vmaCalculateStats.
};
40
41void TestDefragmentationSimple();
42void TestDefragmentationFull();
43
44struct PoolTestConfig
45{
46 uint32_t RandSeed;
47 uint32_t ThreadCount;
48 VkDeviceSize PoolSize;
49 uint32_t FrameCount;
50 uint32_t TotalItemCount;
51 // Range for number of items used in each frame.
52 uint32_t UsedItemCountMin, UsedItemCountMax;
53 // Percent of items to make unused, and possibly make some others used in each frame.
54 uint32_t ItemsToMakeUnusedPercent;
55 std::vector<AllocationSize> AllocationSizes;
56
57 VkDeviceSize CalcAvgResourceSize() const
58 {
59 uint32_t probabilitySum = 0;
60 VkDeviceSize sizeSum = 0;
61 for(size_t i = 0; i < AllocationSizes.size(); ++i)
62 {
63 const AllocationSize& allocSize = AllocationSizes[i];
64 if(allocSize.BufferSizeMax > 0)
65 sizeSum += (allocSize.BufferSizeMin + allocSize.BufferSizeMax) / 2 * allocSize.Probability;
66 else
67 {
68 const VkDeviceSize avgDimension = (allocSize.ImageSizeMin + allocSize.ImageSizeMax) / 2;
69 sizeSum += avgDimension * avgDimension * 4 * allocSize.Probability;
70 }
71 probabilitySum += allocSize.Probability;
72 }
73 return sizeSum / probabilitySum;
74 }
75
76 bool UsesBuffers() const
77 {
78 for(size_t i = 0; i < AllocationSizes.size(); ++i)
79 if(AllocationSizes[i].BufferSizeMax > 0)
80 return true;
81 return false;
82 }
83
84 bool UsesImages() const
85 {
86 for(size_t i = 0; i < AllocationSizes.size(); ++i)
87 if(AllocationSizes[i].ImageSizeMax > 0)
88 return true;
89 return false;
90 }
91};
92
// Aggregated results of one pool test.
struct PoolTestResult
{
    duration TotalTime;
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax;
    // NOTE(review): presumably counts VMA "lost" allocations — confirm against
    // the pool test implementation (not visible in this chunk).
    size_t LostAllocationCount, LostAllocationTotalSize;
    size_t FailedAllocationCount, FailedAllocationTotalSize;
};
101
// Bytes per pixel assumed when estimating an image's memory footprint for the
// per-thread byte budgets in MainTest (width * height * IMAGE_BYTES_PER_PIXEL).
static const uint32_t IMAGE_BYTES_PER_PIXEL = 1;
103
// A buffer together with the VMA allocation backing it.
struct BufferInfo
{
    VkBuffer Buffer = VK_NULL_HANDLE;
    VmaAllocation Allocation = VK_NULL_HANDLE;
};
109
110static void InitResult(Result& outResult)
111{
112 outResult.TotalTime = duration::zero();
113 outResult.AllocationTimeMin = duration::max();
114 outResult.AllocationTimeAvg = duration::zero();
115 outResult.AllocationTimeMax = duration::min();
116 outResult.DeallocationTimeMin = duration::max();
117 outResult.DeallocationTimeAvg = duration::zero();
118 outResult.DeallocationTimeMax = duration::min();
119 outResult.TotalMemoryAllocated = 0;
120 outResult.FreeRangeSizeAvg = 0;
121 outResult.FreeRangeSizeMax = 0;
122}
123
124class TimeRegisterObj
125{
126public:
127 TimeRegisterObj(duration& min, duration& sum, duration& max) :
128 m_Min(min),
129 m_Sum(sum),
130 m_Max(max),
131 m_TimeBeg(std::chrono::high_resolution_clock::now())
132 {
133 }
134
135 ~TimeRegisterObj()
136 {
137 duration d = std::chrono::high_resolution_clock::now() - m_TimeBeg;
138 m_Sum += d;
139 if(d < m_Min) m_Min = d;
140 if(d > m_Max) m_Max = d;
141 }
142
143private:
144 duration& m_Min;
145 duration& m_Sum;
146 duration& m_Max;
147 time_point m_TimeBeg;
148};
149
// Per-thread results of a pool test; merged into PoolTestResult afterwards.
// The *Sum fields are raw sums (not yet divided into averages).
struct PoolTestThreadResult
{
    duration AllocationTimeMin, AllocationTimeSum, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeSum, DeallocationTimeMax;
    size_t AllocationCount, DeallocationCount;
    size_t LostAllocationCount, LostAllocationTotalSize;
    size_t FailedAllocationCount, FailedAllocationTotalSize;
};
158
// RAII timer that records one allocation's duration into Result.
// Note: AllocationTimeAvg acts as the running sum here (divided by the
// allocation count at the end of MainTest).
class AllocationTimeRegisterObj : public TimeRegisterObj
{
public:
    AllocationTimeRegisterObj(Result& result) :
        TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeAvg, result.AllocationTimeMax)
    {
    }
};
167
// RAII timer that records one deallocation's duration into Result.
// DeallocationTimeAvg acts as the running sum until MainTest divides it.
class DeallocationTimeRegisterObj : public TimeRegisterObj
{
public:
    DeallocationTimeRegisterObj(Result& result) :
        TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeAvg, result.DeallocationTimeMax)
    {
    }
};
176
// RAII timer that records one pool allocation's duration into PoolTestThreadResult.
class PoolAllocationTimeRegisterObj : public TimeRegisterObj
{
public:
    PoolAllocationTimeRegisterObj(PoolTestThreadResult& result) :
        TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeSum, result.AllocationTimeMax)
    {
    }
};
185
// RAII timer that records one pool deallocation's duration into PoolTestThreadResult.
class PoolDeallocationTimeRegisterObj : public TimeRegisterObj
{
public:
    PoolDeallocationTimeRegisterObj(PoolTestThreadResult& result) :
        TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeSum, result.DeallocationTimeMax)
    {
    }
};
194
// General multi-threaded stress test of the VMA allocator.
//
// Phases:
//   1. Each thread allocates random buffers/images until its share of
//      config.BeginBytesToAllocate is reached.
//   2. Each thread performs its share of config.AdditionalOperationCount
//      random allocate/free operations, staying under its share of
//      config.MaxBytesToAllocate.
//   3. All threads rendezvous (numThreadsReachedMaxAllocations); the main
//      thread snapshots memory statistics, then signals threadsFinishEvent
//      and every thread frees its allocations in config.FreeOrder.
//
// Results (timings, totals) are accumulated into outResult.
// Returns the last VkResult produced by a create call.
//
// NOTE(review): `res` and the outResult time accumulators are written from
// multiple threads without synchronization (via Allocate and the
// *TimeRegisterObj helpers) — a data race; acceptable only as test code.
VkResult MainTest(Result& outResult, const Config& config)
{
    assert(config.ThreadCount > 0);

    InitResult(outResult);

    RandomNumberGenerator mainRand{config.RandSeed};

    time_point timeBeg = std::chrono::high_resolution_clock::now();

    std::atomic<size_t> allocationCount = 0;
    VkResult res = VK_SUCCESS;

    uint32_t memUsageProbabilitySum =
        config.MemUsageProbability[0] + config.MemUsageProbability[1] +
        config.MemUsageProbability[2] + config.MemUsageProbability[3];
    assert(memUsageProbabilitySum > 0);

    uint32_t allocationSizeProbabilitySum = std::accumulate(
        config.AllocationSizes.begin(),
        config.AllocationSizes.end(),
        0u,
        [](uint32_t sum, const AllocationSize& allocSize) {
            return sum + allocSize.Probability;
        });

    // One created resource: exactly one of Buffer/Image is non-null.
    struct Allocation
    {
        VkBuffer Buffer;
        VkImage Image;
        VmaAllocation Alloc;
    };

    // Allocations shared between threads, guarded by commonAllocationsMutex.
    std::vector<Allocation> commonAllocations;
    std::mutex commonAllocationsMutex;

    // Creates one buffer or image (depending on which argument is non-zero),
    // with memory usage chosen by weighted random choice from
    // config.MemUsageProbability, and stores it either in the shared list or
    // in the caller's thread-local `allocations`.
    auto Allocate = [&](
        VkDeviceSize bufferSize,
        const VkExtent2D imageExtent,
        RandomNumberGenerator& localRand,
        VkDeviceSize& totalAllocatedBytes,
        std::vector<Allocation>& allocations) -> VkResult
    {
        // Exactly one of bufferSize / imageExtent must be set.
        assert((bufferSize == 0) != (imageExtent.width == 0 && imageExtent.height == 0));

        // Weighted random pick of VMA_MEMORY_USAGE_GPU_ONLY + index.
        uint32_t memUsageIndex = 0;
        uint32_t memUsageRand = localRand.Generate() % memUsageProbabilitySum;
        while(memUsageRand >= config.MemUsageProbability[memUsageIndex])
            memUsageRand -= config.MemUsageProbability[memUsageIndex++];

        VmaAllocationCreateInfo memReq = {};
        memReq.usage = (VmaMemoryUsage)(VMA_MEMORY_USAGE_GPU_ONLY + memUsageIndex);

        Allocation allocation = {};
        VmaAllocationInfo allocationInfo;

        // Buffer
        if(bufferSize > 0)
        {
            assert(imageExtent.width == 0);
            VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
            bufferInfo.size = bufferSize;
            bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

            {
                AllocationTimeRegisterObj timeRegisterObj{outResult};
                res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &memReq, &allocation.Buffer, &allocation.Alloc, &allocationInfo);
            }
        }
        // Image
        else
        {
            VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
            imageInfo.imageType = VK_IMAGE_TYPE_2D;
            imageInfo.extent.width = imageExtent.width;
            imageInfo.extent.height = imageExtent.height;
            imageInfo.extent.depth = 1;
            imageInfo.mipLevels = 1;
            imageInfo.arrayLayers = 1;
            imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
            // CPU-visible usages need linear tiling so the data is addressable.
            imageInfo.tiling = memReq.usage == VMA_MEMORY_USAGE_GPU_ONLY ?
                VK_IMAGE_TILING_OPTIMAL :
                VK_IMAGE_TILING_LINEAR;
            imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
            // Pick image usage flags plausible for the chosen memory usage.
            switch(memReq.usage)
            {
            case VMA_MEMORY_USAGE_GPU_ONLY:
                switch(localRand.Generate() % 3)
                {
                case 0:
                    imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
                    break;
                case 1:
                    imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
                    break;
                case 2:
                    imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
                    break;
                }
                break;
            case VMA_MEMORY_USAGE_CPU_ONLY:
            case VMA_MEMORY_USAGE_CPU_TO_GPU:
                imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
                break;
            case VMA_MEMORY_USAGE_GPU_TO_CPU:
                imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
                break;
            }
            imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
            imageInfo.flags = 0;

            {
                AllocationTimeRegisterObj timeRegisterObj{outResult};
                res = vmaCreateImage(g_hAllocator, &imageInfo, &memReq, &allocation.Image, &allocation.Alloc, &allocationInfo);
            }
        }

        if(res == VK_SUCCESS)
        {
            ++allocationCount;
            totalAllocatedBytes += allocationInfo.size;
            bool useCommonAllocations = localRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
            if(useCommonAllocations)
            {
                std::unique_lock<std::mutex> lock(commonAllocationsMutex);
                commonAllocations.push_back(allocation);
            }
            else
                allocations.push_back(allocation);
        }
        else
        {
            assert(0);
        }
        return res;
    };

    // Picks a random entry of config.AllocationSizes (weighted by Probability)
    // and returns either a buffer size (rounded down to a multiple of 16) or
    // an image extent — the unused output stays zero.
    auto GetNextAllocationSize = [&](
        VkDeviceSize& outBufSize,
        VkExtent2D& outImageSize,
        RandomNumberGenerator& localRand)
    {
        outBufSize = 0;
        outImageSize = {0, 0};

        uint32_t allocSizeIndex = 0;
        uint32_t r = localRand.Generate() % allocationSizeProbabilitySum;
        while(r >= config.AllocationSizes[allocSizeIndex].Probability)
            r -= config.AllocationSizes[allocSizeIndex++].Probability;

        const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
        if(allocSize.BufferSizeMax > 0)
        {
            assert(allocSize.ImageSizeMax == 0);
            if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
                outBufSize = allocSize.BufferSizeMin;
            else
            {
                outBufSize = allocSize.BufferSizeMin + localRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
                outBufSize = outBufSize / 16 * 16;
            }
        }
        else
        {
            if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
                outImageSize.width = outImageSize.height = allocSize.ImageSizeMax;
            else
            {
                outImageSize.width = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
                outImageSize.height = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
            }
        }
    };

    std::atomic<uint32_t> numThreadsReachedMaxAllocations = 0;
    // Manual-reset event, initially unsignaled; signaled once stats are taken.
    HANDLE threadsFinishEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

    auto ThreadProc = [&](uint32_t randSeed) -> void
    {
        RandomNumberGenerator threadRand(randSeed);
        VkDeviceSize threadTotalAllocatedBytes = 0;
        std::vector<Allocation> threadAllocations;
        // Each thread gets an equal share of the global byte/operation budgets.
        VkDeviceSize threadBeginBytesToAllocate = config.BeginBytesToAllocate / config.ThreadCount;
        VkDeviceSize threadMaxBytesToAllocate = config.MaxBytesToAllocate / config.ThreadCount;
        uint32_t threadAdditionalOperationCount = config.AdditionalOperationCount / config.ThreadCount;

        // BEGIN ALLOCATIONS
        for(;;)
        {
            VkDeviceSize bufferSize = 0;
            VkExtent2D imageExtent = {};
            GetNextAllocationSize(bufferSize, imageExtent, threadRand);
            if(threadTotalAllocatedBytes + bufferSize + imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
                threadBeginBytesToAllocate)
            {
                if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
                    break;
            }
            else
                break;
        }

        // ADDITIONAL ALLOCATIONS AND FREES
        for(size_t i = 0; i < threadAdditionalOperationCount; ++i)
        {
            VkDeviceSize bufferSize = 0;
            VkExtent2D imageExtent = {};
            GetNextAllocationSize(bufferSize, imageExtent, threadRand);

            // true = allocate, false = free
            bool allocate = threadRand.Generate() % 2 != 0;

            if(allocate)
            {
                if(threadTotalAllocatedBytes +
                    bufferSize +
                    imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
                    threadMaxBytesToAllocate)
                {
                    if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
                        break;
                }
            }
            else
            {
                // Free a random allocation — from the shared list (under lock)
                // or from this thread's own list.
                bool useCommonAllocations = threadRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
                if(useCommonAllocations)
                {
                    std::unique_lock<std::mutex> lock(commonAllocationsMutex);
                    if(!commonAllocations.empty())
                    {
                        size_t indexToFree = threadRand.Generate() % commonAllocations.size();
                        VmaAllocationInfo allocationInfo;
                        vmaGetAllocationInfo(g_hAllocator, commonAllocations[indexToFree].Alloc, &allocationInfo);
                        // Only free if it does not drive this thread's byte counter negative.
                        if(threadTotalAllocatedBytes >= allocationInfo.size)
                        {
                            DeallocationTimeRegisterObj timeRegisterObj{outResult};
                            if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                                vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
                            threadTotalAllocatedBytes -= allocationInfo.size;
                            commonAllocations.erase(commonAllocations.begin() + indexToFree);
                        }
                    }
                }
                else
                {
                    if(!threadAllocations.empty())
                    {
                        size_t indexToFree = threadRand.Generate() % threadAllocations.size();
                        VmaAllocationInfo allocationInfo;
                        vmaGetAllocationInfo(g_hAllocator, threadAllocations[indexToFree].Alloc, &allocationInfo);
                        if(threadTotalAllocatedBytes >= allocationInfo.size)
                        {
                            DeallocationTimeRegisterObj timeRegisterObj{outResult};
                            if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                                vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
                            threadTotalAllocatedBytes -= allocationInfo.size;
                            threadAllocations.erase(threadAllocations.begin() + indexToFree);
                        }
                    }
                }
            }
        }

        ++numThreadsReachedMaxAllocations;

        // Block until the main thread has taken its statistics snapshot.
        WaitForSingleObject(threadsFinishEvent, INFINITE);

        // DEALLOCATION
        while(!threadAllocations.empty())
        {
            // NOTE(review): RANDOM order uses mainRand from multiple threads
            // concurrently — confirm RandomNumberGenerator is thread-safe.
            size_t indexToFree = 0;
            switch(config.FreeOrder)
            {
            case FREE_ORDER::FORWARD:
                indexToFree = 0;
                break;
            case FREE_ORDER::BACKWARD:
                indexToFree = threadAllocations.size() - 1;
                break;
            case FREE_ORDER::RANDOM:
                indexToFree = mainRand.Generate() % threadAllocations.size();
                break;
            }

            {
                DeallocationTimeRegisterObj timeRegisterObj{outResult};
                if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                    vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
                else
                    vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
            }
            threadAllocations.erase(threadAllocations.begin() + indexToFree);
        }
    };

    // Launch worker threads, each with a distinct derived seed.
    uint32_t threadRandSeed = mainRand.Generate();
    std::vector<std::thread> bkgThreads;
    for(size_t i = 0; i < config.ThreadCount; ++i)
    {
        bkgThreads.emplace_back(std::bind(ThreadProc, threadRandSeed + (uint32_t)i));
    }

    // Wait for threads reached max allocations
    while(numThreadsReachedMaxAllocations < config.ThreadCount)
        Sleep(0);

    // CALCULATE MEMORY STATISTICS ON FINAL USAGE
    VmaStats vmaStats = {};
    vmaCalculateStats(g_hAllocator, &vmaStats);
    outResult.TotalMemoryAllocated = vmaStats.total.usedBytes + vmaStats.total.unusedBytes;
    outResult.FreeRangeSizeMax = vmaStats.total.unusedRangeSizeMax;
    outResult.FreeRangeSizeAvg = vmaStats.total.unusedRangeSizeAvg;

    // Signal threads to deallocate
    SetEvent(threadsFinishEvent);

    // Wait for threads finished
    for(size_t i = 0; i < bkgThreads.size(); ++i)
        bkgThreads[i].join();
    bkgThreads.clear();

    CloseHandle(threadsFinishEvent);

    // Deallocate remaining common resources
    while(!commonAllocations.empty())
    {
        size_t indexToFree = 0;
        switch(config.FreeOrder)
        {
        case FREE_ORDER::FORWARD:
            indexToFree = 0;
            break;
        case FREE_ORDER::BACKWARD:
            indexToFree = commonAllocations.size() - 1;
            break;
        case FREE_ORDER::RANDOM:
            indexToFree = mainRand.Generate() % commonAllocations.size();
            break;
        }

        {
            DeallocationTimeRegisterObj timeRegisterObj{outResult};
            if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
            else
                vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
        }
        commonAllocations.erase(commonAllocations.begin() + indexToFree);
    }

    // Turn the running sums into averages.
    if(allocationCount)
    {
        outResult.AllocationTimeAvg /= allocationCount;
        outResult.DeallocationTimeAvg /= allocationCount;
    }

    outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;

    return res;
}
560
Adam Sawickie44c6262018-06-15 14:30:39 +0200561static void SaveAllocatorStatsToFile(const wchar_t* filePath)
Adam Sawickib8333fb2018-03-13 16:15:53 +0100562{
563 char* stats;
Adam Sawickie44c6262018-06-15 14:30:39 +0200564 vmaBuildStatsString(g_hAllocator, &stats, VK_TRUE);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100565 SaveFile(filePath, stats, strlen(stats));
Adam Sawickie44c6262018-06-15 14:30:39 +0200566 vmaFreeStatsString(g_hAllocator, stats);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100567}
568
// One test resource: a VMA allocation plus the buffer OR image it backs.
// Exactly one of m_Buffer/m_Image is non-null, and the matching union member
// (m_BufferInfo or m_ImageInfo) holds the create-info used, so the resource
// can be recreated after defragmentation (see RecreateAllocationResource).
// m_StartValue seeds the sequential uint32_t fill pattern checked by
// ValidateAllocationData.
struct AllocInfo
{
    VmaAllocation m_Allocation;
    VkBuffer m_Buffer;
    VkImage m_Image;
    uint32_t m_StartValue;
    union
    {
        VkBufferCreateInfo m_BufferInfo;
        VkImageCreateInfo m_ImageInfo;
    };
};
581
// Fills outMemReq with the default allocation parameters used by the
// defragmentation tests: CPU_TO_GPU so the memory is host-mappable.
static void GetMemReq(VmaAllocationCreateInfo& outMemReq)
{
    outMemReq = {};
    outMemReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    //outMemReq.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
}
588
// Creates a buffer in the given pool (optionally persistently mapped) and
// fills it with a sequential uint32_t pattern starting at a random
// m_StartValue. The pattern is later verified by ValidateAllocationData.
// Fills all fields of outAllocInfo; bufCreateInfo.size must be a multiple of 4.
static void CreateBuffer(
    VmaPool pool,
    const VkBufferCreateInfo& bufCreateInfo,
    bool persistentlyMapped,
    AllocInfo& outAllocInfo)
{
    outAllocInfo = {};
    outAllocInfo.m_BufferInfo = bufCreateInfo;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    if(persistentlyMapped)
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VmaAllocationInfo vmaAllocInfo = {};
    ERR_GUARD_VULKAN( vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &outAllocInfo.m_Buffer, &outAllocInfo.m_Allocation, &vmaAllocInfo) );

    // Setup StartValue and fill.
    {
        outAllocInfo.m_StartValue = (uint32_t)rand();
        // With VMA_ALLOCATION_CREATE_MAPPED_BIT the allocation is returned
        // already mapped (pMappedData != null); otherwise map temporarily.
        uint32_t* data = (uint32_t*)vmaAllocInfo.pMappedData;
        assert((data != nullptr) == persistentlyMapped);
        if(!persistentlyMapped)
        {
            ERR_GUARD_VULKAN( vmaMapMemory(g_hAllocator, outAllocInfo.m_Allocation, (void**)&data) );
        }

        uint32_t value = outAllocInfo.m_StartValue;
        assert(bufCreateInfo.size % 4 == 0);
        for(size_t i = 0; i < bufCreateInfo.size / sizeof(uint32_t); ++i)
            data[i] = value++;

        if(!persistentlyMapped)
            vmaUnmapMemory(g_hAllocator, outAllocInfo.m_Allocation);
    }
}
625
// Creates a random resource (currently always a buffer — the image branch is
// disabled by the hard-coded isBuffer below) with a random size, using the
// default memory parameters from GetMemReq, and fills it with the sequential
// pattern starting at m_StartValue (verified later by ValidateAllocationData).
static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator)
{
    outAllocation.m_Allocation = nullptr;
    outAllocation.m_Buffer = nullptr;
    outAllocation.m_Image = nullptr;
    outAllocation.m_StartValue = (uint32_t)rand();

    VmaAllocationCreateInfo vmaMemReq;
    GetMemReq(vmaMemReq);

    VmaAllocationInfo allocInfo;

    // Image path deliberately disabled; re-enable by restoring the rand() expression.
    const bool isBuffer = true;//(rand() & 0x1) != 0;
    const bool isLarge = (rand() % 16) == 0;
    if(isBuffer)
    {
        const uint32_t bufferSize = isLarge ?
            (rand() % 10 + 1) * (1024 * 1024) : // 1 MB ... 10 MB
            (rand() % 1024 + 1) * 1024; // 1 KB ... 1 MB

        VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufferInfo.size = bufferSize;
        bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &vmaMemReq, &outAllocation.m_Buffer, &outAllocation.m_Allocation, &allocInfo);
        outAllocation.m_BufferInfo = bufferInfo;
        assert(res == VK_SUCCESS);
    }
    else
    {
        const uint32_t imageSizeX = isLarge ?
            1024 + rand() % (4096 - 1024) : // 1024 ... 4096
            rand() % 1024 + 1; // 1 ... 1024
        const uint32_t imageSizeY = isLarge ?
            1024 + rand() % (4096 - 1024) : // 1024 ... 4096
            rand() % 1024 + 1; // 1 ... 1024

        VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
        imageInfo.imageType = VK_IMAGE_TYPE_2D;
        imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
        imageInfo.extent.width = imageSizeX;
        imageInfo.extent.height = imageSizeY;
        imageInfo.extent.depth = 1;
        imageInfo.mipLevels = 1;
        imageInfo.arrayLayers = 1;
        imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
        imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
        imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

        VkResult res = vmaCreateImage(allocator, &imageInfo, &vmaMemReq, &outAllocation.m_Image, &outAllocation.m_Allocation, &allocInfo);
        outAllocation.m_ImageInfo = imageInfo;
        assert(res == VK_SUCCESS);
    }

    // Fill with the sequential pattern; map temporarily if not already mapped.
    uint32_t* data = (uint32_t*)allocInfo.pMappedData;
    if(allocInfo.pMappedData == nullptr)
    {
        VkResult res = vmaMapMemory(allocator, outAllocation.m_Allocation, (void**)&data);
        assert(res == VK_SUCCESS);
    }

    uint32_t value = outAllocation.m_StartValue;
    assert(allocInfo.size % 4 == 0);
    for(size_t i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
        data[i] = value++;

    if(allocInfo.pMappedData == nullptr)
        vmaUnmapMemory(allocator, outAllocation.m_Allocation);
}
696
697static void DestroyAllocation(const AllocInfo& allocation)
698{
699 if(allocation.m_Buffer)
700 vmaDestroyBuffer(g_hAllocator, allocation.m_Buffer, allocation.m_Allocation);
701 else
702 vmaDestroyImage(g_hAllocator, allocation.m_Image, allocation.m_Allocation);
703}
704
705static void DestroyAllAllocations(std::vector<AllocInfo>& allocations)
706{
707 for(size_t i = allocations.size(); i--; )
708 DestroyAllocation(allocations[i]);
709 allocations.clear();
710}
711
// Verifies that the allocation still contains the sequential uint32_t pattern
// starting at m_StartValue (written by CreateBuffer/CreateAllocation).
// Maps the memory temporarily if it is not persistently mapped.
static void ValidateAllocationData(const AllocInfo& allocation)
{
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);

    uint32_t* data = (uint32_t*)allocInfo.pMappedData;
    if(allocInfo.pMappedData == nullptr)
    {
        VkResult res = vmaMapMemory(g_hAllocator, allocation.m_Allocation, (void**)&data);
        assert(res == VK_SUCCESS);
    }

    uint32_t value = allocation.m_StartValue;
    bool ok = true;
    size_t i;
    assert(allocInfo.size % 4 == 0);
    for(i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
    {
        if(data[i] != value++)
        {
            ok = false;
            break;
        }
    }
    assert(ok);

    if(allocInfo.pMappedData == nullptr)
        vmaUnmapMemory(g_hAllocator, allocation.m_Allocation);
}
741
// After defragmentation moved an allocation, the old VkBuffer/VkImage is bound
// to stale memory. This destroys only the Vulkan handle (not the VMA
// allocation), recreates it from the stored create-info, and binds it to the
// allocation's current deviceMemory/offset.
static void RecreateAllocationResource(AllocInfo& allocation)
{
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);

    if(allocation.m_Buffer)
    {
        vkDestroyBuffer(g_hDevice, allocation.m_Buffer, nullptr);

        VkResult res = vkCreateBuffer(g_hDevice, &allocation.m_BufferInfo, nullptr, &allocation.m_Buffer);
        assert(res == VK_SUCCESS);

        // Just to silence validation layer warnings.
        VkMemoryRequirements vkMemReq;
        vkGetBufferMemoryRequirements(g_hDevice, allocation.m_Buffer, &vkMemReq);
        assert(vkMemReq.size == allocation.m_BufferInfo.size);

        res = vkBindBufferMemory(g_hDevice, allocation.m_Buffer, allocInfo.deviceMemory, allocInfo.offset);
        assert(res == VK_SUCCESS);
    }
    else
    {
        vkDestroyImage(g_hDevice, allocation.m_Image, nullptr);

        VkResult res = vkCreateImage(g_hDevice, &allocation.m_ImageInfo, nullptr, &allocation.m_Image);
        assert(res == VK_SUCCESS);

        // Just to silence validation layer warnings.
        VkMemoryRequirements vkMemReq;
        vkGetImageMemoryRequirements(g_hDevice, allocation.m_Image, &vkMemReq);

        res = vkBindImageMemory(g_hDevice, allocation.m_Image, allocInfo.deviceMemory, allocInfo.offset);
        assert(res == VK_SUCCESS);
    }
}
777
778static void Defragment(AllocInfo* allocs, size_t allocCount,
779 const VmaDefragmentationInfo* defragmentationInfo = nullptr,
780 VmaDefragmentationStats* defragmentationStats = nullptr)
781{
782 std::vector<VmaAllocation> vmaAllocs(allocCount);
783 for(size_t i = 0; i < allocCount; ++i)
784 vmaAllocs[i] = allocs[i].m_Allocation;
785
786 std::vector<VkBool32> allocChanged(allocCount);
787
788 ERR_GUARD_VULKAN( vmaDefragment(g_hAllocator, vmaAllocs.data(), allocCount, allocChanged.data(),
789 defragmentationInfo, defragmentationStats) );
790
791 for(size_t i = 0; i < allocCount; ++i)
792 {
793 if(allocChanged[i])
794 {
795 RecreateAllocationResource(allocs[i]);
796 }
797 }
798}
799
800static void ValidateAllocationsData(const AllocInfo* allocs, size_t allocCount)
801{
802 std::for_each(allocs, allocs + allocCount, [](const AllocInfo& allocInfo) {
803 ValidateAllocationData(allocInfo);
804 });
805}
806
807void TestDefragmentationSimple()
808{
809 wprintf(L"Test defragmentation simple\n");
810
811 RandomNumberGenerator rand(667);
812
813 const VkDeviceSize BUF_SIZE = 0x10000;
814 const VkDeviceSize BLOCK_SIZE = BUF_SIZE * 8;
815
816 const VkDeviceSize MIN_BUF_SIZE = 32;
817 const VkDeviceSize MAX_BUF_SIZE = BUF_SIZE * 4;
818 auto RandomBufSize = [&]() -> VkDeviceSize {
819 return align_up<VkDeviceSize>(rand.Generate() % (MAX_BUF_SIZE - MIN_BUF_SIZE + 1) + MIN_BUF_SIZE, 32);
820 };
821
822 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
823 bufCreateInfo.size = BUF_SIZE;
824 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
825
826 VmaAllocationCreateInfo exampleAllocCreateInfo = {};
827 exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
828
829 uint32_t memTypeIndex = UINT32_MAX;
830 vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
831
832 VmaPoolCreateInfo poolCreateInfo = {};
833 poolCreateInfo.blockSize = BLOCK_SIZE;
834 poolCreateInfo.memoryTypeIndex = memTypeIndex;
835
836 VmaPool pool;
837 ERR_GUARD_VULKAN( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) );
838
839 std::vector<AllocInfo> allocations;
840
841 // persistentlyMappedOption = 0 - not persistently mapped.
842 // persistentlyMappedOption = 1 - persistently mapped.
843 for(uint32_t persistentlyMappedOption = 0; persistentlyMappedOption < 2; ++persistentlyMappedOption)
844 {
845 wprintf(L" Persistently mapped option = %u\n", persistentlyMappedOption);
846 const bool persistentlyMapped = persistentlyMappedOption != 0;
847
848 // # Test 1
849 // Buffers of fixed size.
850 // Fill 2 blocks. Remove odd buffers. Defragment everything.
851 // Expected result: at least 1 block freed.
852 {
853 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
854 {
855 AllocInfo allocInfo;
856 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
857 allocations.push_back(allocInfo);
858 }
859
860 for(size_t i = 1; i < allocations.size(); ++i)
861 {
862 DestroyAllocation(allocations[i]);
863 allocations.erase(allocations.begin() + i);
864 }
865
866 VmaDefragmentationStats defragStats;
867 Defragment(allocations.data(), allocations.size(), nullptr, &defragStats);
868 assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
869 assert(defragStats.deviceMemoryBlocksFreed >= 1);
870
871 ValidateAllocationsData(allocations.data(), allocations.size());
872
873 DestroyAllAllocations(allocations);
874 }
875
876 // # Test 2
877 // Buffers of fixed size.
878 // Fill 2 blocks. Remove odd buffers. Defragment one buffer at time.
879 // Expected result: Each of 4 interations makes some progress.
880 {
881 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
882 {
883 AllocInfo allocInfo;
884 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
885 allocations.push_back(allocInfo);
886 }
887
888 for(size_t i = 1; i < allocations.size(); ++i)
889 {
890 DestroyAllocation(allocations[i]);
891 allocations.erase(allocations.begin() + i);
892 }
893
894 VmaDefragmentationInfo defragInfo = {};
895 defragInfo.maxAllocationsToMove = 1;
896 defragInfo.maxBytesToMove = BUF_SIZE;
897
898 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE / 2; ++i)
899 {
900 VmaDefragmentationStats defragStats;
901 Defragment(allocations.data(), allocations.size(), &defragInfo, &defragStats);
902 assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
903 }
904
905 ValidateAllocationsData(allocations.data(), allocations.size());
906
907 DestroyAllAllocations(allocations);
908 }
909
910 // # Test 3
911 // Buffers of variable size.
912 // Create a number of buffers. Remove some percent of them.
913 // Defragment while having some percent of them unmovable.
914 // Expected result: Just simple validation.
915 {
916 for(size_t i = 0; i < 100; ++i)
917 {
918 VkBufferCreateInfo localBufCreateInfo = bufCreateInfo;
919 localBufCreateInfo.size = RandomBufSize();
920
921 AllocInfo allocInfo;
922 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
923 allocations.push_back(allocInfo);
924 }
925
926 const uint32_t percentToDelete = 60;
927 const size_t numberToDelete = allocations.size() * percentToDelete / 100;
928 for(size_t i = 0; i < numberToDelete; ++i)
929 {
930 size_t indexToDelete = rand.Generate() % (uint32_t)allocations.size();
931 DestroyAllocation(allocations[indexToDelete]);
932 allocations.erase(allocations.begin() + indexToDelete);
933 }
934
935 // Non-movable allocations will be at the beginning of allocations array.
936 const uint32_t percentNonMovable = 20;
937 const size_t numberNonMovable = allocations.size() * percentNonMovable / 100;
938 for(size_t i = 0; i < numberNonMovable; ++i)
939 {
940 size_t indexNonMovable = i + rand.Generate() % (uint32_t)(allocations.size() - i);
941 if(indexNonMovable != i)
942 std::swap(allocations[i], allocations[indexNonMovable]);
943 }
944
945 VmaDefragmentationStats defragStats;
946 Defragment(
947 allocations.data() + numberNonMovable,
948 allocations.size() - numberNonMovable,
949 nullptr, &defragStats);
950
951 ValidateAllocationsData(allocations.data(), allocations.size());
952
953 DestroyAllAllocations(allocations);
954 }
955 }
956
957 vmaDestroyPool(g_hAllocator, pool);
958}
959
960void TestDefragmentationFull()
961{
962 std::vector<AllocInfo> allocations;
963
964 // Create initial allocations.
965 for(size_t i = 0; i < 400; ++i)
966 {
967 AllocInfo allocation;
968 CreateAllocation(allocation, g_hAllocator);
969 allocations.push_back(allocation);
970 }
971
972 // Delete random allocations
973 const size_t allocationsToDeletePercent = 80;
974 size_t allocationsToDelete = allocations.size() * allocationsToDeletePercent / 100;
975 for(size_t i = 0; i < allocationsToDelete; ++i)
976 {
977 size_t index = (size_t)rand() % allocations.size();
978 DestroyAllocation(allocations[index]);
979 allocations.erase(allocations.begin() + index);
980 }
981
982 for(size_t i = 0; i < allocations.size(); ++i)
983 ValidateAllocationData(allocations[i]);
984
Adam Sawickie44c6262018-06-15 14:30:39 +0200985 SaveAllocatorStatsToFile(L"Before.csv");
Adam Sawickib8333fb2018-03-13 16:15:53 +0100986
987 {
988 std::vector<VmaAllocation> vmaAllocations(allocations.size());
989 for(size_t i = 0; i < allocations.size(); ++i)
990 vmaAllocations[i] = allocations[i].m_Allocation;
991
992 const size_t nonMovablePercent = 0;
993 size_t nonMovableCount = vmaAllocations.size() * nonMovablePercent / 100;
994 for(size_t i = 0; i < nonMovableCount; ++i)
995 {
996 size_t index = (size_t)rand() % vmaAllocations.size();
997 vmaAllocations.erase(vmaAllocations.begin() + index);
998 }
999
1000 const uint32_t defragCount = 1;
1001 for(uint32_t defragIndex = 0; defragIndex < defragCount; ++defragIndex)
1002 {
1003 std::vector<VkBool32> allocationsChanged(vmaAllocations.size());
1004
1005 VmaDefragmentationInfo defragmentationInfo;
1006 defragmentationInfo.maxAllocationsToMove = UINT_MAX;
1007 defragmentationInfo.maxBytesToMove = SIZE_MAX;
1008
1009 wprintf(L"Defragmentation #%u\n", defragIndex);
1010
1011 time_point begTime = std::chrono::high_resolution_clock::now();
1012
1013 VmaDefragmentationStats stats;
1014 VkResult res = vmaDefragment(g_hAllocator, vmaAllocations.data(), vmaAllocations.size(), allocationsChanged.data(), &defragmentationInfo, &stats);
1015 assert(res >= 0);
1016
1017 float defragmentDuration = ToFloatSeconds(std::chrono::high_resolution_clock::now() - begTime);
1018
1019 wprintf(L"Moved allocations %u, bytes %llu\n", stats.allocationsMoved, stats.bytesMoved);
1020 wprintf(L"Freed blocks %u, bytes %llu\n", stats.deviceMemoryBlocksFreed, stats.bytesFreed);
1021 wprintf(L"Time: %.2f s\n", defragmentDuration);
1022
1023 for(size_t i = 0; i < vmaAllocations.size(); ++i)
1024 {
1025 if(allocationsChanged[i])
1026 {
1027 RecreateAllocationResource(allocations[i]);
1028 }
1029 }
1030
1031 for(size_t i = 0; i < allocations.size(); ++i)
1032 ValidateAllocationData(allocations[i]);
1033
1034 wchar_t fileName[MAX_PATH];
1035 swprintf(fileName, MAX_PATH, L"After_%02u.csv", defragIndex);
Adam Sawickie44c6262018-06-15 14:30:39 +02001036 SaveAllocatorStatsToFile(fileName);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001037 }
1038 }
1039
1040 // Destroy all remaining allocations.
1041 DestroyAllAllocations(allocations);
1042}
1043
1044static void TestUserData()
1045{
1046 VkResult res;
1047
1048 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1049 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
1050 bufCreateInfo.size = 0x10000;
1051
1052 for(uint32_t testIndex = 0; testIndex < 2; ++testIndex)
1053 {
1054 // Opaque pointer
1055 {
1056
1057 void* numberAsPointer = (void*)(size_t)0xC2501FF3u;
1058 void* pointerToSomething = &res;
1059
1060 VmaAllocationCreateInfo allocCreateInfo = {};
1061 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1062 allocCreateInfo.pUserData = numberAsPointer;
1063 if(testIndex == 1)
1064 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1065
1066 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1067 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1068 assert(res == VK_SUCCESS);
1069 assert(allocInfo.pUserData = numberAsPointer);
1070
1071 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1072 assert(allocInfo.pUserData == numberAsPointer);
1073
1074 vmaSetAllocationUserData(g_hAllocator, alloc, pointerToSomething);
1075 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1076 assert(allocInfo.pUserData == pointerToSomething);
1077
1078 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1079 }
1080
1081 // String
1082 {
1083 const char* name1 = "Buffer name \\\"\'<>&% \nSecond line .,;=";
1084 const char* name2 = "2";
1085 const size_t name1Len = strlen(name1);
1086
1087 char* name1Buf = new char[name1Len + 1];
1088 strcpy_s(name1Buf, name1Len + 1, name1);
1089
1090 VmaAllocationCreateInfo allocCreateInfo = {};
1091 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1092 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
1093 allocCreateInfo.pUserData = name1Buf;
1094 if(testIndex == 1)
1095 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1096
1097 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1098 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1099 assert(res == VK_SUCCESS);
1100 assert(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf);
1101 assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
1102
1103 delete[] name1Buf;
1104
1105 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1106 assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
1107
1108 vmaSetAllocationUserData(g_hAllocator, alloc, (void*)name2);
1109 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1110 assert(strcmp(name2, (const char*)allocInfo.pUserData) == 0);
1111
1112 vmaSetAllocationUserData(g_hAllocator, alloc, nullptr);
1113 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1114 assert(allocInfo.pUserData == nullptr);
1115
1116 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1117 }
1118 }
1119}
1120
1121static void TestMemoryRequirements()
1122{
1123 VkResult res;
1124 VkBuffer buf;
1125 VmaAllocation alloc;
1126 VmaAllocationInfo allocInfo;
1127
1128 const VkPhysicalDeviceMemoryProperties* memProps;
1129 vmaGetMemoryProperties(g_hAllocator, &memProps);
1130
1131 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1132 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1133 bufInfo.size = 128;
1134
1135 VmaAllocationCreateInfo allocCreateInfo = {};
1136
1137 // No requirements.
1138 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1139 assert(res == VK_SUCCESS);
1140 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1141
1142 // Usage.
1143 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1144 allocCreateInfo.requiredFlags = 0;
1145 allocCreateInfo.preferredFlags = 0;
1146 allocCreateInfo.memoryTypeBits = UINT32_MAX;
1147
1148 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1149 assert(res == VK_SUCCESS);
1150 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
1151 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1152
1153 // Required flags, preferred flags.
1154 allocCreateInfo.usage = VMA_MEMORY_USAGE_UNKNOWN;
1155 allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1156 allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
1157 allocCreateInfo.memoryTypeBits = 0;
1158
1159 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1160 assert(res == VK_SUCCESS);
1161 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
1162 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
1163 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1164
1165 // memoryTypeBits.
1166 const uint32_t memType = allocInfo.memoryType;
1167 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1168 allocCreateInfo.requiredFlags = 0;
1169 allocCreateInfo.preferredFlags = 0;
1170 allocCreateInfo.memoryTypeBits = 1u << memType;
1171
1172 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1173 assert(res == VK_SUCCESS);
1174 assert(allocInfo.memoryType == memType);
1175 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1176
1177}
1178
1179static void TestBasics()
1180{
1181 VkResult res;
1182
1183 TestMemoryRequirements();
1184
1185 // Lost allocation
1186 {
1187 VmaAllocation alloc = VK_NULL_HANDLE;
1188 vmaCreateLostAllocation(g_hAllocator, &alloc);
1189 assert(alloc != VK_NULL_HANDLE);
1190
1191 VmaAllocationInfo allocInfo;
1192 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1193 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
1194 assert(allocInfo.size == 0);
1195
1196 vmaFreeMemory(g_hAllocator, alloc);
1197 }
1198
1199 // Allocation that is MAPPED and not necessarily HOST_VISIBLE.
1200 {
1201 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1202 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
1203 bufCreateInfo.size = 128;
1204
1205 VmaAllocationCreateInfo allocCreateInfo = {};
1206 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1207 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
1208
1209 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1210 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1211 assert(res == VK_SUCCESS);
1212
1213 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1214
1215 // Same with OWN_MEMORY.
1216 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1217
1218 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1219 assert(res == VK_SUCCESS);
1220
1221 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1222 }
1223
1224 TestUserData();
1225}
1226
void TestHeapSizeLimit()
{
    // Tests VmaAllocatorCreateInfo::pHeapSizeLimit: creates a dedicated
    // allocator with every heap capped at HEAP_SIZE_LIMIT, fills exactly that
    // budget, and verifies the next allocation fails with
    // VK_ERROR_OUT_OF_DEVICE_MEMORY.
    const VkDeviceSize HEAP_SIZE_LIMIT = 1ull * 1024 * 1024 * 1024; // 1 GB
    const VkDeviceSize BLOCK_SIZE = 128ull * 1024 * 1024; // 128 MB

    // Apply the same limit to every heap index.
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = HEAP_SIZE_LIMIT;
    }

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.physicalDevice = g_hPhysicalDevice;
    allocatorCreateInfo.device = g_hDevice;
    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;

    // Separate allocator so the limit doesn't affect other tests using g_hAllocator.
    VmaAllocator hAllocator;
    VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &hAllocator);
    assert(res == VK_SUCCESS);

    // Buffer + its allocation, tracked so everything can be destroyed at the end.
    struct Item
    {
        VkBuffer hBuf;
        VmaAllocation hAlloc;
    };
    std::vector<Item> items;

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    // 1. Allocate two blocks of Own Memory, half the size of BLOCK_SIZE.
    // Together they consume one BLOCK_SIZE worth of the heap budget.
    VmaAllocationInfo ownAllocInfo;
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        bufCreateInfo.size = BLOCK_SIZE / 2;

        for(size_t i = 0; i < 2; ++i)
        {
            Item item;
            res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, &ownAllocInfo);
            assert(res == VK_SUCCESS);
            items.push_back(item);
        }
    }

    // Create pool to make sure allocations must be out of this memory type.
    // Uses the memory type chosen for the dedicated allocations above.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = ownAllocInfo.memoryType;
    poolCreateInfo.blockSize = BLOCK_SIZE;

    VmaPool hPool;
    res = vmaCreatePool(hAllocator, &poolCreateInfo, &hPool);
    assert(res == VK_SUCCESS);

    // 2. Allocate normal buffers from all the remaining memory.
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.pool = hPool;

        bufCreateInfo.size = BLOCK_SIZE / 2;

        // (limit/blockSize - 1) blocks remain after step 1; each block fits
        // two half-block buffers, hence the "* 2".
        const size_t bufCount = ((HEAP_SIZE_LIMIT / BLOCK_SIZE) - 1) * 2;
        for(size_t i = 0; i < bufCount; ++i)
        {
            Item item;
            res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, nullptr);
            assert(res == VK_SUCCESS);
            items.push_back(item);
        }
    }

    // 3. Allocation of one more (even small) buffer should fail.
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.pool = hPool;

        bufCreateInfo.size = 128;

        VkBuffer hBuf;
        VmaAllocation hAlloc;
        res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &hBuf, &hAlloc, nullptr);
        assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }

    // Destroy everything.
    for(size_t i = items.size(); i--; )
    {
        vmaDestroyBuffer(hAllocator, items[i].hBuf, items[i].hAlloc);
    }

    vmaDestroyPool(hAllocator, hPool);

    vmaDestroyAllocator(hAllocator);
}
1324
Adam Sawicki212a4a62018-06-14 15:44:45 +02001325#if VMA_DEBUG_MARGIN
Adam Sawicki73b16652018-06-11 16:39:25 +02001326static void TestDebugMargin()
1327{
1328 if(VMA_DEBUG_MARGIN == 0)
1329 {
1330 return;
1331 }
1332
1333 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
Adam Sawicki212a4a62018-06-14 15:44:45 +02001334 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
Adam Sawicki73b16652018-06-11 16:39:25 +02001335
1336 VmaAllocationCreateInfo allocCreateInfo = {};
Adam Sawicki212a4a62018-06-14 15:44:45 +02001337 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
Adam Sawicki73b16652018-06-11 16:39:25 +02001338
1339 // Create few buffers of different size.
1340 const size_t BUF_COUNT = 10;
1341 BufferInfo buffers[BUF_COUNT];
1342 VmaAllocationInfo allocInfo[BUF_COUNT];
1343 for(size_t i = 0; i < 10; ++i)
1344 {
1345 bufInfo.size = (VkDeviceSize)(i + 1) * 64;
Adam Sawicki212a4a62018-06-14 15:44:45 +02001346 // Last one will be mapped.
1347 allocCreateInfo.flags = (i == BUF_COUNT - 1) ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
Adam Sawicki73b16652018-06-11 16:39:25 +02001348
1349 VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buffers[i].Buffer, &buffers[i].Allocation, &allocInfo[i]);
1350 assert(res == VK_SUCCESS);
1351 // Margin is preserved also at the beginning of a block.
1352 assert(allocInfo[i].offset >= VMA_DEBUG_MARGIN);
Adam Sawicki212a4a62018-06-14 15:44:45 +02001353
1354 if(i == BUF_COUNT - 1)
1355 {
1356 // Fill with data.
1357 assert(allocInfo[i].pMappedData != nullptr);
1358 // Uncomment this "+ 1" to overwrite past end of allocation and check corruption detection.
1359 memset(allocInfo[i].pMappedData, 0xFF, bufInfo.size /* + 1 */);
1360 }
Adam Sawicki73b16652018-06-11 16:39:25 +02001361 }
1362
1363 // Check if their offsets preserve margin between them.
1364 std::sort(allocInfo, allocInfo + BUF_COUNT, [](const VmaAllocationInfo& lhs, const VmaAllocationInfo& rhs) -> bool
1365 {
1366 if(lhs.deviceMemory != rhs.deviceMemory)
1367 {
1368 return lhs.deviceMemory < rhs.deviceMemory;
1369 }
1370 return lhs.offset < rhs.offset;
1371 });
1372 for(size_t i = 1; i < BUF_COUNT; ++i)
1373 {
1374 if(allocInfo[i].deviceMemory == allocInfo[i - 1].deviceMemory)
1375 {
1376 assert(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN);
1377 }
1378 }
1379
Adam Sawicki212a4a62018-06-14 15:44:45 +02001380 VkResult res = vmaCheckCorruption(g_hAllocator, UINT32_MAX);
1381 assert(res == VK_SUCCESS);
1382
Adam Sawicki73b16652018-06-11 16:39:25 +02001383 // Destroy all buffers.
1384 for(size_t i = BUF_COUNT; i--; )
1385 {
1386 vmaDestroyBuffer(g_hAllocator, buffers[i].Buffer, buffers[i].Allocation);
1387 }
1388}
Adam Sawicki212a4a62018-06-14 15:44:45 +02001389#endif
Adam Sawicki73b16652018-06-11 16:39:25 +02001390
Adam Sawicki0876c0d2018-06-20 15:18:11 +02001391static void TestLinearAllocator()
1392{
1393 wprintf(L"Test linear allocator\n");
1394
1395 RandomNumberGenerator rand{645332};
1396
1397 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1398 sampleBufCreateInfo.size = 1024; // Whatever.
1399 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1400
1401 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
1402 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1403
1404 VmaPoolCreateInfo poolCreateInfo = {};
1405 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
1406 assert(res == VK_SUCCESS);
1407
Adam Sawickiee082772018-06-20 17:45:49 +02001408 poolCreateInfo.blockSize = 1024 * 300;
Adam Sawicki0876c0d2018-06-20 15:18:11 +02001409 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
1410 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
1411
1412 VmaPool pool = nullptr;
1413 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
1414 assert(res == VK_SUCCESS);
1415
1416 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
1417
1418 VmaAllocationCreateInfo allocCreateInfo = {};
1419 allocCreateInfo.pool = pool;
1420
1421 constexpr size_t maxBufCount = 100;
1422 std::vector<BufferInfo> bufInfo;
1423
1424 constexpr VkDeviceSize bufSizeMin = 16;
1425 constexpr VkDeviceSize bufSizeMax = 1024;
1426 VmaAllocationInfo allocInfo;
1427 VkDeviceSize prevOffset = 0;
1428
1429 // Test one-time free.
1430 for(size_t i = 0; i < 2; ++i)
1431 {
1432 // Allocate number of buffers of varying size that surely fit into this block.
1433 VkDeviceSize bufSumSize = 0;
1434 for(size_t i = 0; i < maxBufCount; ++i)
1435 {
1436 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1437 BufferInfo newBufInfo;
1438 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1439 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1440 assert(res == VK_SUCCESS);
1441 assert(i == 0 || allocInfo.offset > prevOffset);
1442 bufInfo.push_back(newBufInfo);
1443 prevOffset = allocInfo.offset;
1444 bufSumSize += bufCreateInfo.size;
1445 }
1446
1447 // Validate pool stats.
1448 VmaPoolStats stats;
1449 vmaGetPoolStats(g_hAllocator, pool, &stats);
1450 assert(stats.size == poolCreateInfo.blockSize);
1451 assert(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize);
1452 assert(stats.allocationCount == bufInfo.size());
1453
1454 // Destroy the buffers in random order.
1455 while(!bufInfo.empty())
1456 {
1457 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
1458 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
1459 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1460 bufInfo.erase(bufInfo.begin() + indexToDestroy);
1461 }
1462 }
1463
1464 // Test stack.
1465 {
1466 // Allocate number of buffers of varying size that surely fit into this block.
1467 for(size_t i = 0; i < maxBufCount; ++i)
1468 {
1469 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1470 BufferInfo newBufInfo;
1471 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1472 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1473 assert(res == VK_SUCCESS);
1474 assert(i == 0 || allocInfo.offset > prevOffset);
1475 bufInfo.push_back(newBufInfo);
1476 prevOffset = allocInfo.offset;
1477 }
1478
1479 // Destroy few buffers from top of the stack.
1480 for(size_t i = 0; i < maxBufCount / 5; ++i)
1481 {
1482 const BufferInfo& currBufInfo = bufInfo.back();
1483 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1484 bufInfo.pop_back();
1485 }
1486
1487 // Create some more
1488 for(size_t i = 0; i < maxBufCount / 5; ++i)
1489 {
1490 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1491 BufferInfo newBufInfo;
1492 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1493 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1494 assert(res == VK_SUCCESS);
1495 assert(i == 0 || allocInfo.offset > prevOffset);
1496 bufInfo.push_back(newBufInfo);
1497 prevOffset = allocInfo.offset;
1498 }
1499
1500 // Destroy the buffers in reverse order.
1501 while(!bufInfo.empty())
1502 {
1503 const BufferInfo& currBufInfo = bufInfo.back();
1504 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1505 bufInfo.pop_back();
1506 }
1507 }
1508
Adam Sawickiee082772018-06-20 17:45:49 +02001509 // Test ring buffer.
1510 {
1511 // Allocate number of buffers that surely fit into this block.
1512 bufCreateInfo.size = bufSizeMax;
1513 for(size_t i = 0; i < maxBufCount; ++i)
1514 {
1515 BufferInfo newBufInfo;
1516 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1517 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1518 assert(res == VK_SUCCESS);
1519 assert(i == 0 || allocInfo.offset > prevOffset);
1520 bufInfo.push_back(newBufInfo);
1521 prevOffset = allocInfo.offset;
1522 }
1523
1524 // Free and allocate new buffers so many times that we make sure we wrap-around at least once.
1525 const size_t buffersPerIter = maxBufCount / 10 - 1;
1526 const size_t iterCount = poolCreateInfo.blockSize / bufCreateInfo.size / buffersPerIter * 2;
1527 for(size_t iter = 0; iter < iterCount; ++iter)
1528 {
1529 for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)
1530 {
1531 const BufferInfo& currBufInfo = bufInfo.front();
1532 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1533 bufInfo.erase(bufInfo.begin());
1534 }
1535 for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)
1536 {
1537 BufferInfo newBufInfo;
1538 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1539 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1540 assert(res == VK_SUCCESS);
1541 bufInfo.push_back(newBufInfo);
1542 }
1543 }
1544
1545 // Allocate buffers until we reach out-of-memory.
1546 uint32_t debugIndex = 0;
1547 while(res == VK_SUCCESS)
1548 {
1549 BufferInfo newBufInfo;
1550 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1551 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1552 if(res == VK_SUCCESS)
1553 {
1554 bufInfo.push_back(newBufInfo);
1555 }
1556 else
1557 {
1558 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1559 }
1560 ++debugIndex;
1561 }
1562
1563 // Destroy the buffers in random order.
1564 while(!bufInfo.empty())
1565 {
1566 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
1567 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
1568 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1569 bufInfo.erase(bufInfo.begin() + indexToDestroy);
1570 }
1571 }
1572
Adam Sawicki0876c0d2018-06-20 15:18:11 +02001573 vmaDestroyPool(g_hAllocator, pool);
1574}
1575
Adam Sawickib8333fb2018-03-13 16:15:53 +01001576static void TestPool_SameSize()
1577{
1578 const VkDeviceSize BUF_SIZE = 1024 * 1024;
1579 const size_t BUF_COUNT = 100;
1580 VkResult res;
1581
1582 RandomNumberGenerator rand{123};
1583
1584 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1585 bufferInfo.size = BUF_SIZE;
1586 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
1587
1588 uint32_t memoryTypeBits = UINT32_MAX;
1589 {
1590 VkBuffer dummyBuffer;
1591 res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer);
1592 assert(res == VK_SUCCESS);
1593
1594 VkMemoryRequirements memReq;
1595 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
1596 memoryTypeBits = memReq.memoryTypeBits;
1597
1598 vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);
1599 }
1600
1601 VmaAllocationCreateInfo poolAllocInfo = {};
1602 poolAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1603 uint32_t memTypeIndex;
1604 res = vmaFindMemoryTypeIndex(
1605 g_hAllocator,
1606 memoryTypeBits,
1607 &poolAllocInfo,
1608 &memTypeIndex);
1609
1610 VmaPoolCreateInfo poolCreateInfo = {};
1611 poolCreateInfo.memoryTypeIndex = memTypeIndex;
1612 poolCreateInfo.blockSize = BUF_SIZE * BUF_COUNT / 4;
1613 poolCreateInfo.minBlockCount = 1;
1614 poolCreateInfo.maxBlockCount = 4;
1615 poolCreateInfo.frameInUseCount = 0;
1616
1617 VmaPool pool;
1618 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
1619 assert(res == VK_SUCCESS);
1620
1621 vmaSetCurrentFrameIndex(g_hAllocator, 1);
1622
1623 VmaAllocationCreateInfo allocInfo = {};
1624 allocInfo.pool = pool;
1625 allocInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
1626 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
1627
1628 struct BufItem
1629 {
1630 VkBuffer Buf;
1631 VmaAllocation Alloc;
1632 };
1633 std::vector<BufItem> items;
1634
1635 // Fill entire pool.
1636 for(size_t i = 0; i < BUF_COUNT; ++i)
1637 {
1638 BufItem item;
1639 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1640 assert(res == VK_SUCCESS);
1641 items.push_back(item);
1642 }
1643
1644 // Make sure that another allocation would fail.
1645 {
1646 BufItem item;
1647 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1648 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1649 }
1650
1651 // Validate that no buffer is lost. Also check that they are not mapped.
1652 for(size_t i = 0; i < items.size(); ++i)
1653 {
1654 VmaAllocationInfo allocInfo;
1655 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1656 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
1657 assert(allocInfo.pMappedData == nullptr);
1658 }
1659
1660 // Free some percent of random items.
1661 {
1662 const size_t PERCENT_TO_FREE = 10;
1663 size_t itemsToFree = items.size() * PERCENT_TO_FREE / 100;
1664 for(size_t i = 0; i < itemsToFree; ++i)
1665 {
1666 size_t index = (size_t)rand.Generate() % items.size();
1667 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
1668 items.erase(items.begin() + index);
1669 }
1670 }
1671
1672 // Randomly allocate and free items.
1673 {
1674 const size_t OPERATION_COUNT = BUF_COUNT;
1675 for(size_t i = 0; i < OPERATION_COUNT; ++i)
1676 {
1677 bool allocate = rand.Generate() % 2 != 0;
1678 if(allocate)
1679 {
1680 if(items.size() < BUF_COUNT)
1681 {
1682 BufItem item;
1683 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1684 assert(res == VK_SUCCESS);
1685 items.push_back(item);
1686 }
1687 }
1688 else // Free
1689 {
1690 if(!items.empty())
1691 {
1692 size_t index = (size_t)rand.Generate() % items.size();
1693 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
1694 items.erase(items.begin() + index);
1695 }
1696 }
1697 }
1698 }
1699
1700 // Allocate up to maximum.
1701 while(items.size() < BUF_COUNT)
1702 {
1703 BufItem item;
1704 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1705 assert(res == VK_SUCCESS);
1706 items.push_back(item);
1707 }
1708
1709 // Validate that no buffer is lost.
1710 for(size_t i = 0; i < items.size(); ++i)
1711 {
1712 VmaAllocationInfo allocInfo;
1713 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1714 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
1715 }
1716
1717 // Next frame.
1718 vmaSetCurrentFrameIndex(g_hAllocator, 2);
1719
1720 // Allocate another BUF_COUNT buffers.
1721 for(size_t i = 0; i < BUF_COUNT; ++i)
1722 {
1723 BufItem item;
1724 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1725 assert(res == VK_SUCCESS);
1726 items.push_back(item);
1727 }
1728
1729 // Make sure the first BUF_COUNT is lost. Delete them.
1730 for(size_t i = 0; i < BUF_COUNT; ++i)
1731 {
1732 VmaAllocationInfo allocInfo;
1733 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1734 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
1735 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1736 }
1737 items.erase(items.begin(), items.begin() + BUF_COUNT);
1738
1739 // Validate that no buffer is lost.
1740 for(size_t i = 0; i < items.size(); ++i)
1741 {
1742 VmaAllocationInfo allocInfo;
1743 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1744 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
1745 }
1746
1747 // Free one item.
1748 vmaDestroyBuffer(g_hAllocator, items.back().Buf, items.back().Alloc);
1749 items.pop_back();
1750
1751 // Validate statistics.
1752 {
1753 VmaPoolStats poolStats = {};
1754 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
1755 assert(poolStats.allocationCount == items.size());
1756 assert(poolStats.size = BUF_COUNT * BUF_SIZE);
1757 assert(poolStats.unusedRangeCount == 1);
1758 assert(poolStats.unusedRangeSizeMax == BUF_SIZE);
1759 assert(poolStats.unusedSize == BUF_SIZE);
1760 }
1761
1762 // Free all remaining items.
1763 for(size_t i = items.size(); i--; )
1764 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1765 items.clear();
1766
1767 // Allocate maximum items again.
1768 for(size_t i = 0; i < BUF_COUNT; ++i)
1769 {
1770 BufItem item;
1771 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1772 assert(res == VK_SUCCESS);
1773 items.push_back(item);
1774 }
1775
1776 // Delete every other item.
1777 for(size_t i = 0; i < BUF_COUNT / 2; ++i)
1778 {
1779 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1780 items.erase(items.begin() + i);
1781 }
1782
1783 // Defragment!
1784 {
1785 std::vector<VmaAllocation> allocationsToDefragment(items.size());
1786 for(size_t i = 0; i < items.size(); ++i)
1787 allocationsToDefragment[i] = items[i].Alloc;
1788
1789 VmaDefragmentationStats defragmentationStats;
1790 res = vmaDefragment(g_hAllocator, allocationsToDefragment.data(), items.size(), nullptr, nullptr, &defragmentationStats);
1791 assert(res == VK_SUCCESS);
1792 assert(defragmentationStats.deviceMemoryBlocksFreed == 2);
1793 }
1794
1795 // Free all remaining items.
1796 for(size_t i = items.size(); i--; )
1797 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1798 items.clear();
1799
1800 ////////////////////////////////////////////////////////////////////////////////
1801 // Test for vmaMakePoolAllocationsLost
1802
1803 // Allocate 4 buffers on frame 10.
1804 vmaSetCurrentFrameIndex(g_hAllocator, 10);
1805 for(size_t i = 0; i < 4; ++i)
1806 {
1807 BufItem item;
1808 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1809 assert(res == VK_SUCCESS);
1810 items.push_back(item);
1811 }
1812
1813 // Touch first 2 of them on frame 11.
1814 vmaSetCurrentFrameIndex(g_hAllocator, 11);
1815 for(size_t i = 0; i < 2; ++i)
1816 {
1817 VmaAllocationInfo allocInfo;
1818 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1819 }
1820
1821 // vmaMakePoolAllocationsLost. Only remaining 2 should be lost.
1822 size_t lostCount = 0xDEADC0DE;
1823 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
1824 assert(lostCount == 2);
1825
1826 // Make another call. Now 0 should be lost.
1827 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
1828 assert(lostCount == 0);
1829
1830 // Make another call, with null count. Should not crash.
1831 vmaMakePoolAllocationsLost(g_hAllocator, pool, nullptr);
1832
1833 // END: Free all remaining items.
1834 for(size_t i = items.size(); i--; )
1835 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1836
1837 items.clear();
1838
Adam Sawickid2924172018-06-11 12:48:46 +02001839 ////////////////////////////////////////////////////////////////////////////////
1840 // Test for allocation too large for pool
1841
1842 {
1843 VmaAllocationCreateInfo allocCreateInfo = {};
1844 allocCreateInfo.pool = pool;
1845
1846 VkMemoryRequirements memReq;
1847 memReq.memoryTypeBits = UINT32_MAX;
1848 memReq.alignment = 1;
1849 memReq.size = poolCreateInfo.blockSize + 4;
1850
1851 VmaAllocation alloc = nullptr;
1852 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
1853 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr);
1854 }
1855
Adam Sawickib8333fb2018-03-13 16:15:53 +01001856 vmaDestroyPool(g_hAllocator, pool);
1857}
1858
// Returns true if every byte in the given memory range equals `pattern`.
// An empty range (size == 0) is trivially valid.
static bool ValidatePattern(const void* pMemory, size_t size, uint8_t pattern)
{
    const uint8_t* pCurr = (const uint8_t*)pMemory;
    const uint8_t* const pEnd = pCurr + size;
    while(pCurr != pEnd)
    {
        if(*pCurr++ != pattern)
        {
            return false;
        }
    }
    return true;
}
1871
1872static void TestAllocationsInitialization()
1873{
1874 VkResult res;
1875
1876 const size_t BUF_SIZE = 1024;
1877
1878 // Create pool.
1879
1880 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1881 bufInfo.size = BUF_SIZE;
1882 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1883
1884 VmaAllocationCreateInfo dummyBufAllocCreateInfo = {};
1885 dummyBufAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1886
1887 VmaPoolCreateInfo poolCreateInfo = {};
1888 poolCreateInfo.blockSize = BUF_SIZE * 10;
1889 poolCreateInfo.minBlockCount = 1; // To keep memory alive while pool exists.
1890 poolCreateInfo.maxBlockCount = 1;
1891 res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufInfo, &dummyBufAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
1892 assert(res == VK_SUCCESS);
1893
1894 VmaAllocationCreateInfo bufAllocCreateInfo = {};
1895 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &bufAllocCreateInfo.pool);
1896 assert(res == VK_SUCCESS);
1897
1898 // Create one persistently mapped buffer to keep memory of this block mapped,
1899 // so that pointer to mapped data will remain (more or less...) valid even
1900 // after destruction of other allocations.
1901
1902 bufAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
1903 VkBuffer firstBuf;
1904 VmaAllocation firstAlloc;
1905 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &firstBuf, &firstAlloc, nullptr);
1906 assert(res == VK_SUCCESS);
1907
1908 // Test buffers.
1909
1910 for(uint32_t i = 0; i < 2; ++i)
1911 {
1912 const bool persistentlyMapped = i == 0;
1913 bufAllocCreateInfo.flags = persistentlyMapped ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
1914 VkBuffer buf;
1915 VmaAllocation alloc;
1916 VmaAllocationInfo allocInfo;
1917 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &buf, &alloc, &allocInfo);
1918 assert(res == VK_SUCCESS);
1919
1920 void* pMappedData;
1921 if(!persistentlyMapped)
1922 {
1923 res = vmaMapMemory(g_hAllocator, alloc, &pMappedData);
1924 assert(res == VK_SUCCESS);
1925 }
1926 else
1927 {
1928 pMappedData = allocInfo.pMappedData;
1929 }
1930
1931 // Validate initialized content
1932 bool valid = ValidatePattern(pMappedData, BUF_SIZE, 0xDC);
1933 assert(valid);
1934
1935 if(!persistentlyMapped)
1936 {
1937 vmaUnmapMemory(g_hAllocator, alloc);
1938 }
1939
1940 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1941
1942 // Validate freed content
1943 valid = ValidatePattern(pMappedData, BUF_SIZE, 0xEF);
1944 assert(valid);
1945 }
1946
1947 vmaDestroyBuffer(g_hAllocator, firstBuf, firstAlloc);
1948 vmaDestroyPool(g_hAllocator, bufAllocCreateInfo.pool);
1949}
1950
// Multithreaded benchmark of allocations in a single custom VmaPool.
// Spawns config.ThreadCount worker threads; on every frame each worker
// reshuffles its set of "used" buffers/images, touches each used item
// (recreating ones that became lost), and records per-operation timings.
// Aggregated min/avg/max allocation/deallocation times and lost/failed
// allocation statistics are written to outResult.
// Frames are synchronized with the main thread via Win32 auto-reset events,
// which is why this benchmark is Windows-only.
static void TestPool_Benchmark(
    PoolTestResult& outResult,
    const PoolTestConfig& config)
{
    assert(config.ThreadCount > 0);

    RandomNumberGenerator mainRand{config.RandSeed};

    // Sum of all Probability weights; used below for weighted random
    // selection of an AllocationSize entry.
    uint32_t allocationSizeProbabilitySum = std::accumulate(
        config.AllocationSizes.begin(),
        config.AllocationSizes.end(),
        0u,
        [](uint32_t sum, const AllocationSize& allocSize) {
            return sum + allocSize.Probability;
        });

    // Template create-infos; size/extent are overwritten per item before use.
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 256; // Whatever.
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent.width = 256; // Whatever.
    imageInfo.extent.height = 256; // Whatever.
    imageInfo.extent.depth = 1;
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // LINEAR if CPU memory.
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // TRANSFER_SRC if CPU memory.
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    // Query memory type bits compatible with the buffer shape above, using a
    // throwaway VkBuffer.
    uint32_t bufferMemoryTypeBits = UINT32_MAX;
    {
        VkBuffer dummyBuffer;
        VkResult res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer);
        assert(res == VK_SUCCESS);

        VkMemoryRequirements memReq;
        vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
        bufferMemoryTypeBits = memReq.memoryTypeBits;

        vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);
    }

    // Same for images, using a throwaway VkImage.
    uint32_t imageMemoryTypeBits = UINT32_MAX;
    {
        VkImage dummyImage;
        VkResult res = vkCreateImage(g_hDevice, &imageInfo, nullptr, &dummyImage);
        assert(res == VK_SUCCESS);

        VkMemoryRequirements memReq;
        vkGetImageMemoryRequirements(g_hDevice, dummyImage, &memReq);
        imageMemoryTypeBits = memReq.memoryTypeBits;

        vkDestroyImage(g_hDevice, dummyImage, nullptr);
    }

    // Intersect the two masks when the config mixes buffers and images; on
    // some GPUs no single memory type supports both, in which case the test
    // is skipped with a warning rather than failed.
    uint32_t memoryTypeBits = 0;
    if(config.UsesBuffers() && config.UsesImages())
    {
        memoryTypeBits = bufferMemoryTypeBits & imageMemoryTypeBits;
        if(memoryTypeBits == 0)
        {
            PrintWarning(L"Cannot test buffers + images in the same memory pool on this GPU.");
            return;
        }
    }
    else if(config.UsesBuffers())
        memoryTypeBits = bufferMemoryTypeBits;
    else if(config.UsesImages())
        memoryTypeBits = imageMemoryTypeBits;
    else
        assert(0);

    // Single fixed-size block so the benchmark measures allocation within a
    // bounded pool; frameInUseCount=1 enables the lost-allocation mechanism.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = 0;
    poolCreateInfo.minBlockCount = 1;
    poolCreateInfo.maxBlockCount = 1;
    poolCreateInfo.blockSize = config.PoolSize;
    poolCreateInfo.frameInUseCount = 1;

    VmaAllocationCreateInfo dummyAllocCreateInfo = {};
    dummyAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    vmaFindMemoryTypeIndex(g_hAllocator, memoryTypeBits, &dummyAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);

    VmaPool pool;
    VkResult res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
    assert(res == VK_SUCCESS);

    // Start time measurement - after creating pool and initializing data structures.
    time_point timeBeg = std::chrono::high_resolution_clock::now();

    ////////////////////////////////////////////////////////////////////////////////
    // ThreadProc
    // NOTE(review): the [&] capture includes the outer `res` above, and each
    // worker assigns to it below — with ThreadCount > 1 that is an unguarded
    // concurrent write (benign for the benchmark's results, but a data race);
    // consider a thread-local result variable instead — confirm intent.
    auto ThreadProc = [&](
        PoolTestThreadResult* outThreadResult,
        uint32_t randSeed,
        HANDLE frameStartEvent,
        HANDLE frameEndEvent) -> void
    {
        RandomNumberGenerator threadRand{randSeed};

        // Initialize per-thread accumulators so min/max fold correctly.
        outThreadResult->AllocationTimeMin = duration::max();
        outThreadResult->AllocationTimeSum = duration::zero();
        outThreadResult->AllocationTimeMax = duration::min();
        outThreadResult->DeallocationTimeMin = duration::max();
        outThreadResult->DeallocationTimeSum = duration::zero();
        outThreadResult->DeallocationTimeMax = duration::min();
        outThreadResult->AllocationCount = 0;
        outThreadResult->DeallocationCount = 0;
        outThreadResult->LostAllocationCount = 0;
        outThreadResult->LostAllocationTotalSize = 0;
        outThreadResult->FailedAllocationCount = 0;
        outThreadResult->FailedAllocationTotalSize = 0;

        // One benchmark item: either a buffer (BufferSize != 0) or an image.
        struct Item
        {
            VkDeviceSize BufferSize;
            VkExtent2D ImageSize;
            VkBuffer Buf;
            VkImage Image;
            VmaAllocation Alloc;

            // Approximate payload size, used for the failed/lost size totals
            // (assumes 4 bytes per pixel for images).
            VkDeviceSize CalcSizeBytes() const
            {
                return BufferSize +
                    ImageSize.width * ImageSize.height * 4;
            }
        };
        std::vector<Item> unusedItems, usedItems;

        const size_t threadTotalItemCount = config.TotalItemCount / config.ThreadCount;

        // Create all items - all unused, not yet allocated.
        for(size_t i = 0; i < threadTotalItemCount; ++i)
        {
            Item item = {};

            // Weighted random choice of an AllocationSize bucket.
            uint32_t allocSizeIndex = 0;
            uint32_t r = threadRand.Generate() % allocationSizeProbabilitySum;
            while(r >= config.AllocationSizes[allocSizeIndex].Probability)
                r -= config.AllocationSizes[allocSizeIndex++].Probability;

            const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
            if(allocSize.BufferSizeMax > 0)
            {
                assert(allocSize.BufferSizeMin > 0);
                assert(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0);
                if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
                    item.BufferSize = allocSize.BufferSizeMin;
                else
                {
                    // Random size within range, rounded down to a multiple of 16.
                    item.BufferSize = allocSize.BufferSizeMin + threadRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
                    item.BufferSize = item.BufferSize / 16 * 16;
                }
            }
            else
            {
                assert(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0);
                if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
                    item.ImageSize.width = item.ImageSize.height = allocSize.ImageSizeMax;
                else
                {
                    item.ImageSize.width = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
                    item.ImageSize.height = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
                }
            }

            unusedItems.push_back(item);
        }

        // Creates the VkBuffer or VkImage for `item` in the benchmarked pool,
        // timing the call. Allocations may become lost and may make others
        // lost (CAN_BECOME_LOST | CAN_MAKE_OTHER_LOST).
        auto Allocate = [&](Item& item) -> VkResult
        {
            VmaAllocationCreateInfo allocCreateInfo = {};
            allocCreateInfo.pool = pool;
            allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
                VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

            if(item.BufferSize)
            {
                bufferInfo.size = item.BufferSize;
                PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
                return vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocCreateInfo, &item.Buf, &item.Alloc, nullptr);
            }
            else
            {
                assert(item.ImageSize.width && item.ImageSize.height);

                imageInfo.extent.width = item.ImageSize.width;
                imageInfo.extent.height = item.ImageSize.height;
                PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
                return vmaCreateImage(g_hAllocator, &imageInfo, &allocCreateInfo, &item.Image, &item.Alloc, nullptr);
            }
        };

        ////////////////////////////////////////////////////////////////////////////////
        // Frames
        for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
        {
            // Block until the main thread starts this frame.
            WaitForSingleObject(frameStartEvent, INFINITE);

            // Always make some percent of used bufs unused, to choose different used ones.
            const size_t bufsToMakeUnused = usedItems.size() * config.ItemsToMakeUnusedPercent / 100;
            for(size_t i = 0; i < bufsToMakeUnused; ++i)
            {
                size_t index = threadRand.Generate() % usedItems.size();
                unusedItems.push_back(usedItems[index]);
                usedItems.erase(usedItems.begin() + index);
            }

            // Determine which bufs we want to use in this frame.
            const size_t usedBufCount = (threadRand.Generate() % (config.UsedItemCountMax - config.UsedItemCountMin) + config.UsedItemCountMin)
                / config.ThreadCount;
            // NOTE(review): strict `<` — a target equal to the total item
            // count would trip this assert; presumably intentional headroom.
            assert(usedBufCount < usedItems.size() + unusedItems.size());
            // Move some used to unused.
            while(usedBufCount < usedItems.size())
            {
                size_t index = threadRand.Generate() % usedItems.size();
                unusedItems.push_back(usedItems[index]);
                usedItems.erase(usedItems.begin() + index);
            }
            // Move some unused to used.
            while(usedBufCount > usedItems.size())
            {
                size_t index = threadRand.Generate() % unusedItems.size();
                usedItems.push_back(unusedItems[index]);
                unusedItems.erase(unusedItems.begin() + index);
            }

            uint32_t touchExistingCount = 0;
            uint32_t touchLostCount = 0;
            uint32_t createSucceededCount = 0;
            uint32_t createFailedCount = 0;

            // Touch all used bufs. If not created or lost, allocate.
            for(size_t i = 0; i < usedItems.size(); ++i)
            {
                Item& item = usedItems[i];
                // Not yet created.
                if(item.Alloc == VK_NULL_HANDLE)
                {
                    res = Allocate(item);
                    ++outThreadResult->AllocationCount;
                    if(res != VK_SUCCESS)
                    {
                        item.Alloc = VK_NULL_HANDLE;
                        item.Buf = VK_NULL_HANDLE;
                        ++outThreadResult->FailedAllocationCount;
                        outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
                        ++createFailedCount;
                    }
                    else
                        ++createSucceededCount;
                }
                else
                {
                    // Touch.
                    // vmaGetAllocationInfo marks the allocation as used in the
                    // current frame, preventing it from becoming lost.
                    VmaAllocationInfo allocInfo;
                    vmaGetAllocationInfo(g_hAllocator, item.Alloc, &allocInfo);
                    // Lost.
                    if(allocInfo.deviceMemory == VK_NULL_HANDLE)
                    {
                        ++touchLostCount;

                        // Destroy.
                        {
                            PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
                            if(item.Buf)
                                vmaDestroyBuffer(g_hAllocator, item.Buf, item.Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, item.Image, item.Alloc);
                            ++outThreadResult->DeallocationCount;
                        }
                        item.Alloc = VK_NULL_HANDLE;
                        item.Buf = VK_NULL_HANDLE;

                        ++outThreadResult->LostAllocationCount;
                        outThreadResult->LostAllocationTotalSize += item.CalcSizeBytes();

                        // Recreate.
                        res = Allocate(item);
                        ++outThreadResult->AllocationCount;
                        // Creation failed.
                        if(res != VK_SUCCESS)
                        {
                            ++outThreadResult->FailedAllocationCount;
                            outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
                            ++createFailedCount;
                        }
                        else
                            ++createSucceededCount;
                    }
                    else
                        ++touchExistingCount;
                }
            }

            /*
            printf("Thread %u frame %u: Touch existing %u lost %u, create succeeded %u failed %u\n",
                randSeed, frameIndex,
                touchExistingCount, touchLostCount,
                createSucceededCount, createFailedCount);
            */

            // Signal the main thread that this worker finished the frame.
            SetEvent(frameEndEvent);
        }

        // Free all remaining items.
        for(size_t i = usedItems.size(); i--; )
        {
            PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
            if(usedItems[i].Buf)
                vmaDestroyBuffer(g_hAllocator, usedItems[i].Buf, usedItems[i].Alloc);
            else
                vmaDestroyImage(g_hAllocator, usedItems[i].Image, usedItems[i].Alloc);
            ++outThreadResult->DeallocationCount;
        }
        for(size_t i = unusedItems.size(); i--; )
        {
            PoolDeallocationTimeRegisterObj timeRegisterOb(*outThreadResult);
            if(unusedItems[i].Buf)
                vmaDestroyBuffer(g_hAllocator, unusedItems[i].Buf, unusedItems[i].Alloc);
            else
                vmaDestroyImage(g_hAllocator, unusedItems[i].Image, unusedItems[i].Alloc);
            ++outThreadResult->DeallocationCount;
        }
    };

    // Launch threads.
    // Each worker gets its own auto-reset start/end event pair.
    uint32_t threadRandSeed = mainRand.Generate();
    std::vector<HANDLE> frameStartEvents{config.ThreadCount};
    std::vector<HANDLE> frameEndEvents{config.ThreadCount};
    std::vector<std::thread> bkgThreads;
    std::vector<PoolTestThreadResult> threadResults{config.ThreadCount};
    for(uint32_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
    {
        frameStartEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
        frameEndEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
        bkgThreads.emplace_back(std::bind(
            ThreadProc,
            &threadResults[threadIndex],
            threadRandSeed + threadIndex,
            frameStartEvents[threadIndex],
            frameEndEvents[threadIndex]));
    }

    // Execute frames.
    // Main thread advances the allocator's frame index, releases all workers,
    // then waits for every worker to finish the frame.
    assert(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS);
    for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
    {
        vmaSetCurrentFrameIndex(g_hAllocator, frameIndex);
        for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
            SetEvent(frameStartEvents[threadIndex]);
        WaitForMultipleObjects(config.ThreadCount, &frameEndEvents[0], TRUE, INFINITE);
    }

    // Wait for threads finished
    for(size_t i = 0; i < bkgThreads.size(); ++i)
    {
        bkgThreads[i].join();
        CloseHandle(frameEndEvents[i]);
        CloseHandle(frameStartEvents[i]);
    }
    bkgThreads.clear();

    // Finish time measurement - before destroying pool.
    outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;

    vmaDestroyPool(g_hAllocator, pool);

    // Aggregate per-thread results: min/max fold, sums accumulated then
    // divided by operation counts to produce averages.
    outResult.AllocationTimeMin = duration::max();
    outResult.AllocationTimeAvg = duration::zero();
    outResult.AllocationTimeMax = duration::min();
    outResult.DeallocationTimeMin = duration::max();
    outResult.DeallocationTimeAvg = duration::zero();
    outResult.DeallocationTimeMax = duration::min();
    outResult.LostAllocationCount = 0;
    outResult.LostAllocationTotalSize = 0;
    outResult.FailedAllocationCount = 0;
    outResult.FailedAllocationTotalSize = 0;
    size_t allocationCount = 0;
    size_t deallocationCount = 0;
    for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
    {
        const PoolTestThreadResult& threadResult = threadResults[threadIndex];
        outResult.AllocationTimeMin = std::min(outResult.AllocationTimeMin, threadResult.AllocationTimeMin);
        outResult.AllocationTimeMax = std::max(outResult.AllocationTimeMax, threadResult.AllocationTimeMax);
        outResult.AllocationTimeAvg += threadResult.AllocationTimeSum;
        outResult.DeallocationTimeMin = std::min(outResult.DeallocationTimeMin, threadResult.DeallocationTimeMin);
        outResult.DeallocationTimeMax = std::max(outResult.DeallocationTimeMax, threadResult.DeallocationTimeMax);
        outResult.DeallocationTimeAvg += threadResult.DeallocationTimeSum;
        allocationCount += threadResult.AllocationCount;
        deallocationCount += threadResult.DeallocationCount;
        outResult.FailedAllocationCount += threadResult.FailedAllocationCount;
        outResult.FailedAllocationTotalSize += threadResult.FailedAllocationTotalSize;
        outResult.LostAllocationCount += threadResult.LostAllocationCount;
        outResult.LostAllocationTotalSize += threadResult.LostAllocationTotalSize;
    }
    if(allocationCount)
        outResult.AllocationTimeAvg /= allocationCount;
    if(deallocationCount)
        outResult.DeallocationTimeAvg /= deallocationCount;
}
2356
// Reports whether the half-open byte regions [ptr1, ptr1+size1) and
// [ptr2, ptr2+size2) intersect. Two regions starting at the same address are
// always reported as overlapping (matching the historical behavior, even for
// zero sizes).
static inline bool MemoryRegionsOverlap(char* ptr1, size_t size1, char* ptr2, size_t size2)
{
    if(ptr1 == ptr2)
        return true;
    // The regions overlap iff the one that starts first extends past the
    // start of the other.
    const bool firstIsLower = ptr1 < ptr2;
    char* const lo = firstIsLower ? ptr1 : ptr2;
    const size_t loSize = firstIsLower ? size1 : size2;
    char* const hi = firstIsLower ? ptr2 : ptr1;
    return lo + loSize > hi;
}
2366
// Single-threaded test of vmaMapMemory / vmaUnmapMemory reference counting
// and of persistently mapped allocations (VMA_ALLOCATION_CREATE_MAPPED_BIT),
// exercised against default memory, a custom pool, and dedicated allocations.
static void TestMapping()
{
    wprintf(L"Testing mapping...\n");

    VkResult res;
    // Memory type discovered in the TEST_NORMAL iteration; required by
    // TEST_POOL to create a pool in the same memory type.
    uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            assert(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            assert(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufInfo.size = 0x10000;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.pool = pool; // nullptr except in TEST_POOL.
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        VmaAllocationInfo allocInfo;

        // Mapped manually

        // Create 2 buffers.
        BufferInfo bufferInfos[3];
        for(size_t i = 0; i < 2; ++i)
        {
            res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
                &bufferInfos[i].Buffer, &bufferInfos[i].Allocation, &allocInfo);
            assert(res == VK_SUCCESS);
            // Not created with MAPPED_BIT, so no persistent mapping yet.
            assert(allocInfo.pMappedData == nullptr);
            memTypeIndex = allocInfo.memoryType;
        }

        // Map buffer 0.
        char* data00 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data00);
        assert(res == VK_SUCCESS && data00 != nullptr);
        // Touch first and last byte (size is 0x10000) to verify the mapping
        // is readable and writable across the whole range.
        data00[0xFFFF] = data00[0];

        // Map buffer 0 second time.
        // Nested map must return the same pointer (map count incremented).
        char* data01 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data01);
        assert(res == VK_SUCCESS && data01 == data00);

        // Map buffer 1.
        // Distinct allocations must map to non-overlapping regions.
        char* data1 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[1].Allocation, (void**)&data1);
        assert(res == VK_SUCCESS && data1 != nullptr);
        assert(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size));
        data1[0xFFFF] = data1[0];

        // Unmap buffer 0 two times.
        // Map count returns to zero, so pMappedData must read as null again.
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[0].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == nullptr);

        // Unmap buffer 1.
        vmaUnmapMemory(g_hAllocator, bufferInfos[1].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[1].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == nullptr);

        // Create 3rd buffer - persistently mapped.
        allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
            &bufferInfos[2].Buffer, &bufferInfos[2].Allocation, &allocInfo);
        assert(res == VK_SUCCESS && allocInfo.pMappedData != nullptr);

        // Map buffer 2.
        // Manual map on a persistently mapped allocation returns the same pointer.
        char* data2 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[2].Allocation, (void**)&data2);
        assert(res == VK_SUCCESS && data2 == allocInfo.pMappedData);
        data2[0xFFFF] = data2[0];

        // Unmap buffer 2.
        // The persistent mapping survives the manual unmap.
        vmaUnmapMemory(g_hAllocator, bufferInfos[2].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[2].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == data2);

        // Destroy all buffers.
        for(size_t i = 3; i--; )
            vmaDestroyBuffer(g_hAllocator, bufferInfos[i].Buffer, bufferInfos[i].Allocation);

        // pool is nullptr outside TEST_POOL; presumably a no-op then — confirm
        // against vmaDestroyPool's null-handle contract.
        vmaDestroyPool(g_hAllocator, pool);
    }
}
2471
// Stress test of vmaMapMemory / vmaUnmapMemory from 16 concurrent threads,
// mixing unmapped, briefly mapped, long-mapped, doubly mapped, and
// persistently mapped buffers, against default memory, a custom pool, and
// dedicated allocations.
static void TestMappingMultithreaded()
{
    wprintf(L"Testing mapping multithreaded...\n");

    static const uint32_t threadCount = 16;
    static const uint32_t bufferCount = 1024;
    static const uint32_t threadBufferCount = bufferCount / threadCount;

    VkResult res;
    // Written by worker threads, read by the main loop for TEST_POOL.
    // NOTE(review): volatile is not a synchronization primitive — concurrent
    // writes from several threads here are a data race by the C++ memory
    // model; std::atomic<uint32_t> would express this correctly. The read in
    // the main loop happens only after join(), so ordering there is fine.
    volatile uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            // Relies on TEST_NORMAL having run first and published a memory type.
            assert(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            assert(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 0x10000;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.pool = pool;
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        std::thread threads[threadCount];
        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
        {
            // Capture create-infos by value, memTypeIndex by reference so the
            // first thread to finish an allocation can publish it.
            threads[threadIndex] = std::thread([=, &memTypeIndex](){
                // ======== THREAD FUNCTION ========

                RandomNumberGenerator rand{threadIndex};

                // Per-buffer mapping scenario, chosen at random.
                enum class MODE
                {
                    // Don't map this buffer at all.
                    DONT_MAP,
                    // Map and quickly unmap.
                    MAP_FOR_MOMENT,
                    // Map and unmap before destruction.
                    MAP_FOR_LONGER,
                    // Map two times. Quickly unmap, second unmap before destruction.
                    MAP_TWO_TIMES,
                    // Create this buffer as persistently mapped.
                    PERSISTENTLY_MAPPED,
                    COUNT
                };
                std::vector<BufferInfo> bufInfos{threadBufferCount};
                std::vector<MODE> bufModes{threadBufferCount};

                for(uint32_t bufferIndex = 0; bufferIndex < threadBufferCount; ++bufferIndex)
                {
                    BufferInfo& bufInfo = bufInfos[bufferIndex];
                    const MODE mode = (MODE)(rand.Generate() % (uint32_t)MODE::COUNT);
                    bufModes[bufferIndex] = mode;

                    VmaAllocationCreateInfo localAllocCreateInfo = allocCreateInfo;
                    if(mode == MODE::PERSISTENTLY_MAPPED)
                        localAllocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;

                    VmaAllocationInfo allocInfo;
                    VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &localAllocCreateInfo,
                        &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo);
                    assert(res == VK_SUCCESS);

                    // Publish the memory type for the later TEST_POOL iteration.
                    if(memTypeIndex == UINT32_MAX)
                        memTypeIndex = allocInfo.memoryType;

                    char* data = nullptr;

                    if(mode == MODE::PERSISTENTLY_MAPPED)
                    {
                        data = (char*)allocInfo.pMappedData;
                        assert(data != nullptr);
                    }
                    else if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_FOR_LONGER ||
                        mode == MODE::MAP_TWO_TIMES)
                    {
                        assert(data == nullptr);
                        res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data);
                        assert(res == VK_SUCCESS && data != nullptr);

                        if(mode == MODE::MAP_TWO_TIMES)
                        {
                            // Nested map must return the same pointer.
                            char* data2 = nullptr;
                            res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data2);
                            assert(res == VK_SUCCESS && data2 == data);
                        }
                    }
                    else if(mode == MODE::DONT_MAP)
                    {
                        assert(allocInfo.pMappedData == nullptr);
                    }
                    else
                        assert(0);

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];

                    if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfo.Allocation);

                        // After one unmap: MAP_FOR_MOMENT is fully unmapped,
                        // MAP_TWO_TIMES still holds one outstanding map.
                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfo.Allocation, &allocInfo);
                        if(mode == MODE::MAP_FOR_MOMENT)
                            assert(allocInfo.pMappedData == nullptr);
                        else
                            assert(allocInfo.pMappedData == data);
                    }

                    // Randomly yield or sleep to shuffle thread interleaving.
                    switch(rand.Generate() % 3)
                    {
                    case 0: Sleep(0); break; // Yield.
                    case 1: Sleep(10); break; // 10 ms
                    // default: No sleep.
                    }

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];
                }

                // Drop remaining mappings, then destroy all buffers in reverse order.
                for(size_t bufferIndex = threadBufferCount; bufferIndex--; )
                {
                    if(bufModes[bufferIndex] == MODE::MAP_FOR_LONGER ||
                        bufModes[bufferIndex] == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfos[bufferIndex].Allocation);

                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfos[bufferIndex].Allocation, &allocInfo);
                        assert(allocInfo.pMappedData == nullptr);
                    }

                    vmaDestroyBuffer(g_hAllocator, bufInfos[bufferIndex].Buffer, bufInfos[bufferIndex].Allocation);
                }
            });
        }

        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
            threads[threadIndex].join();

        vmaDestroyPool(g_hAllocator, pool);
    }
}
2634
// Writes the CSV column header for rows produced by WriteMainTestResult.
// The string contains no format specifiers, so fputs emits it verbatim.
static void WriteMainTestResultHeader(FILE* file)
{
    fputs(
        "Code,Test,Time,"
        "Config,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Total Memory Allocated (B),"
        "Free Range Size Avg (B),"
        "Free Range Size Max (B)\n",
        file);
}
2651
2652static void WriteMainTestResult(
2653 FILE* file,
2654 const char* codeDescription,
2655 const char* testDescription,
2656 const Config& config, const Result& result)
2657{
2658 float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
2659 float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
2660 float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
2661 float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
2662 float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
2663 float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
2664 float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);
2665
2666 time_t rawTime; time(&rawTime);
2667 struct tm timeInfo; localtime_s(&timeInfo, &rawTime);
2668 char timeStr[128];
2669 strftime(timeStr, _countof(timeStr), "%c", &timeInfo);
2670
2671 fprintf(file,
2672 "%s,%s,%s,"
2673 "BeginBytesToAllocate=%I64u MaxBytesToAllocate=%I64u AdditionalOperationCount=%u ThreadCount=%u FreeOrder=%d,"
2674 "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u\n",
2675 codeDescription,
2676 testDescription,
2677 timeStr,
2678 config.BeginBytesToAllocate, config.MaxBytesToAllocate, config.AdditionalOperationCount, config.ThreadCount, (uint32_t)config.FreeOrder,
2679 totalTimeSeconds * 1e6f,
2680 allocationTimeMinSeconds * 1e6f,
2681 allocationTimeAvgSeconds * 1e6f,
2682 allocationTimeMaxSeconds * 1e6f,
2683 deallocationTimeMinSeconds * 1e6f,
2684 deallocationTimeAvgSeconds * 1e6f,
2685 deallocationTimeMaxSeconds * 1e6f,
2686 result.TotalMemoryAllocated,
2687 result.FreeRangeSizeAvg,
2688 result.FreeRangeSizeMax);
2689}
2690
// Writes the CSV column header for rows produced by WritePoolTestResult.
// The string contains no format specifiers, so fputs emits it verbatim.
static void WritePoolTestResultHeader(FILE* file)
{
    fputs(
        "Code,Test,Time,"
        "Config,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Lost Allocation Count,"
        "Lost Allocation Total Size (B),"
        "Failed Allocation Count,"
        "Failed Allocation Total Size (B)\n",
        file);
}
2708
2709static void WritePoolTestResult(
2710 FILE* file,
2711 const char* codeDescription,
2712 const char* testDescription,
2713 const PoolTestConfig& config,
2714 const PoolTestResult& result)
2715{
2716 float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
2717 float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
2718 float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
2719 float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
2720 float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
2721 float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
2722 float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);
2723
2724 time_t rawTime; time(&rawTime);
2725 struct tm timeInfo; localtime_s(&timeInfo, &rawTime);
2726 char timeStr[128];
2727 strftime(timeStr, _countof(timeStr), "%c", &timeInfo);
2728
2729 fprintf(file,
2730 "%s,%s,%s,"
2731 "ThreadCount=%u PoolSize=%llu FrameCount=%u TotalItemCount=%u UsedItemCount=%u...%u ItemsToMakeUnusedPercent=%u,"
2732 "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u,%I64u\n",
2733 // General
2734 codeDescription,
2735 testDescription,
2736 timeStr,
2737 // Config
2738 config.ThreadCount,
2739 (unsigned long long)config.PoolSize,
2740 config.FrameCount,
2741 config.TotalItemCount,
2742 config.UsedItemCountMin,
2743 config.UsedItemCountMax,
2744 config.ItemsToMakeUnusedPercent,
2745 // Results
2746 totalTimeSeconds * 1e6f,
2747 allocationTimeMinSeconds * 1e6f,
2748 allocationTimeAvgSeconds * 1e6f,
2749 allocationTimeMaxSeconds * 1e6f,
2750 deallocationTimeMinSeconds * 1e6f,
2751 deallocationTimeAvgSeconds * 1e6f,
2752 deallocationTimeMaxSeconds * 1e6f,
2753 result.LostAllocationCount,
2754 result.LostAllocationTotalSize,
2755 result.FailedAllocationCount,
2756 result.FailedAllocationTotalSize);
2757}
2758
2759static void PerformCustomMainTest(FILE* file)
2760{
2761 Config config{};
2762 config.RandSeed = 65735476;
2763 //config.MaxBytesToAllocate = 4ull * 1024 * 1024; // 4 MB
2764 config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
2765 config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
2766 config.FreeOrder = FREE_ORDER::FORWARD;
2767 config.ThreadCount = 16;
2768 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
2769
2770 // Buffers
2771 //config.AllocationSizes.push_back({4, 16, 1024});
2772 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
2773
2774 // Images
2775 //config.AllocationSizes.push_back({4, 0, 0, 4, 32});
2776 //config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
2777
2778 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
2779 config.AdditionalOperationCount = 1024;
2780
2781 Result result{};
2782 VkResult res = MainTest(result, config);
2783 assert(res == VK_SUCCESS);
2784 WriteMainTestResult(file, "Foo", "CustomTest", config, result);
2785}
2786
2787static void PerformCustomPoolTest(FILE* file)
2788{
2789 PoolTestConfig config;
2790 config.PoolSize = 100 * 1024 * 1024;
2791 config.RandSeed = 2345764;
2792 config.ThreadCount = 1;
2793 config.FrameCount = 200;
2794 config.ItemsToMakeUnusedPercent = 2;
2795
2796 AllocationSize allocSize = {};
2797 allocSize.BufferSizeMin = 1024;
2798 allocSize.BufferSizeMax = 1024 * 1024;
2799 allocSize.Probability = 1;
2800 config.AllocationSizes.push_back(allocSize);
2801
2802 allocSize.BufferSizeMin = 0;
2803 allocSize.BufferSizeMax = 0;
2804 allocSize.ImageSizeMin = 128;
2805 allocSize.ImageSizeMax = 1024;
2806 allocSize.Probability = 1;
2807 config.AllocationSizes.push_back(allocSize);
2808
2809 config.PoolSize = config.CalcAvgResourceSize() * 200;
2810 config.UsedItemCountMax = 160;
2811 config.TotalItemCount = config.UsedItemCountMax * 10;
2812 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
2813
2814 g_MemoryAliasingWarningEnabled = false;
2815 PoolTestResult result = {};
2816 TestPool_Benchmark(result, config);
2817 g_MemoryAliasingWarningEnabled = true;
2818
2819 WritePoolTestResult(file, "Code desc", "Test desc", config, result);
2820}
2821
// Selects how exhaustive the automated benchmark matrix is: larger values
// enable more parameter combinations in PerformMainTests/PerformPoolTests
// and, at CONFIG_TYPE_MAXIMUM, repeated runs of each combination.
// Kept as a plain enum (not enum class) because the code below compares
// values relationally, e.g. `ConfigType >= CONFIG_TYPE_LARGE`.
enum CONFIG_TYPE {
    CONFIG_TYPE_MINIMUM,
    CONFIG_TYPE_SMALL,
    CONFIG_TYPE_AVERAGE,
    CONFIG_TYPE_LARGE,
    CONFIG_TYPE_MAXIMUM,
    CONFIG_TYPE_COUNT
};

// Compile-time selection of benchmark breadth used by the Perform*Tests below.
static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_SMALL;
//static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_LARGE;
// Label written to the "code description" CSV column to identify this build.
static const char* CODE_DESCRIPTION = "Foo";
2834
// Runs MainTest() over a combinatorial matrix of configurations and appends
// one CSV row per run to `file` via WriteMainTestResult().
// Axes of the matrix:
//   1. thread count + probability of threads sharing allocations,
//   2. buffers vs. images vs. both,
//   3. small vs. large vs. both allocation sizes,
//   4. varying sizes vs. a set of constant sizes,
//   5. fraction of the budget allocated up front vs. via extra operations.
// The breadth of each axis is controlled by the compile-time ConfigType.
static void PerformMainTests(FILE* file)
{
    // At the largest config, repeat each combination to average out noise.
    uint32_t repeatCount = 1;
    if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;

    // Settings common to all runs.
    Config config{};
    config.RandSeed = 65735476;
    config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
    config.FreeOrder = FREE_ORDER::FORWARD;

    // Axis 1: thread count and common-allocation percentage.
    size_t threadCountCount = 1;
    switch(ConfigType)
    {
    case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
    case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
    case CONFIG_TYPE_AVERAGE: threadCountCount = 3; break;
    case CONFIG_TYPE_LARGE: threadCountCount = 5; break;
    case CONFIG_TYPE_MAXIMUM: threadCountCount = 7; break;
    default: assert(0);
    }
    for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
    {
        // desc1..desc5 accumulate a human-readable name for the combination.
        std::string desc1;

        switch(threadCountIndex)
        {
        case 0:
            desc1 += "1_thread";
            config.ThreadCount = 1;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
            break;
        case 1:
            desc1 += "16_threads+0%_common";
            config.ThreadCount = 16;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
            break;
        case 2:
            desc1 += "16_threads+50%_common";
            config.ThreadCount = 16;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
            break;
        case 3:
            desc1 += "16_threads+100%_common";
            config.ThreadCount = 16;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
            break;
        case 4:
            desc1 += "2_threads+0%_common";
            config.ThreadCount = 2;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
            break;
        case 5:
            desc1 += "2_threads+50%_common";
            config.ThreadCount = 2;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
            break;
        case 6:
            desc1 += "2_threads+100%_common";
            config.ThreadCount = 2;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
            break;
        default:
            assert(0);
        }

        // Axis 2: 0 = buffers, 1 = images, 2 = buffers and images
        size_t buffersVsImagesCount = 2;
        if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
        for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
        {
            std::string desc2 = desc1;
            switch(buffersVsImagesIndex)
            {
            case 0: desc2 += " Buffers"; break;
            case 1: desc2 += " Images"; break;
            case 2: desc2 += " Buffers+Images"; break;
            default: assert(0);
            }

            // Axis 3: 0 = small, 1 = large, 2 = small and large
            size_t smallVsLargeCount = 2;
            if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
            for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
            {
                std::string desc3 = desc2;
                switch(smallVsLargeIndex)
                {
                case 0: desc3 += " Small"; break;
                case 1: desc3 += " Large"; break;
                case 2: desc3 += " Small+Large"; break;
                default: assert(0);
                }

                // Larger budget whenever large allocations are in play.
                if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
                    config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
                else
                    config.MaxBytesToAllocate = 4ull * 1024 * 1024;

                // Axis 4: 0 = varying sizes min...max, 1 = set of constant sizes
                size_t constantSizesCount = 1;
                if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
                for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
                {
                    std::string desc4 = desc3;
                    switch(constantSizesIndex)
                    {
                    case 0: desc4 += " Varying_sizes"; break;
                    case 1: desc4 += " Constant_sizes"; break;
                    default: assert(0);
                    }

                    // Build the allocation-size set for this combination of
                    // axes 2-4 from scratch.
                    config.AllocationSizes.clear();
                    // Buffers present
                    if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
                    {
                        // Small
                        if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 16, 1024});
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 16, 16});
                                config.AllocationSizes.push_back({1, 64, 64});
                                config.AllocationSizes.push_back({1, 256, 256});
                                config.AllocationSizes.push_back({1, 1024, 1024});
                            }
                        }
                        // Large
                        if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 0x10000, 0x10000});
                                config.AllocationSizes.push_back({1, 0x80000, 0x80000});
                                config.AllocationSizes.push_back({1, 0x200000, 0x200000});
                                config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
                            }
                        }
                    }
                    // Images present
                    if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
                    {
                        // Small
                        if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 0, 0, 4, 32});
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 0, 0, 4, 4});
                                config.AllocationSizes.push_back({1, 0, 0, 8, 8});
                                config.AllocationSizes.push_back({1, 0, 0, 16, 16});
                                config.AllocationSizes.push_back({1, 0, 0, 32, 32});
                            }
                        }
                        // Large
                        if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 0, 0, 256, 256});
                                config.AllocationSizes.push_back({1, 0, 0, 512, 512});
                                config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
                                config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
                            }
                        }
                    }

                    // Axis 5: 0 = 100%, additional_operations = 0, 1 = 50%, 2 = 5%, 3 = 95% additional_operations = a lot
                    size_t beginBytesToAllocateCount = 1;
                    if(ConfigType >= CONFIG_TYPE_SMALL) ++beginBytesToAllocateCount;
                    if(ConfigType >= CONFIG_TYPE_AVERAGE) ++beginBytesToAllocateCount;
                    if(ConfigType >= CONFIG_TYPE_LARGE) ++beginBytesToAllocateCount;
                    for(size_t beginBytesToAllocateIndex = 0; beginBytesToAllocateIndex < beginBytesToAllocateCount; ++beginBytesToAllocateIndex)
                    {
                        std::string desc5 = desc4;

                        switch(beginBytesToAllocateIndex)
                        {
                        case 0:
                            desc5 += " Allocate_100%";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate;
                            config.AdditionalOperationCount = 0;
                            break;
                        case 1:
                            desc5 += " Allocate_50%+Operations";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate * 50 / 100;
                            config.AdditionalOperationCount = 1024;
                            break;
                        case 2:
                            desc5 += " Allocate_5%+Operations";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
                            config.AdditionalOperationCount = 1024;
                            break;
                        case 3:
                            desc5 += " Allocate_95%+Operations";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate * 95 / 100;
                            config.AdditionalOperationCount = 1024;
                            break;
                        default:
                            assert(0);
                        }

                        const char* testDescription = desc5.c_str();

                        // Execute this combination (repeatedly at MAXIMUM) and
                        // append one CSV row per run.
                        for(size_t repeat = 0; repeat < repeatCount; ++repeat)
                        {
                            printf("%s Repeat %u\n", testDescription, (uint32_t)repeat);

                            Result result{};
                            VkResult res = MainTest(result, config);
                            assert(res == VK_SUCCESS);
                            WriteMainTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
                        }
                    }
                }
            }
        }
    }
}
3068
3069static void PerformPoolTests(FILE* file)
3070{
3071 const size_t AVG_RESOURCES_PER_POOL = 300;
3072
3073 uint32_t repeatCount = 1;
3074 if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;
3075
3076 PoolTestConfig config{};
3077 config.RandSeed = 2346343;
3078 config.FrameCount = 200;
3079 config.ItemsToMakeUnusedPercent = 2;
3080
3081 size_t threadCountCount = 1;
3082 switch(ConfigType)
3083 {
3084 case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
3085 case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
3086 case CONFIG_TYPE_AVERAGE: threadCountCount = 2; break;
3087 case CONFIG_TYPE_LARGE: threadCountCount = 3; break;
3088 case CONFIG_TYPE_MAXIMUM: threadCountCount = 3; break;
3089 default: assert(0);
3090 }
3091 for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
3092 {
3093 std::string desc1;
3094
3095 switch(threadCountIndex)
3096 {
3097 case 0:
3098 desc1 += "1_thread";
3099 config.ThreadCount = 1;
3100 break;
3101 case 1:
3102 desc1 += "16_threads";
3103 config.ThreadCount = 16;
3104 break;
3105 case 2:
3106 desc1 += "2_threads";
3107 config.ThreadCount = 2;
3108 break;
3109 default:
3110 assert(0);
3111 }
3112
3113 // 0 = buffers, 1 = images, 2 = buffers and images
3114 size_t buffersVsImagesCount = 2;
3115 if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
3116 for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
3117 {
3118 std::string desc2 = desc1;
3119 switch(buffersVsImagesIndex)
3120 {
3121 case 0: desc2 += " Buffers"; break;
3122 case 1: desc2 += " Images"; break;
3123 case 2: desc2 += " Buffers+Images"; break;
3124 default: assert(0);
3125 }
3126
3127 // 0 = small, 1 = large, 2 = small and large
3128 size_t smallVsLargeCount = 2;
3129 if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
3130 for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
3131 {
3132 std::string desc3 = desc2;
3133 switch(smallVsLargeIndex)
3134 {
3135 case 0: desc3 += " Small"; break;
3136 case 1: desc3 += " Large"; break;
3137 case 2: desc3 += " Small+Large"; break;
3138 default: assert(0);
3139 }
3140
3141 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3142 config.PoolSize = 6ull * 1024 * 1024 * 1024; // 6 GB
3143 else
3144 config.PoolSize = 4ull * 1024 * 1024;
3145
3146 // 0 = varying sizes min...max, 1 = set of constant sizes
3147 size_t constantSizesCount = 1;
3148 if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
3149 for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
3150 {
3151 std::string desc4 = desc3;
3152 switch(constantSizesIndex)
3153 {
3154 case 0: desc4 += " Varying_sizes"; break;
3155 case 1: desc4 += " Constant_sizes"; break;
3156 default: assert(0);
3157 }
3158
3159 config.AllocationSizes.clear();
3160 // Buffers present
3161 if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
3162 {
3163 // Small
3164 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3165 {
3166 // Varying size
3167 if(constantSizesIndex == 0)
3168 config.AllocationSizes.push_back({4, 16, 1024});
3169 // Constant sizes
3170 else
3171 {
3172 config.AllocationSizes.push_back({1, 16, 16});
3173 config.AllocationSizes.push_back({1, 64, 64});
3174 config.AllocationSizes.push_back({1, 256, 256});
3175 config.AllocationSizes.push_back({1, 1024, 1024});
3176 }
3177 }
3178 // Large
3179 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3180 {
3181 // Varying size
3182 if(constantSizesIndex == 0)
3183 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
3184 // Constant sizes
3185 else
3186 {
3187 config.AllocationSizes.push_back({1, 0x10000, 0x10000});
3188 config.AllocationSizes.push_back({1, 0x80000, 0x80000});
3189 config.AllocationSizes.push_back({1, 0x200000, 0x200000});
3190 config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
3191 }
3192 }
3193 }
3194 // Images present
3195 if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
3196 {
3197 // Small
3198 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3199 {
3200 // Varying size
3201 if(constantSizesIndex == 0)
3202 config.AllocationSizes.push_back({4, 0, 0, 4, 32});
3203 // Constant sizes
3204 else
3205 {
3206 config.AllocationSizes.push_back({1, 0, 0, 4, 4});
3207 config.AllocationSizes.push_back({1, 0, 0, 8, 8});
3208 config.AllocationSizes.push_back({1, 0, 0, 16, 16});
3209 config.AllocationSizes.push_back({1, 0, 0, 32, 32});
3210 }
3211 }
3212 // Large
3213 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3214 {
3215 // Varying size
3216 if(constantSizesIndex == 0)
3217 config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
3218 // Constant sizes
3219 else
3220 {
3221 config.AllocationSizes.push_back({1, 0, 0, 256, 256});
3222 config.AllocationSizes.push_back({1, 0, 0, 512, 512});
3223 config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
3224 config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
3225 }
3226 }
3227 }
3228
3229 const VkDeviceSize avgResourceSize = config.CalcAvgResourceSize();
3230 config.PoolSize = avgResourceSize * AVG_RESOURCES_PER_POOL;
3231
3232 // 0 = 66%, 1 = 133%, 2 = 100%, 3 = 33%, 4 = 166%
3233 size_t subscriptionModeCount;
3234 switch(ConfigType)
3235 {
3236 case CONFIG_TYPE_MINIMUM: subscriptionModeCount = 2; break;
3237 case CONFIG_TYPE_SMALL: subscriptionModeCount = 2; break;
3238 case CONFIG_TYPE_AVERAGE: subscriptionModeCount = 3; break;
3239 case CONFIG_TYPE_LARGE: subscriptionModeCount = 5; break;
3240 case CONFIG_TYPE_MAXIMUM: subscriptionModeCount = 5; break;
3241 default: assert(0);
3242 }
3243 for(size_t subscriptionModeIndex = 0; subscriptionModeIndex < subscriptionModeCount; ++subscriptionModeIndex)
3244 {
3245 std::string desc5 = desc4;
3246
3247 switch(subscriptionModeIndex)
3248 {
3249 case 0:
3250 desc5 += " Subscription_66%";
3251 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 66 / 100;
3252 break;
3253 case 1:
3254 desc5 += " Subscription_133%";
3255 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 133 / 100;
3256 break;
3257 case 2:
3258 desc5 += " Subscription_100%";
3259 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL;
3260 break;
3261 case 3:
3262 desc5 += " Subscription_33%";
3263 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 33 / 100;
3264 break;
3265 case 4:
3266 desc5 += " Subscription_166%";
3267 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 166 / 100;
3268 break;
3269 default:
3270 assert(0);
3271 }
3272
3273 config.TotalItemCount = config.UsedItemCountMax * 5;
3274 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
3275
3276 const char* testDescription = desc5.c_str();
3277
3278 for(size_t repeat = 0; repeat < repeatCount; ++repeat)
3279 {
3280 printf("%s Repeat %u\n", testDescription, (uint32_t)repeat);
3281
3282 PoolTestResult result{};
3283 g_MemoryAliasingWarningEnabled = false;
3284 TestPool_Benchmark(result, config);
3285 g_MemoryAliasingWarningEnabled = true;
3286 WritePoolTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
3287 }
3288 }
3289 }
3290 }
3291 }
3292 }
3293}
3294
3295void Test()
3296{
3297 wprintf(L"TESTING:\n");
3298
Adam Sawicki212a4a62018-06-14 15:44:45 +02003299 // TEMP tests
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003300TestLinearAllocator();
3301return;
Adam Sawicki212a4a62018-06-14 15:44:45 +02003302
Adam Sawickib8333fb2018-03-13 16:15:53 +01003303 // # Simple tests
3304
3305 TestBasics();
Adam Sawicki212a4a62018-06-14 15:44:45 +02003306#if VMA_DEBUG_MARGIN
3307 TestDebugMargin();
3308#else
3309 TestPool_SameSize();
3310 TestHeapSizeLimit();
3311#endif
Adam Sawickie44c6262018-06-15 14:30:39 +02003312#if VMA_DEBUG_INITIALIZE_ALLOCATIONS
3313 TestAllocationsInitialization();
3314#endif
Adam Sawickib8333fb2018-03-13 16:15:53 +01003315 TestMapping();
3316 TestMappingMultithreaded();
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003317 TestLinearAllocator();
Adam Sawickib8333fb2018-03-13 16:15:53 +01003318 TestDefragmentationSimple();
3319 TestDefragmentationFull();
3320
3321 // # Detailed tests
3322 FILE* file;
3323 fopen_s(&file, "Results.csv", "w");
3324 assert(file != NULL);
3325
3326 WriteMainTestResultHeader(file);
3327 PerformMainTests(file);
3328 //PerformCustomMainTest(file);
3329
3330 WritePoolTestResultHeader(file);
3331 PerformPoolTests(file);
3332 //PerformCustomPoolTest(file);
3333
3334 fclose(file);
3335
3336 wprintf(L"Done.\n");
3337}
3338
Adam Sawickif1a793c2018-03-13 15:42:22 +01003339#endif // #ifdef _WIN32