blob: 8b3100851eb47f68d8e5ba83ec30d90e5e34e3b4 [file] [log] [blame]
#include "Tests.h"
#include "VmaUsage.h"
#include "Common.h"
#include <algorithm>
#include <atomic>
#include <mutex>
#include <numeric>
#include <thread>
Adam Sawickif1a793c2018-03-13 15:42:22 +01007
8#ifdef _WIN32
9
// Order in which a test frees its remaining allocations at the end.
// COUNT is the number of valid values, not a valid order itself.
enum class FREE_ORDER { FORWARD, BACKWARD, RANDOM, COUNT };
11
// One weighted entry of a randomized allocation-size distribution.
// Probability is a relative weight. Exactly one of the two ranges is active:
// buffer sizes when BufferSizeMax > 0, otherwise square-ish image dimensions
// (see GetNextAllocationSize in MainTest, which asserts this exclusivity).
struct AllocationSize
{
    uint32_t Probability;
    VkDeviceSize BufferSizeMin, BufferSizeMax;
    uint32_t ImageSizeMin, ImageSizeMax;
};
18
// Parameters driving MainTest: how much to allocate, with which randomized
// sizes and memory usages, on how many threads, and in which order to free.
struct Config
{
    uint32_t RandSeed;
    VkDeviceSize BeginBytesToAllocate;   // Initial fill target, split evenly across threads.
    uint32_t AdditionalOperationCount;   // Random alloc/free ops after the initial fill, split across threads.
    VkDeviceSize MaxBytesToAllocate;     // Per-run cap during the additional-operations phase.
    uint32_t MemUsageProbability[4]; // For VMA_MEMORY_USAGE_*
    std::vector<AllocationSize> AllocationSizes; // Weighted size distribution.
    uint32_t ThreadCount;
    // Chance (0..100) that an allocation goes to / is freed from the list
    // shared between threads rather than the thread's private list.
    uint32_t ThreadsUsingCommonAllocationsProbabilityPercent;
    FREE_ORDER FreeOrder; // Order of the final deallocation phase.
};
31
// Aggregate results of MainTest. The *Avg duration fields are accumulated as
// raw sums by TimeRegisterObj and divided by the allocation count at the end
// of MainTest.
struct Result
{
    duration TotalTime;
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax;
    VkDeviceSize TotalMemoryAllocated;             // usedBytes + unusedBytes from vmaCalculateStats.
    VkDeviceSize FreeRangeSizeAvg, FreeRangeSizeMax; // Unused-range stats from vmaCalculateStats.
};
40
// Forward declarations for the defragmentation tests defined later in this file.
void TestDefragmentationSimple();
void TestDefragmentationFull();
43
44struct PoolTestConfig
45{
46 uint32_t RandSeed;
47 uint32_t ThreadCount;
48 VkDeviceSize PoolSize;
49 uint32_t FrameCount;
50 uint32_t TotalItemCount;
51 // Range for number of items used in each frame.
52 uint32_t UsedItemCountMin, UsedItemCountMax;
53 // Percent of items to make unused, and possibly make some others used in each frame.
54 uint32_t ItemsToMakeUnusedPercent;
55 std::vector<AllocationSize> AllocationSizes;
56
57 VkDeviceSize CalcAvgResourceSize() const
58 {
59 uint32_t probabilitySum = 0;
60 VkDeviceSize sizeSum = 0;
61 for(size_t i = 0; i < AllocationSizes.size(); ++i)
62 {
63 const AllocationSize& allocSize = AllocationSizes[i];
64 if(allocSize.BufferSizeMax > 0)
65 sizeSum += (allocSize.BufferSizeMin + allocSize.BufferSizeMax) / 2 * allocSize.Probability;
66 else
67 {
68 const VkDeviceSize avgDimension = (allocSize.ImageSizeMin + allocSize.ImageSizeMax) / 2;
69 sizeSum += avgDimension * avgDimension * 4 * allocSize.Probability;
70 }
71 probabilitySum += allocSize.Probability;
72 }
73 return sizeSum / probabilitySum;
74 }
75
76 bool UsesBuffers() const
77 {
78 for(size_t i = 0; i < AllocationSizes.size(); ++i)
79 if(AllocationSizes[i].BufferSizeMax > 0)
80 return true;
81 return false;
82 }
83
84 bool UsesImages() const
85 {
86 for(size_t i = 0; i < AllocationSizes.size(); ++i)
87 if(AllocationSizes[i].ImageSizeMax > 0)
88 return true;
89 return false;
90 }
91};
92
// Aggregate results of a pool test. Presumably filled by merging the
// per-thread PoolTestThreadResult values — the merging code is not visible
// in this part of the file; confirm against the pool-test implementation.
struct PoolTestResult
{
    duration TotalTime;
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax;
    size_t LostAllocationCount, LostAllocationTotalSize;
    size_t FailedAllocationCount, FailedAllocationTotalSize;
};
101
// Assumed bytes per pixel when MainTest estimates how much an image
// allocation will add to a thread's byte budget (see the budget checks in
// ThreadProc). Intentionally a rough estimate, not the real driver cost.
static const uint32_t IMAGE_BYTES_PER_PIXEL = 1;
103
// A Vulkan buffer paired with the VMA allocation that backs it.
struct BufferInfo
{
    VkBuffer Buffer = VK_NULL_HANDLE;
    VmaAllocation Allocation = VK_NULL_HANDLE;
};
109
110static void InitResult(Result& outResult)
111{
112 outResult.TotalTime = duration::zero();
113 outResult.AllocationTimeMin = duration::max();
114 outResult.AllocationTimeAvg = duration::zero();
115 outResult.AllocationTimeMax = duration::min();
116 outResult.DeallocationTimeMin = duration::max();
117 outResult.DeallocationTimeAvg = duration::zero();
118 outResult.DeallocationTimeMax = duration::min();
119 outResult.TotalMemoryAllocated = 0;
120 outResult.FreeRangeSizeAvg = 0;
121 outResult.FreeRangeSizeMax = 0;
122}
123
// RAII timer: measures the interval from construction to destruction and
// folds it into the referenced min/sum/max accumulators. The references must
// outlive this object.
// NOTE(review): the update in the destructor is not synchronized, yet MainTest
// constructs these on multiple worker threads against the same Result —
// presumably tolerated for a stress test, but confirm this is intentional.
class TimeRegisterObj
{
public:
    TimeRegisterObj(duration& min, duration& sum, duration& max) :
        m_Min(min),
        m_Sum(sum),
        m_Max(max),
        m_TimeBeg(std::chrono::high_resolution_clock::now())
    {
    }

    ~TimeRegisterObj()
    {
        duration d = std::chrono::high_resolution_clock::now() - m_TimeBeg;
        m_Sum += d;
        if(d < m_Min) m_Min = d;
        if(d > m_Max) m_Max = d;
    }

private:
    duration& m_Min;
    duration& m_Sum;
    duration& m_Max;
    time_point m_TimeBeg; // Construction timestamp.
};
149
// Per-thread partial results of a pool test. The time fields are raw
// min/sum/max accumulators fed by Pool*TimeRegisterObj; presumably merged
// into a PoolTestResult by the pool test driver (not visible in this chunk).
struct PoolTestThreadResult
{
    duration AllocationTimeMin, AllocationTimeSum, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeSum, DeallocationTimeMax;
    size_t AllocationCount, DeallocationCount;
    size_t LostAllocationCount, LostAllocationTotalSize;
    size_t FailedAllocationCount, FailedAllocationTotalSize;
};
158
// Scoped timer recording one allocation's duration into Result's Allocation*
// fields. AllocationTimeAvg holds the raw sum until MainTest divides it by
// the allocation count.
class AllocationTimeRegisterObj : public TimeRegisterObj
{
public:
    AllocationTimeRegisterObj(Result& result) :
        TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeAvg, result.AllocationTimeMax)
    {
    }
};
167
// Scoped timer recording one deallocation's duration into Result's
// Deallocation* fields. DeallocationTimeAvg holds the raw sum until MainTest
// divides it by the allocation count.
class DeallocationTimeRegisterObj : public TimeRegisterObj
{
public:
    DeallocationTimeRegisterObj(Result& result) :
        TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeAvg, result.DeallocationTimeMax)
    {
    }
};
176
// Scoped timer recording one allocation's duration into a per-thread
// PoolTestThreadResult (min/sum/max, no averaging here).
class PoolAllocationTimeRegisterObj : public TimeRegisterObj
{
public:
    PoolAllocationTimeRegisterObj(PoolTestThreadResult& result) :
        TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeSum, result.AllocationTimeMax)
    {
    }
};
185
// Scoped timer recording one deallocation's duration into a per-thread
// PoolTestThreadResult (min/sum/max, no averaging here).
class PoolDeallocationTimeRegisterObj : public TimeRegisterObj
{
public:
    PoolDeallocationTimeRegisterObj(PoolTestThreadResult& result) :
        TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeSum, result.DeallocationTimeMax)
    {
    }
};
194
// Multithreaded allocator stress test.
// Each of config.ThreadCount threads:
//   1. allocates random buffers/images up to its share of BeginBytesToAllocate,
//   2. performs its share of AdditionalOperationCount random alloc/free ops,
//   3. signals completion and blocks on threadsFinishEvent,
//   4. after the main thread captures global memory statistics, frees its
//      remaining allocations in config.FreeOrder.
// Timing min/sum/max is accumulated into outResult via the TimeRegisterObj
// helpers; sums are converted to averages at the end.
// Returns the last VkResult produced by an allocation call (VK_SUCCESS if
// nothing failed).
VkResult MainTest(Result& outResult, const Config& config)
{
    assert(config.ThreadCount > 0);

    InitResult(outResult);

    RandomNumberGenerator mainRand{config.RandSeed};

    time_point timeBeg = std::chrono::high_resolution_clock::now();

    std::atomic<size_t> allocationCount = 0;
    // NOTE(review): res is captured by reference into Allocate and written by
    // every worker thread without synchronization — a data race on paper.
    // Presumably tolerated because any failure also trips assert(0) below;
    // confirm, or make it atomic.
    VkResult res = VK_SUCCESS;

    uint32_t memUsageProbabilitySum =
        config.MemUsageProbability[0] + config.MemUsageProbability[1] +
        config.MemUsageProbability[2] + config.MemUsageProbability[3];
    assert(memUsageProbabilitySum > 0);

    // Total weight of the allocation-size distribution, used for the weighted
    // random pick in GetNextAllocationSize.
    uint32_t allocationSizeProbabilitySum = std::accumulate(
        config.AllocationSizes.begin(),
        config.AllocationSizes.end(),
        0u,
        [](uint32_t sum, const AllocationSize& allocSize) {
            return sum + allocSize.Probability;
        });

    struct Allocation
    {
        VkBuffer Buffer;
        VkImage Image;
        VmaAllocation Alloc;
    };

    // Allocations shared between threads; every access is guarded by
    // commonAllocationsMutex.
    std::vector<Allocation> commonAllocations;
    std::mutex commonAllocationsMutex;

    // Creates one buffer (bufferSize > 0) or one image (non-zero imageExtent)
    // with a probability-weighted VMA_MEMORY_USAGE_*, times the creation into
    // outResult, and appends the result either to the shared list or to the
    // caller's private list.
    auto Allocate = [&](
        VkDeviceSize bufferSize,
        const VkExtent2D imageExtent,
        RandomNumberGenerator& localRand,
        VkDeviceSize& totalAllocatedBytes,
        std::vector<Allocation>& allocations) -> VkResult
    {
        // Exactly one of buffer / image must be requested.
        assert((bufferSize == 0) != (imageExtent.width == 0 && imageExtent.height == 0));

        // Weighted random choice of memory usage; index 0..3 maps onto
        // VMA_MEMORY_USAGE_GPU_ONLY + index.
        uint32_t memUsageIndex = 0;
        uint32_t memUsageRand = localRand.Generate() % memUsageProbabilitySum;
        while(memUsageRand >= config.MemUsageProbability[memUsageIndex])
            memUsageRand -= config.MemUsageProbability[memUsageIndex++];

        VmaAllocationCreateInfo memReq = {};
        memReq.usage = (VmaMemoryUsage)(VMA_MEMORY_USAGE_GPU_ONLY + memUsageIndex);

        Allocation allocation = {};
        VmaAllocationInfo allocationInfo;

        // Buffer
        if(bufferSize > 0)
        {
            assert(imageExtent.width == 0);
            VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
            bufferInfo.size = bufferSize;
            bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

            {
                AllocationTimeRegisterObj timeRegisterObj{outResult};
                res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &memReq, &allocation.Buffer, &allocation.Alloc, &allocationInfo);
            }
        }
        // Image
        else
        {
            VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
            imageInfo.imageType = VK_IMAGE_TYPE_2D;
            imageInfo.extent.width = imageExtent.width;
            imageInfo.extent.height = imageExtent.height;
            imageInfo.extent.depth = 1;
            imageInfo.mipLevels = 1;
            imageInfo.arrayLayers = 1;
            imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
            // GPU-only images may use optimal tiling; host-visible usages get
            // linear tiling.
            imageInfo.tiling = memReq.usage == VMA_MEMORY_USAGE_GPU_ONLY ?
                VK_IMAGE_TILING_OPTIMAL :
                VK_IMAGE_TILING_LINEAR;
            imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
            // Pick usage flags consistent with the chosen memory usage.
            switch(memReq.usage)
            {
            case VMA_MEMORY_USAGE_GPU_ONLY:
                switch(localRand.Generate() % 3)
                {
                case 0:
                    imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
                    break;
                case 1:
                    imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
                    break;
                case 2:
                    imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
                    break;
                }
                break;
            case VMA_MEMORY_USAGE_CPU_ONLY:
            case VMA_MEMORY_USAGE_CPU_TO_GPU:
                imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
                break;
            case VMA_MEMORY_USAGE_GPU_TO_CPU:
                imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
                break;
            }
            imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
            imageInfo.flags = 0;

            {
                AllocationTimeRegisterObj timeRegisterObj{outResult};
                res = vmaCreateImage(g_hAllocator, &imageInfo, &memReq, &allocation.Image, &allocation.Alloc, &allocationInfo);
            }
        }

        if(res == VK_SUCCESS)
        {
            ++allocationCount;
            totalAllocatedBytes += allocationInfo.size;
            bool useCommonAllocations = localRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
            if(useCommonAllocations)
            {
                std::unique_lock<std::mutex> lock(commonAllocationsMutex);
                commonAllocations.push_back(allocation);
            }
            else
                allocations.push_back(allocation);
        }
        else
        {
            // Any allocation failure is unexpected in this test.
            assert(0);
        }
        return res;
    };

    // Picks a weighted random entry from config.AllocationSizes and outputs
    // either a buffer size (rounded down to a multiple of 16) or an image
    // extent; the other output stays zero.
    auto GetNextAllocationSize = [&](
        VkDeviceSize& outBufSize,
        VkExtent2D& outImageSize,
        RandomNumberGenerator& localRand)
    {
        outBufSize = 0;
        outImageSize = {0, 0};

        uint32_t allocSizeIndex = 0;
        uint32_t r = localRand.Generate() % allocationSizeProbabilitySum;
        while(r >= config.AllocationSizes[allocSizeIndex].Probability)
            r -= config.AllocationSizes[allocSizeIndex++].Probability;

        const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
        if(allocSize.BufferSizeMax > 0)
        {
            assert(allocSize.ImageSizeMax == 0);
            if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
                outBufSize = allocSize.BufferSizeMin;
            else
            {
                outBufSize = allocSize.BufferSizeMin + localRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
                outBufSize = outBufSize / 16 * 16;
            }
        }
        else
        {
            if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
                outImageSize.width = outImageSize.height = allocSize.ImageSizeMax;
            else
            {
                outImageSize.width = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
                outImageSize.height = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
            }
        }
    };

    std::atomic<uint32_t> numThreadsReachedMaxAllocations = 0;
    // Manual-reset event: signaled once by the main thread after statistics
    // are captured, releasing all workers into their deallocation phase.
    HANDLE threadsFinishEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

    auto ThreadProc = [&](uint32_t randSeed) -> void
    {
        RandomNumberGenerator threadRand(randSeed);
        VkDeviceSize threadTotalAllocatedBytes = 0;
        std::vector<Allocation> threadAllocations;
        // Each thread works against an even share of the global budgets.
        VkDeviceSize threadBeginBytesToAllocate = config.BeginBytesToAllocate / config.ThreadCount;
        VkDeviceSize threadMaxBytesToAllocate = config.MaxBytesToAllocate / config.ThreadCount;
        uint32_t threadAdditionalOperationCount = config.AdditionalOperationCount / config.ThreadCount;

        // BEGIN ALLOCATIONS
        for(;;)
        {
            VkDeviceSize bufferSize = 0;
            VkExtent2D imageExtent = {};
            GetNextAllocationSize(bufferSize, imageExtent, threadRand);
            if(threadTotalAllocatedBytes + bufferSize + imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
                threadBeginBytesToAllocate)
            {
                if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
                    break;
            }
            else
                break;
        }

        // ADDITIONAL ALLOCATIONS AND FREES
        for(size_t i = 0; i < threadAdditionalOperationCount; ++i)
        {
            VkDeviceSize bufferSize = 0;
            VkExtent2D imageExtent = {};
            GetNextAllocationSize(bufferSize, imageExtent, threadRand);

            // true = allocate, false = free
            bool allocate = threadRand.Generate() % 2 != 0;

            if(allocate)
            {
                // Allocate only while under the per-thread byte cap.
                if(threadTotalAllocatedBytes +
                    bufferSize +
                    imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
                    threadMaxBytesToAllocate)
                {
                    if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
                        break;
                }
            }
            else
            {
                bool useCommonAllocations = threadRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
                if(useCommonAllocations)
                {
                    // Free a random allocation from the shared list (under lock).
                    std::unique_lock<std::mutex> lock(commonAllocationsMutex);
                    if(!commonAllocations.empty())
                    {
                        size_t indexToFree = threadRand.Generate() % commonAllocations.size();
                        VmaAllocationInfo allocationInfo;
                        vmaGetAllocationInfo(g_hAllocator, commonAllocations[indexToFree].Alloc, &allocationInfo);
                        // Only free if this thread's byte counter can absorb it.
                        if(threadTotalAllocatedBytes >= allocationInfo.size)
                        {
                            DeallocationTimeRegisterObj timeRegisterObj{outResult};
                            if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                                vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
                            threadTotalAllocatedBytes -= allocationInfo.size;
                            commonAllocations.erase(commonAllocations.begin() + indexToFree);
                        }
                    }
                }
                else
                {
                    // Free a random allocation from this thread's private list.
                    if(!threadAllocations.empty())
                    {
                        size_t indexToFree = threadRand.Generate() % threadAllocations.size();
                        VmaAllocationInfo allocationInfo;
                        vmaGetAllocationInfo(g_hAllocator, threadAllocations[indexToFree].Alloc, &allocationInfo);
                        if(threadTotalAllocatedBytes >= allocationInfo.size)
                        {
                            DeallocationTimeRegisterObj timeRegisterObj{outResult};
                            if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                                vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
                            else
                                vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
                            threadTotalAllocatedBytes -= allocationInfo.size;
                            threadAllocations.erase(threadAllocations.begin() + indexToFree);
                        }
                    }
                }
            }
        }

        ++numThreadsReachedMaxAllocations;

        // Block until the main thread has captured memory statistics.
        WaitForSingleObject(threadsFinishEvent, INFINITE);

        // DEALLOCATION
        while(!threadAllocations.empty())
        {
            size_t indexToFree = 0;
            switch(config.FreeOrder)
            {
            case FREE_ORDER::FORWARD:
                indexToFree = 0;
                break;
            case FREE_ORDER::BACKWARD:
                indexToFree = threadAllocations.size() - 1;
                break;
            case FREE_ORDER::RANDOM:
                // NOTE(review): mainRand is shared by all worker threads here
                // without synchronization — presumably harmless for a test,
                // but confirm RandomNumberGenerator tolerates concurrent use.
                indexToFree = mainRand.Generate() % threadAllocations.size();
                break;
            }

            {
                DeallocationTimeRegisterObj timeRegisterObj{outResult};
                if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                    vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
                else
                    vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
            }
            threadAllocations.erase(threadAllocations.begin() + indexToFree);
        }
    };

    // Launch workers, each with a distinct seed derived from mainRand.
    uint32_t threadRandSeed = mainRand.Generate();
    std::vector<std::thread> bkgThreads;
    for(size_t i = 0; i < config.ThreadCount; ++i)
    {
        bkgThreads.emplace_back(std::bind(ThreadProc, threadRandSeed + (uint32_t)i));
    }

    // Wait for threads reached max allocations
    while(numThreadsReachedMaxAllocations < config.ThreadCount)
        Sleep(0);

    // CALCULATE MEMORY STATISTICS ON FINAL USAGE
    VmaStats vmaStats = {};
    vmaCalculateStats(g_hAllocator, &vmaStats);
    outResult.TotalMemoryAllocated = vmaStats.total.usedBytes + vmaStats.total.unusedBytes;
    outResult.FreeRangeSizeMax = vmaStats.total.unusedRangeSizeMax;
    outResult.FreeRangeSizeAvg = vmaStats.total.unusedRangeSizeAvg;

    // Signal threads to deallocate
    SetEvent(threadsFinishEvent);

    // Wait for threads finished
    for(size_t i = 0; i < bkgThreads.size(); ++i)
        bkgThreads[i].join();
    bkgThreads.clear();

    CloseHandle(threadsFinishEvent);

    // Deallocate remaining common resources
    while(!commonAllocations.empty())
    {
        size_t indexToFree = 0;
        switch(config.FreeOrder)
        {
        case FREE_ORDER::FORWARD:
            indexToFree = 0;
            break;
        case FREE_ORDER::BACKWARD:
            indexToFree = commonAllocations.size() - 1;
            break;
        case FREE_ORDER::RANDOM:
            indexToFree = mainRand.Generate() % commonAllocations.size();
            break;
        }

        {
            DeallocationTimeRegisterObj timeRegisterObj{outResult};
            if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
                vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
            else
                vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
        }
        commonAllocations.erase(commonAllocations.begin() + indexToFree);
    }

    // Convert the accumulated sums into averages.
    if(allocationCount)
    {
        outResult.AllocationTimeAvg /= allocationCount;
        outResult.DeallocationTimeAvg /= allocationCount;
    }

    outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;

    return res;
}
560
Adam Sawickie44c6262018-06-15 14:30:39 +0200561static void SaveAllocatorStatsToFile(const wchar_t* filePath)
Adam Sawickib8333fb2018-03-13 16:15:53 +0100562{
563 char* stats;
Adam Sawickie44c6262018-06-15 14:30:39 +0200564 vmaBuildStatsString(g_hAllocator, &stats, VK_TRUE);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100565 SaveFile(filePath, stats, strlen(stats));
Adam Sawickie44c6262018-06-15 14:30:39 +0200566 vmaFreeStatsString(g_hAllocator, stats);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100567}
568
// A test resource: exactly one of m_Buffer / m_Image holds a valid handle,
// together with its VMA allocation, the first value of the uint32_t fill
// pattern (for later verification), and the create info needed to recreate
// the resource after defragmentation moves it.
struct AllocInfo
{
    VmaAllocation m_Allocation;
    VkBuffer m_Buffer;
    VkImage m_Image;
    uint32_t m_StartValue; // First value of the sequential fill pattern.
    // Which union member is valid follows which handle is set — see
    // RecreateAllocationResource, which branches on m_Buffer.
    union
    {
        VkBufferCreateInfo m_BufferInfo;
        VkImageCreateInfo m_ImageInfo;
    };
};
581
582static void GetMemReq(VmaAllocationCreateInfo& outMemReq)
583{
584 outMemReq = {};
585 outMemReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
586 //outMemReq.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
587}
588
589static void CreateBuffer(
590 VmaPool pool,
591 const VkBufferCreateInfo& bufCreateInfo,
592 bool persistentlyMapped,
593 AllocInfo& outAllocInfo)
594{
595 outAllocInfo = {};
596 outAllocInfo.m_BufferInfo = bufCreateInfo;
597
598 VmaAllocationCreateInfo allocCreateInfo = {};
599 allocCreateInfo.pool = pool;
600 if(persistentlyMapped)
601 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
602
603 VmaAllocationInfo vmaAllocInfo = {};
604 ERR_GUARD_VULKAN( vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &outAllocInfo.m_Buffer, &outAllocInfo.m_Allocation, &vmaAllocInfo) );
605
606 // Setup StartValue and fill.
607 {
608 outAllocInfo.m_StartValue = (uint32_t)rand();
609 uint32_t* data = (uint32_t*)vmaAllocInfo.pMappedData;
610 assert((data != nullptr) == persistentlyMapped);
611 if(!persistentlyMapped)
612 {
613 ERR_GUARD_VULKAN( vmaMapMemory(g_hAllocator, outAllocInfo.m_Allocation, (void**)&data) );
614 }
615
616 uint32_t value = outAllocInfo.m_StartValue;
617 assert(bufCreateInfo.size % 4 == 0);
618 for(size_t i = 0; i < bufCreateInfo.size / sizeof(uint32_t); ++i)
619 data[i] = value++;
620
621 if(!persistentlyMapped)
622 vmaUnmapMemory(g_hAllocator, outAllocInfo.m_Allocation);
623 }
624}
625
// Creates a host-mappable resource with CPU_TO_GPU usage (see GetMemReq),
// fills its whole memory with consecutive uint32_t values starting at a
// random m_StartValue, and returns it in outAllocation.
// NOTE(review): isBuffer is hard-coded to true, so the image branch below is
// currently dead code — presumably kept for future use; confirm.
static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator)
{
    outAllocation.m_Allocation = nullptr;
    outAllocation.m_Buffer = nullptr;
    outAllocation.m_Image = nullptr;
    outAllocation.m_StartValue = (uint32_t)rand();

    VmaAllocationCreateInfo vmaMemReq;
    GetMemReq(vmaMemReq);

    VmaAllocationInfo allocInfo;

    const bool isBuffer = true;//(rand() & 0x1) != 0;
    const bool isLarge = (rand() % 16) == 0; // ~1 in 16 allocations is "large".
    if(isBuffer)
    {
        const uint32_t bufferSize = isLarge ?
            (rand() % 10 + 1) * (1024 * 1024) : // 1 MB ... 10 MB
            (rand() % 1024 + 1) * 1024; // 1 KB ... 1 MB

        VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufferInfo.size = bufferSize;
        bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &vmaMemReq, &outAllocation.m_Buffer, &outAllocation.m_Allocation, &allocInfo);
        // Store the create info in the union member matching the resource type.
        outAllocation.m_BufferInfo = bufferInfo;
        assert(res == VK_SUCCESS);
    }
    else
    {
        const uint32_t imageSizeX = isLarge ?
            1024 + rand() % (4096 - 1024) : // 1024 ... 4096
            rand() % 1024 + 1; // 1 ... 1024
        const uint32_t imageSizeY = isLarge ?
            1024 + rand() % (4096 - 1024) : // 1024 ... 4096
            rand() % 1024 + 1; // 1 ... 1024

        VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
        imageInfo.imageType = VK_IMAGE_TYPE_2D;
        imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
        imageInfo.extent.width = imageSizeX;
        imageInfo.extent.height = imageSizeY;
        imageInfo.extent.depth = 1;
        imageInfo.mipLevels = 1;
        imageInfo.arrayLayers = 1;
        imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
        imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
        imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

        VkResult res = vmaCreateImage(allocator, &imageInfo, &vmaMemReq, &outAllocation.m_Image, &outAllocation.m_Allocation, &allocInfo);
        outAllocation.m_ImageInfo = imageInfo;
        assert(res == VK_SUCCESS);
    }

    // Map (unless the allocation came back persistently mapped) and write
    // the sequential verification pattern.
    uint32_t* data = (uint32_t*)allocInfo.pMappedData;
    if(allocInfo.pMappedData == nullptr)
    {
        VkResult res = vmaMapMemory(allocator, outAllocation.m_Allocation, (void**)&data);
        assert(res == VK_SUCCESS);
    }

    uint32_t value = outAllocation.m_StartValue;
    assert(allocInfo.size % 4 == 0);
    for(size_t i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
        data[i] = value++;

    if(allocInfo.pMappedData == nullptr)
        vmaUnmapMemory(allocator, outAllocation.m_Allocation);
}
696
697static void DestroyAllocation(const AllocInfo& allocation)
698{
699 if(allocation.m_Buffer)
700 vmaDestroyBuffer(g_hAllocator, allocation.m_Buffer, allocation.m_Allocation);
701 else
702 vmaDestroyImage(g_hAllocator, allocation.m_Image, allocation.m_Allocation);
703}
704
705static void DestroyAllAllocations(std::vector<AllocInfo>& allocations)
706{
707 for(size_t i = allocations.size(); i--; )
708 DestroyAllocation(allocations[i]);
709 allocations.clear();
710}
711
712static void ValidateAllocationData(const AllocInfo& allocation)
713{
714 VmaAllocationInfo allocInfo;
715 vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);
716
717 uint32_t* data = (uint32_t*)allocInfo.pMappedData;
718 if(allocInfo.pMappedData == nullptr)
719 {
720 VkResult res = vmaMapMemory(g_hAllocator, allocation.m_Allocation, (void**)&data);
721 assert(res == VK_SUCCESS);
722 }
723
724 uint32_t value = allocation.m_StartValue;
725 bool ok = true;
726 size_t i;
727 assert(allocInfo.size % 4 == 0);
728 for(i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
729 {
730 if(data[i] != value++)
731 {
732 ok = false;
733 break;
734 }
735 }
736 assert(ok);
737
738 if(allocInfo.pMappedData == nullptr)
739 vmaUnmapMemory(g_hAllocator, allocation.m_Allocation);
740}
741
// Destroys and recreates the Vulkan buffer/image of the given allocation
// (keeping the VmaAllocation itself), then binds the new resource to the
// allocation's current deviceMemory/offset. Used after defragmentation has
// moved the allocation.
static void RecreateAllocationResource(AllocInfo& allocation)
{
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);

    if(allocation.m_Buffer)
    {
        vkDestroyBuffer(g_hDevice, allocation.m_Buffer, nullptr);

        // Recreate from the stored create info.
        VkResult res = vkCreateBuffer(g_hDevice, &allocation.m_BufferInfo, nullptr, &allocation.m_Buffer);
        assert(res == VK_SUCCESS);

        // Just to silence validation layer warnings.
        VkMemoryRequirements vkMemReq;
        vkGetBufferMemoryRequirements(g_hDevice, allocation.m_Buffer, &vkMemReq);
        assert(vkMemReq.size == allocation.m_BufferInfo.size);

        // Bind at the allocation's (possibly new) location.
        res = vkBindBufferMemory(g_hDevice, allocation.m_Buffer, allocInfo.deviceMemory, allocInfo.offset);
        assert(res == VK_SUCCESS);
    }
    else
    {
        vkDestroyImage(g_hDevice, allocation.m_Image, nullptr);

        VkResult res = vkCreateImage(g_hDevice, &allocation.m_ImageInfo, nullptr, &allocation.m_Image);
        assert(res == VK_SUCCESS);

        // Just to silence validation layer warnings.
        VkMemoryRequirements vkMemReq;
        vkGetImageMemoryRequirements(g_hDevice, allocation.m_Image, &vkMemReq);

        res = vkBindImageMemory(g_hDevice, allocation.m_Image, allocInfo.deviceMemory, allocInfo.offset);
        assert(res == VK_SUCCESS);
    }
}
777
778static void Defragment(AllocInfo* allocs, size_t allocCount,
779 const VmaDefragmentationInfo* defragmentationInfo = nullptr,
780 VmaDefragmentationStats* defragmentationStats = nullptr)
781{
782 std::vector<VmaAllocation> vmaAllocs(allocCount);
783 for(size_t i = 0; i < allocCount; ++i)
784 vmaAllocs[i] = allocs[i].m_Allocation;
785
786 std::vector<VkBool32> allocChanged(allocCount);
787
788 ERR_GUARD_VULKAN( vmaDefragment(g_hAllocator, vmaAllocs.data(), allocCount, allocChanged.data(),
789 defragmentationInfo, defragmentationStats) );
790
791 for(size_t i = 0; i < allocCount; ++i)
792 {
793 if(allocChanged[i])
794 {
795 RecreateAllocationResource(allocs[i]);
796 }
797 }
798}
799
800static void ValidateAllocationsData(const AllocInfo* allocs, size_t allocCount)
801{
802 std::for_each(allocs, allocs + allocCount, [](const AllocInfo& allocInfo) {
803 ValidateAllocationData(allocInfo);
804 });
805}
806
807void TestDefragmentationSimple()
808{
809 wprintf(L"Test defragmentation simple\n");
810
811 RandomNumberGenerator rand(667);
812
813 const VkDeviceSize BUF_SIZE = 0x10000;
814 const VkDeviceSize BLOCK_SIZE = BUF_SIZE * 8;
815
816 const VkDeviceSize MIN_BUF_SIZE = 32;
817 const VkDeviceSize MAX_BUF_SIZE = BUF_SIZE * 4;
818 auto RandomBufSize = [&]() -> VkDeviceSize {
819 return align_up<VkDeviceSize>(rand.Generate() % (MAX_BUF_SIZE - MIN_BUF_SIZE + 1) + MIN_BUF_SIZE, 32);
820 };
821
822 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
823 bufCreateInfo.size = BUF_SIZE;
824 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
825
826 VmaAllocationCreateInfo exampleAllocCreateInfo = {};
827 exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
828
829 uint32_t memTypeIndex = UINT32_MAX;
830 vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
831
832 VmaPoolCreateInfo poolCreateInfo = {};
833 poolCreateInfo.blockSize = BLOCK_SIZE;
834 poolCreateInfo.memoryTypeIndex = memTypeIndex;
835
836 VmaPool pool;
837 ERR_GUARD_VULKAN( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) );
838
839 std::vector<AllocInfo> allocations;
840
841 // persistentlyMappedOption = 0 - not persistently mapped.
842 // persistentlyMappedOption = 1 - persistently mapped.
843 for(uint32_t persistentlyMappedOption = 0; persistentlyMappedOption < 2; ++persistentlyMappedOption)
844 {
845 wprintf(L" Persistently mapped option = %u\n", persistentlyMappedOption);
846 const bool persistentlyMapped = persistentlyMappedOption != 0;
847
848 // # Test 1
849 // Buffers of fixed size.
850 // Fill 2 blocks. Remove odd buffers. Defragment everything.
851 // Expected result: at least 1 block freed.
852 {
853 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
854 {
855 AllocInfo allocInfo;
856 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
857 allocations.push_back(allocInfo);
858 }
859
860 for(size_t i = 1; i < allocations.size(); ++i)
861 {
862 DestroyAllocation(allocations[i]);
863 allocations.erase(allocations.begin() + i);
864 }
865
866 VmaDefragmentationStats defragStats;
867 Defragment(allocations.data(), allocations.size(), nullptr, &defragStats);
868 assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
869 assert(defragStats.deviceMemoryBlocksFreed >= 1);
870
871 ValidateAllocationsData(allocations.data(), allocations.size());
872
873 DestroyAllAllocations(allocations);
874 }
875
876 // # Test 2
877 // Buffers of fixed size.
878 // Fill 2 blocks. Remove odd buffers. Defragment one buffer at time.
879 // Expected result: Each of 4 interations makes some progress.
880 {
881 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
882 {
883 AllocInfo allocInfo;
884 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
885 allocations.push_back(allocInfo);
886 }
887
888 for(size_t i = 1; i < allocations.size(); ++i)
889 {
890 DestroyAllocation(allocations[i]);
891 allocations.erase(allocations.begin() + i);
892 }
893
894 VmaDefragmentationInfo defragInfo = {};
895 defragInfo.maxAllocationsToMove = 1;
896 defragInfo.maxBytesToMove = BUF_SIZE;
897
898 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE / 2; ++i)
899 {
900 VmaDefragmentationStats defragStats;
901 Defragment(allocations.data(), allocations.size(), &defragInfo, &defragStats);
902 assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
903 }
904
905 ValidateAllocationsData(allocations.data(), allocations.size());
906
907 DestroyAllAllocations(allocations);
908 }
909
910 // # Test 3
911 // Buffers of variable size.
912 // Create a number of buffers. Remove some percent of them.
913 // Defragment while having some percent of them unmovable.
914 // Expected result: Just simple validation.
915 {
916 for(size_t i = 0; i < 100; ++i)
917 {
918 VkBufferCreateInfo localBufCreateInfo = bufCreateInfo;
919 localBufCreateInfo.size = RandomBufSize();
920
921 AllocInfo allocInfo;
922 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
923 allocations.push_back(allocInfo);
924 }
925
926 const uint32_t percentToDelete = 60;
927 const size_t numberToDelete = allocations.size() * percentToDelete / 100;
928 for(size_t i = 0; i < numberToDelete; ++i)
929 {
930 size_t indexToDelete = rand.Generate() % (uint32_t)allocations.size();
931 DestroyAllocation(allocations[indexToDelete]);
932 allocations.erase(allocations.begin() + indexToDelete);
933 }
934
935 // Non-movable allocations will be at the beginning of allocations array.
936 const uint32_t percentNonMovable = 20;
937 const size_t numberNonMovable = allocations.size() * percentNonMovable / 100;
938 for(size_t i = 0; i < numberNonMovable; ++i)
939 {
940 size_t indexNonMovable = i + rand.Generate() % (uint32_t)(allocations.size() - i);
941 if(indexNonMovable != i)
942 std::swap(allocations[i], allocations[indexNonMovable]);
943 }
944
945 VmaDefragmentationStats defragStats;
946 Defragment(
947 allocations.data() + numberNonMovable,
948 allocations.size() - numberNonMovable,
949 nullptr, &defragStats);
950
951 ValidateAllocationsData(allocations.data(), allocations.size());
952
953 DestroyAllAllocations(allocations);
954 }
955 }
956
957 vmaDestroyPool(g_hAllocator, pool);
958}
959
960void TestDefragmentationFull()
961{
962 std::vector<AllocInfo> allocations;
963
964 // Create initial allocations.
965 for(size_t i = 0; i < 400; ++i)
966 {
967 AllocInfo allocation;
968 CreateAllocation(allocation, g_hAllocator);
969 allocations.push_back(allocation);
970 }
971
972 // Delete random allocations
973 const size_t allocationsToDeletePercent = 80;
974 size_t allocationsToDelete = allocations.size() * allocationsToDeletePercent / 100;
975 for(size_t i = 0; i < allocationsToDelete; ++i)
976 {
977 size_t index = (size_t)rand() % allocations.size();
978 DestroyAllocation(allocations[index]);
979 allocations.erase(allocations.begin() + index);
980 }
981
982 for(size_t i = 0; i < allocations.size(); ++i)
983 ValidateAllocationData(allocations[i]);
984
Adam Sawickie44c6262018-06-15 14:30:39 +0200985 SaveAllocatorStatsToFile(L"Before.csv");
Adam Sawickib8333fb2018-03-13 16:15:53 +0100986
987 {
988 std::vector<VmaAllocation> vmaAllocations(allocations.size());
989 for(size_t i = 0; i < allocations.size(); ++i)
990 vmaAllocations[i] = allocations[i].m_Allocation;
991
992 const size_t nonMovablePercent = 0;
993 size_t nonMovableCount = vmaAllocations.size() * nonMovablePercent / 100;
994 for(size_t i = 0; i < nonMovableCount; ++i)
995 {
996 size_t index = (size_t)rand() % vmaAllocations.size();
997 vmaAllocations.erase(vmaAllocations.begin() + index);
998 }
999
1000 const uint32_t defragCount = 1;
1001 for(uint32_t defragIndex = 0; defragIndex < defragCount; ++defragIndex)
1002 {
1003 std::vector<VkBool32> allocationsChanged(vmaAllocations.size());
1004
1005 VmaDefragmentationInfo defragmentationInfo;
1006 defragmentationInfo.maxAllocationsToMove = UINT_MAX;
1007 defragmentationInfo.maxBytesToMove = SIZE_MAX;
1008
1009 wprintf(L"Defragmentation #%u\n", defragIndex);
1010
1011 time_point begTime = std::chrono::high_resolution_clock::now();
1012
1013 VmaDefragmentationStats stats;
1014 VkResult res = vmaDefragment(g_hAllocator, vmaAllocations.data(), vmaAllocations.size(), allocationsChanged.data(), &defragmentationInfo, &stats);
1015 assert(res >= 0);
1016
1017 float defragmentDuration = ToFloatSeconds(std::chrono::high_resolution_clock::now() - begTime);
1018
1019 wprintf(L"Moved allocations %u, bytes %llu\n", stats.allocationsMoved, stats.bytesMoved);
1020 wprintf(L"Freed blocks %u, bytes %llu\n", stats.deviceMemoryBlocksFreed, stats.bytesFreed);
1021 wprintf(L"Time: %.2f s\n", defragmentDuration);
1022
1023 for(size_t i = 0; i < vmaAllocations.size(); ++i)
1024 {
1025 if(allocationsChanged[i])
1026 {
1027 RecreateAllocationResource(allocations[i]);
1028 }
1029 }
1030
1031 for(size_t i = 0; i < allocations.size(); ++i)
1032 ValidateAllocationData(allocations[i]);
1033
1034 wchar_t fileName[MAX_PATH];
1035 swprintf(fileName, MAX_PATH, L"After_%02u.csv", defragIndex);
Adam Sawickie44c6262018-06-15 14:30:39 +02001036 SaveAllocatorStatsToFile(fileName);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001037 }
1038 }
1039
1040 // Destroy all remaining allocations.
1041 DestroyAllAllocations(allocations);
1042}
1043
1044static void TestUserData()
1045{
1046 VkResult res;
1047
1048 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1049 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
1050 bufCreateInfo.size = 0x10000;
1051
1052 for(uint32_t testIndex = 0; testIndex < 2; ++testIndex)
1053 {
1054 // Opaque pointer
1055 {
1056
1057 void* numberAsPointer = (void*)(size_t)0xC2501FF3u;
1058 void* pointerToSomething = &res;
1059
1060 VmaAllocationCreateInfo allocCreateInfo = {};
1061 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1062 allocCreateInfo.pUserData = numberAsPointer;
1063 if(testIndex == 1)
1064 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1065
1066 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1067 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1068 assert(res == VK_SUCCESS);
1069 assert(allocInfo.pUserData = numberAsPointer);
1070
1071 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1072 assert(allocInfo.pUserData == numberAsPointer);
1073
1074 vmaSetAllocationUserData(g_hAllocator, alloc, pointerToSomething);
1075 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1076 assert(allocInfo.pUserData == pointerToSomething);
1077
1078 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1079 }
1080
1081 // String
1082 {
1083 const char* name1 = "Buffer name \\\"\'<>&% \nSecond line .,;=";
1084 const char* name2 = "2";
1085 const size_t name1Len = strlen(name1);
1086
1087 char* name1Buf = new char[name1Len + 1];
1088 strcpy_s(name1Buf, name1Len + 1, name1);
1089
1090 VmaAllocationCreateInfo allocCreateInfo = {};
1091 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1092 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
1093 allocCreateInfo.pUserData = name1Buf;
1094 if(testIndex == 1)
1095 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1096
1097 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1098 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1099 assert(res == VK_SUCCESS);
1100 assert(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf);
1101 assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
1102
1103 delete[] name1Buf;
1104
1105 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1106 assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
1107
1108 vmaSetAllocationUserData(g_hAllocator, alloc, (void*)name2);
1109 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1110 assert(strcmp(name2, (const char*)allocInfo.pUserData) == 0);
1111
1112 vmaSetAllocationUserData(g_hAllocator, alloc, nullptr);
1113 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1114 assert(allocInfo.pUserData == nullptr);
1115
1116 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1117 }
1118 }
1119}
1120
1121static void TestMemoryRequirements()
1122{
1123 VkResult res;
1124 VkBuffer buf;
1125 VmaAllocation alloc;
1126 VmaAllocationInfo allocInfo;
1127
1128 const VkPhysicalDeviceMemoryProperties* memProps;
1129 vmaGetMemoryProperties(g_hAllocator, &memProps);
1130
1131 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1132 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1133 bufInfo.size = 128;
1134
1135 VmaAllocationCreateInfo allocCreateInfo = {};
1136
1137 // No requirements.
1138 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1139 assert(res == VK_SUCCESS);
1140 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1141
1142 // Usage.
1143 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1144 allocCreateInfo.requiredFlags = 0;
1145 allocCreateInfo.preferredFlags = 0;
1146 allocCreateInfo.memoryTypeBits = UINT32_MAX;
1147
1148 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1149 assert(res == VK_SUCCESS);
1150 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
1151 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1152
1153 // Required flags, preferred flags.
1154 allocCreateInfo.usage = VMA_MEMORY_USAGE_UNKNOWN;
1155 allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1156 allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
1157 allocCreateInfo.memoryTypeBits = 0;
1158
1159 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1160 assert(res == VK_SUCCESS);
1161 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
1162 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
1163 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1164
1165 // memoryTypeBits.
1166 const uint32_t memType = allocInfo.memoryType;
1167 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1168 allocCreateInfo.requiredFlags = 0;
1169 allocCreateInfo.preferredFlags = 0;
1170 allocCreateInfo.memoryTypeBits = 1u << memType;
1171
1172 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1173 assert(res == VK_SUCCESS);
1174 assert(allocInfo.memoryType == memType);
1175 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1176
1177}
1178
1179static void TestBasics()
1180{
1181 VkResult res;
1182
1183 TestMemoryRequirements();
1184
1185 // Lost allocation
1186 {
1187 VmaAllocation alloc = VK_NULL_HANDLE;
1188 vmaCreateLostAllocation(g_hAllocator, &alloc);
1189 assert(alloc != VK_NULL_HANDLE);
1190
1191 VmaAllocationInfo allocInfo;
1192 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1193 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
1194 assert(allocInfo.size == 0);
1195
1196 vmaFreeMemory(g_hAllocator, alloc);
1197 }
1198
1199 // Allocation that is MAPPED and not necessarily HOST_VISIBLE.
1200 {
1201 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1202 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
1203 bufCreateInfo.size = 128;
1204
1205 VmaAllocationCreateInfo allocCreateInfo = {};
1206 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1207 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
1208
1209 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1210 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1211 assert(res == VK_SUCCESS);
1212
1213 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1214
1215 // Same with OWN_MEMORY.
1216 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1217
1218 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1219 assert(res == VK_SUCCESS);
1220
1221 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1222 }
1223
1224 TestUserData();
1225}
1226
void TestHeapSizeLimit()
{
    // Verifies VmaAllocatorCreateInfo::pHeapSizeLimit: creates a dedicated
    // allocator whose every heap is capped at 1 GB, fills that budget exactly
    // with dedicated + pool allocations, then checks that one more allocation
    // fails with VK_ERROR_OUT_OF_DEVICE_MEMORY instead of exceeding the limit.
    const VkDeviceSize HEAP_SIZE_LIMIT = 1ull * 1024 * 1024 * 1024; // 1 GB
    const VkDeviceSize BLOCK_SIZE = 128ull * 1024 * 1024; // 128 MB

    // Same cap for every heap index — simpler than querying the real heap count.
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = HEAP_SIZE_LIMIT;
    }

    // Separate allocator so the limit doesn't affect other tests using g_hAllocator.
    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.physicalDevice = g_hPhysicalDevice;
    allocatorCreateInfo.device = g_hDevice;
    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;

    VmaAllocator hAllocator;
    VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &hAllocator);
    assert(res == VK_SUCCESS);

    // Buffer + its allocation, tracked for cleanup at the end.
    struct Item
    {
        VkBuffer hBuf;
        VmaAllocation hAlloc;
    };
    std::vector<Item> items;

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    // 1. Allocate two blocks of Own Memory, half the size of BLOCK_SIZE.
    // Together they consume one BLOCK_SIZE worth of the heap budget.
    // ownAllocInfo also tells us which memory type GPU_ONLY resolves to.
    VmaAllocationInfo ownAllocInfo;
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        bufCreateInfo.size = BLOCK_SIZE / 2;

        for(size_t i = 0; i < 2; ++i)
        {
            Item item;
            res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, &ownAllocInfo);
            assert(res == VK_SUCCESS);
            items.push_back(item);
        }
    }

    // Create pool to make sure allocations must be out of this memory type.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = ownAllocInfo.memoryType;
    poolCreateInfo.blockSize = BLOCK_SIZE;

    VmaPool hPool;
    res = vmaCreatePool(hAllocator, &poolCreateInfo, &hPool);
    assert(res == VK_SUCCESS);

    // 2. Allocate normal buffers from all the remaining memory.
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.pool = hPool;

        bufCreateInfo.size = BLOCK_SIZE / 2;

        // (heap limit / block size - 1 block already used) * 2 half-block buffers
        // fills the budget exactly.
        const size_t bufCount = ((HEAP_SIZE_LIMIT / BLOCK_SIZE) - 1) * 2;
        for(size_t i = 0; i < bufCount; ++i)
        {
            Item item;
            res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, nullptr);
            assert(res == VK_SUCCESS);
            items.push_back(item);
        }
    }

    // 3. Allocation of one more (even small) buffer should fail.
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.pool = hPool;

        bufCreateInfo.size = 128;

        VkBuffer hBuf;
        VmaAllocation hAlloc;
        res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &hBuf, &hAlloc, nullptr);
        assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }

    // Destroy everything.
    for(size_t i = items.size(); i--; )
    {
        vmaDestroyBuffer(hAllocator, items[i].hBuf, items[i].hAlloc);
    }

    vmaDestroyPool(hAllocator, hPool);

    vmaDestroyAllocator(hAllocator);
}
1324
Adam Sawicki212a4a62018-06-14 15:44:45 +02001325#if VMA_DEBUG_MARGIN
// Verifies that VMA_DEBUG_MARGIN bytes of padding are kept before/between
// allocations, and that vmaCheckCorruption() passes on untouched margins.
// Only compiled when VMA_DEBUG_MARGIN is nonzero (see enclosing #if).
static void TestDebugMargin()
{
    // Redundant with the surrounding #if VMA_DEBUG_MARGIN guard, kept as a
    // runtime safety net.
    if(VMA_DEBUG_MARGIN == 0)
    {
        return;
    }

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    // Create few buffers of different size.
    const size_t BUF_COUNT = 10;
    BufferInfo buffers[BUF_COUNT];
    VmaAllocationInfo allocInfo[BUF_COUNT];
    for(size_t i = 0; i < 10; ++i)
    {
        bufInfo.size = (VkDeviceSize)(i + 1) * 64;
        // Last one will be mapped.
        allocCreateInfo.flags = (i == BUF_COUNT - 1) ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;

        VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buffers[i].Buffer, &buffers[i].Allocation, &allocInfo[i]);
        assert(res == VK_SUCCESS);
        // Margin is preserved also at the beginning of a block.
        assert(allocInfo[i].offset >= VMA_DEBUG_MARGIN);

        if(i == BUF_COUNT - 1)
        {
            // Fill with data.
            assert(allocInfo[i].pMappedData != nullptr);
            // Uncomment this "+ 1" to overwrite past end of allocation and check corruption detection.
            memset(allocInfo[i].pMappedData, 0xFF, bufInfo.size /* + 1 */);
        }
    }

    // Check if their offsets preserve margin between them.
    // Sort by (deviceMemory, offset) so neighbors in the array are neighbors
    // in memory; only compare offsets within the same VkDeviceMemory block.
    std::sort(allocInfo, allocInfo + BUF_COUNT, [](const VmaAllocationInfo& lhs, const VmaAllocationInfo& rhs) -> bool
    {
        if(lhs.deviceMemory != rhs.deviceMemory)
        {
            return lhs.deviceMemory < rhs.deviceMemory;
        }
        return lhs.offset < rhs.offset;
    });
    for(size_t i = 1; i < BUF_COUNT; ++i)
    {
        if(allocInfo[i].deviceMemory == allocInfo[i - 1].deviceMemory)
        {
            assert(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN);
        }
    }

    // All margins should still hold their fill pattern — corruption check
    // across every memory type must succeed.
    VkResult res = vmaCheckCorruption(g_hAllocator, UINT32_MAX);
    assert(res == VK_SUCCESS);

    // Destroy all buffers.
    for(size_t i = BUF_COUNT; i--; )
    {
        vmaDestroyBuffer(g_hAllocator, buffers[i].Buffer, buffers[i].Allocation);
    }
}
Adam Sawicki212a4a62018-06-14 15:44:45 +02001389#endif
Adam Sawicki73b16652018-06-11 16:39:25 +02001390
Adam Sawicki0876c0d2018-06-20 15:18:11 +02001391static void TestLinearAllocator()
1392{
1393 wprintf(L"Test linear allocator\n");
1394
1395 RandomNumberGenerator rand{645332};
1396
1397 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1398 sampleBufCreateInfo.size = 1024; // Whatever.
1399 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1400
1401 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
1402 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1403
1404 VmaPoolCreateInfo poolCreateInfo = {};
1405 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
1406 assert(res == VK_SUCCESS);
1407
1408 poolCreateInfo.blockSize = 1024 * 1024;
1409 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
1410 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
1411
1412 VmaPool pool = nullptr;
1413 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
1414 assert(res == VK_SUCCESS);
1415
1416 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
1417
1418 VmaAllocationCreateInfo allocCreateInfo = {};
1419 allocCreateInfo.pool = pool;
1420
1421 constexpr size_t maxBufCount = 100;
1422 std::vector<BufferInfo> bufInfo;
1423
1424 constexpr VkDeviceSize bufSizeMin = 16;
1425 constexpr VkDeviceSize bufSizeMax = 1024;
1426 VmaAllocationInfo allocInfo;
1427 VkDeviceSize prevOffset = 0;
1428
1429 // Test one-time free.
1430 for(size_t i = 0; i < 2; ++i)
1431 {
1432 // Allocate number of buffers of varying size that surely fit into this block.
1433 VkDeviceSize bufSumSize = 0;
1434 for(size_t i = 0; i < maxBufCount; ++i)
1435 {
1436 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1437 BufferInfo newBufInfo;
1438 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1439 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1440 assert(res == VK_SUCCESS);
1441 assert(i == 0 || allocInfo.offset > prevOffset);
1442 bufInfo.push_back(newBufInfo);
1443 prevOffset = allocInfo.offset;
1444 bufSumSize += bufCreateInfo.size;
1445 }
1446
1447 // Validate pool stats.
1448 VmaPoolStats stats;
1449 vmaGetPoolStats(g_hAllocator, pool, &stats);
1450 assert(stats.size == poolCreateInfo.blockSize);
1451 assert(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize);
1452 assert(stats.allocationCount == bufInfo.size());
1453
1454 // Destroy the buffers in random order.
1455 while(!bufInfo.empty())
1456 {
1457 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
1458 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
1459 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1460 bufInfo.erase(bufInfo.begin() + indexToDestroy);
1461 }
1462 }
1463
1464 // Test stack.
1465 {
1466 // Allocate number of buffers of varying size that surely fit into this block.
1467 for(size_t i = 0; i < maxBufCount; ++i)
1468 {
1469 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1470 BufferInfo newBufInfo;
1471 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1472 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1473 assert(res == VK_SUCCESS);
1474 assert(i == 0 || allocInfo.offset > prevOffset);
1475 bufInfo.push_back(newBufInfo);
1476 prevOffset = allocInfo.offset;
1477 }
1478
1479 // Destroy few buffers from top of the stack.
1480 for(size_t i = 0; i < maxBufCount / 5; ++i)
1481 {
1482 const BufferInfo& currBufInfo = bufInfo.back();
1483 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1484 bufInfo.pop_back();
1485 }
1486
1487 // Create some more
1488 for(size_t i = 0; i < maxBufCount / 5; ++i)
1489 {
1490 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1491 BufferInfo newBufInfo;
1492 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1493 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1494 assert(res == VK_SUCCESS);
1495 assert(i == 0 || allocInfo.offset > prevOffset);
1496 bufInfo.push_back(newBufInfo);
1497 prevOffset = allocInfo.offset;
1498 }
1499
1500 // Destroy the buffers in reverse order.
1501 while(!bufInfo.empty())
1502 {
1503 const BufferInfo& currBufInfo = bufInfo.back();
1504 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1505 bufInfo.pop_back();
1506 }
1507 }
1508
1509 vmaDestroyPool(g_hAllocator, pool);
1510}
1511
Adam Sawickib8333fb2018-03-13 16:15:53 +01001512static void TestPool_SameSize()
1513{
1514 const VkDeviceSize BUF_SIZE = 1024 * 1024;
1515 const size_t BUF_COUNT = 100;
1516 VkResult res;
1517
1518 RandomNumberGenerator rand{123};
1519
1520 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1521 bufferInfo.size = BUF_SIZE;
1522 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
1523
1524 uint32_t memoryTypeBits = UINT32_MAX;
1525 {
1526 VkBuffer dummyBuffer;
1527 res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer);
1528 assert(res == VK_SUCCESS);
1529
1530 VkMemoryRequirements memReq;
1531 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
1532 memoryTypeBits = memReq.memoryTypeBits;
1533
1534 vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);
1535 }
1536
1537 VmaAllocationCreateInfo poolAllocInfo = {};
1538 poolAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1539 uint32_t memTypeIndex;
1540 res = vmaFindMemoryTypeIndex(
1541 g_hAllocator,
1542 memoryTypeBits,
1543 &poolAllocInfo,
1544 &memTypeIndex);
1545
1546 VmaPoolCreateInfo poolCreateInfo = {};
1547 poolCreateInfo.memoryTypeIndex = memTypeIndex;
1548 poolCreateInfo.blockSize = BUF_SIZE * BUF_COUNT / 4;
1549 poolCreateInfo.minBlockCount = 1;
1550 poolCreateInfo.maxBlockCount = 4;
1551 poolCreateInfo.frameInUseCount = 0;
1552
1553 VmaPool pool;
1554 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
1555 assert(res == VK_SUCCESS);
1556
1557 vmaSetCurrentFrameIndex(g_hAllocator, 1);
1558
1559 VmaAllocationCreateInfo allocInfo = {};
1560 allocInfo.pool = pool;
1561 allocInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
1562 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
1563
1564 struct BufItem
1565 {
1566 VkBuffer Buf;
1567 VmaAllocation Alloc;
1568 };
1569 std::vector<BufItem> items;
1570
1571 // Fill entire pool.
1572 for(size_t i = 0; i < BUF_COUNT; ++i)
1573 {
1574 BufItem item;
1575 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1576 assert(res == VK_SUCCESS);
1577 items.push_back(item);
1578 }
1579
1580 // Make sure that another allocation would fail.
1581 {
1582 BufItem item;
1583 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1584 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1585 }
1586
1587 // Validate that no buffer is lost. Also check that they are not mapped.
1588 for(size_t i = 0; i < items.size(); ++i)
1589 {
1590 VmaAllocationInfo allocInfo;
1591 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1592 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
1593 assert(allocInfo.pMappedData == nullptr);
1594 }
1595
1596 // Free some percent of random items.
1597 {
1598 const size_t PERCENT_TO_FREE = 10;
1599 size_t itemsToFree = items.size() * PERCENT_TO_FREE / 100;
1600 for(size_t i = 0; i < itemsToFree; ++i)
1601 {
1602 size_t index = (size_t)rand.Generate() % items.size();
1603 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
1604 items.erase(items.begin() + index);
1605 }
1606 }
1607
1608 // Randomly allocate and free items.
1609 {
1610 const size_t OPERATION_COUNT = BUF_COUNT;
1611 for(size_t i = 0; i < OPERATION_COUNT; ++i)
1612 {
1613 bool allocate = rand.Generate() % 2 != 0;
1614 if(allocate)
1615 {
1616 if(items.size() < BUF_COUNT)
1617 {
1618 BufItem item;
1619 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1620 assert(res == VK_SUCCESS);
1621 items.push_back(item);
1622 }
1623 }
1624 else // Free
1625 {
1626 if(!items.empty())
1627 {
1628 size_t index = (size_t)rand.Generate() % items.size();
1629 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
1630 items.erase(items.begin() + index);
1631 }
1632 }
1633 }
1634 }
1635
1636 // Allocate up to maximum.
1637 while(items.size() < BUF_COUNT)
1638 {
1639 BufItem item;
1640 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1641 assert(res == VK_SUCCESS);
1642 items.push_back(item);
1643 }
1644
1645 // Validate that no buffer is lost.
1646 for(size_t i = 0; i < items.size(); ++i)
1647 {
1648 VmaAllocationInfo allocInfo;
1649 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1650 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
1651 }
1652
1653 // Next frame.
1654 vmaSetCurrentFrameIndex(g_hAllocator, 2);
1655
1656 // Allocate another BUF_COUNT buffers.
1657 for(size_t i = 0; i < BUF_COUNT; ++i)
1658 {
1659 BufItem item;
1660 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1661 assert(res == VK_SUCCESS);
1662 items.push_back(item);
1663 }
1664
1665 // Make sure the first BUF_COUNT is lost. Delete them.
1666 for(size_t i = 0; i < BUF_COUNT; ++i)
1667 {
1668 VmaAllocationInfo allocInfo;
1669 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1670 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
1671 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1672 }
1673 items.erase(items.begin(), items.begin() + BUF_COUNT);
1674
1675 // Validate that no buffer is lost.
1676 for(size_t i = 0; i < items.size(); ++i)
1677 {
1678 VmaAllocationInfo allocInfo;
1679 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1680 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
1681 }
1682
1683 // Free one item.
1684 vmaDestroyBuffer(g_hAllocator, items.back().Buf, items.back().Alloc);
1685 items.pop_back();
1686
1687 // Validate statistics.
1688 {
1689 VmaPoolStats poolStats = {};
1690 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
1691 assert(poolStats.allocationCount == items.size());
1692 assert(poolStats.size = BUF_COUNT * BUF_SIZE);
1693 assert(poolStats.unusedRangeCount == 1);
1694 assert(poolStats.unusedRangeSizeMax == BUF_SIZE);
1695 assert(poolStats.unusedSize == BUF_SIZE);
1696 }
1697
1698 // Free all remaining items.
1699 for(size_t i = items.size(); i--; )
1700 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1701 items.clear();
1702
1703 // Allocate maximum items again.
1704 for(size_t i = 0; i < BUF_COUNT; ++i)
1705 {
1706 BufItem item;
1707 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1708 assert(res == VK_SUCCESS);
1709 items.push_back(item);
1710 }
1711
1712 // Delete every other item.
1713 for(size_t i = 0; i < BUF_COUNT / 2; ++i)
1714 {
1715 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1716 items.erase(items.begin() + i);
1717 }
1718
1719 // Defragment!
1720 {
1721 std::vector<VmaAllocation> allocationsToDefragment(items.size());
1722 for(size_t i = 0; i < items.size(); ++i)
1723 allocationsToDefragment[i] = items[i].Alloc;
1724
1725 VmaDefragmentationStats defragmentationStats;
1726 res = vmaDefragment(g_hAllocator, allocationsToDefragment.data(), items.size(), nullptr, nullptr, &defragmentationStats);
1727 assert(res == VK_SUCCESS);
1728 assert(defragmentationStats.deviceMemoryBlocksFreed == 2);
1729 }
1730
1731 // Free all remaining items.
1732 for(size_t i = items.size(); i--; )
1733 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1734 items.clear();
1735
1736 ////////////////////////////////////////////////////////////////////////////////
1737 // Test for vmaMakePoolAllocationsLost
1738
1739 // Allocate 4 buffers on frame 10.
1740 vmaSetCurrentFrameIndex(g_hAllocator, 10);
1741 for(size_t i = 0; i < 4; ++i)
1742 {
1743 BufItem item;
1744 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
1745 assert(res == VK_SUCCESS);
1746 items.push_back(item);
1747 }
1748
1749 // Touch first 2 of them on frame 11.
1750 vmaSetCurrentFrameIndex(g_hAllocator, 11);
1751 for(size_t i = 0; i < 2; ++i)
1752 {
1753 VmaAllocationInfo allocInfo;
1754 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
1755 }
1756
1757 // vmaMakePoolAllocationsLost. Only remaining 2 should be lost.
1758 size_t lostCount = 0xDEADC0DE;
1759 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
1760 assert(lostCount == 2);
1761
1762 // Make another call. Now 0 should be lost.
1763 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
1764 assert(lostCount == 0);
1765
1766 // Make another call, with null count. Should not crash.
1767 vmaMakePoolAllocationsLost(g_hAllocator, pool, nullptr);
1768
1769 // END: Free all remaining items.
1770 for(size_t i = items.size(); i--; )
1771 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
1772
1773 items.clear();
1774
Adam Sawickid2924172018-06-11 12:48:46 +02001775 ////////////////////////////////////////////////////////////////////////////////
1776 // Test for allocation too large for pool
1777
1778 {
1779 VmaAllocationCreateInfo allocCreateInfo = {};
1780 allocCreateInfo.pool = pool;
1781
1782 VkMemoryRequirements memReq;
1783 memReq.memoryTypeBits = UINT32_MAX;
1784 memReq.alignment = 1;
1785 memReq.size = poolCreateInfo.blockSize + 4;
1786
1787 VmaAllocation alloc = nullptr;
1788 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
1789 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr);
1790 }
1791
Adam Sawickib8333fb2018-03-13 16:15:53 +01001792 vmaDestroyPool(g_hAllocator, pool);
1793}
1794
// Returns true if every one of `size` bytes at pMemory equals `pattern`.
// An empty range (size == 0) trivially validates.
static bool ValidatePattern(const void* pMemory, size_t size, uint8_t pattern)
{
    const uint8_t* pCurr = static_cast<const uint8_t*>(pMemory);
    const uint8_t* const pEnd = pCurr + size;
    while(pCurr != pEnd)
    {
        if(*pCurr != pattern)
        {
            return false;
        }
        ++pCurr;
    }
    return true;
}
1807
// Verifies debug fill patterns in allocated memory: newly created allocations
// should read 0xDC and freed ones 0xEF — presumably matching VMA's
// VMA_DEBUG_INITIALIZE_ALLOCATIONS fill values (confirm against VmaUsage.h
// configuration).
static void TestAllocationsInitialization()
{
    VkResult res;

    const size_t BUF_SIZE = 1024;

    // Create pool.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = BUF_SIZE;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo dummyBufAllocCreateInfo = {};
    dummyBufAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    // Single-block pool so all test buffers share one mapped VkDeviceMemory.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.blockSize = BUF_SIZE * 10;
    poolCreateInfo.minBlockCount = 1; // To keep memory alive while pool exists.
    poolCreateInfo.maxBlockCount = 1;
    res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufInfo, &dummyBufAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    assert(res == VK_SUCCESS);

    VmaAllocationCreateInfo bufAllocCreateInfo = {};
    res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &bufAllocCreateInfo.pool);
    assert(res == VK_SUCCESS);

    // Create one persistently mapped buffer to keep memory of this block mapped,
    // so that pointer to mapped data will remain (more or less...) valid even
    // after destruction of other allocations.

    bufAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
    VkBuffer firstBuf;
    VmaAllocation firstAlloc;
    res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &firstBuf, &firstAlloc, nullptr);
    assert(res == VK_SUCCESS);

    // Test buffers.

    // i == 0: persistently mapped allocation; i == 1: explicit map/unmap.
    for(uint32_t i = 0; i < 2; ++i)
    {
        const bool persistentlyMapped = i == 0;
        bufAllocCreateInfo.flags = persistentlyMapped ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
        VkBuffer buf;
        VmaAllocation alloc;
        VmaAllocationInfo allocInfo;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &buf, &alloc, &allocInfo);
        assert(res == VK_SUCCESS);

        void* pMappedData;
        if(!persistentlyMapped)
        {
            res = vmaMapMemory(g_hAllocator, alloc, &pMappedData);
            assert(res == VK_SUCCESS);
        }
        else
        {
            pMappedData = allocInfo.pMappedData;
        }

        // Validate initialized content
        bool valid = ValidatePattern(pMappedData, BUF_SIZE, 0xDC);
        assert(valid);

        if(!persistentlyMapped)
        {
            vmaUnmapMemory(g_hAllocator, alloc);
        }

        vmaDestroyBuffer(g_hAllocator, buf, alloc);

        // Validate freed content
        // NOTE: pMappedData is read after the allocation is destroyed; it stays
        // valid only because firstBuf keeps the single pool block alive/mapped.
        valid = ValidatePattern(pMappedData, BUF_SIZE, 0xEF);
        assert(valid);
    }

    vmaDestroyBuffer(g_hAllocator, firstBuf, firstAlloc);
    vmaDestroyPool(g_hAllocator, bufAllocCreateInfo.pool);
}
1886
Adam Sawickib8333fb2018-03-13 16:15:53 +01001887static void TestPool_Benchmark(
1888 PoolTestResult& outResult,
1889 const PoolTestConfig& config)
1890{
1891 assert(config.ThreadCount > 0);
1892
1893 RandomNumberGenerator mainRand{config.RandSeed};
1894
1895 uint32_t allocationSizeProbabilitySum = std::accumulate(
1896 config.AllocationSizes.begin(),
1897 config.AllocationSizes.end(),
1898 0u,
1899 [](uint32_t sum, const AllocationSize& allocSize) {
1900 return sum + allocSize.Probability;
1901 });
1902
1903 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1904 bufferInfo.size = 256; // Whatever.
1905 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
1906
1907 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
1908 imageInfo.imageType = VK_IMAGE_TYPE_2D;
1909 imageInfo.extent.width = 256; // Whatever.
1910 imageInfo.extent.height = 256; // Whatever.
1911 imageInfo.extent.depth = 1;
1912 imageInfo.mipLevels = 1;
1913 imageInfo.arrayLayers = 1;
1914 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
1915 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // LINEAR if CPU memory.
1916 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
1917 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // TRANSFER_SRC if CPU memory.
1918 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
1919
1920 uint32_t bufferMemoryTypeBits = UINT32_MAX;
1921 {
1922 VkBuffer dummyBuffer;
1923 VkResult res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer);
1924 assert(res == VK_SUCCESS);
1925
1926 VkMemoryRequirements memReq;
1927 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
1928 bufferMemoryTypeBits = memReq.memoryTypeBits;
1929
1930 vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);
1931 }
1932
1933 uint32_t imageMemoryTypeBits = UINT32_MAX;
1934 {
1935 VkImage dummyImage;
1936 VkResult res = vkCreateImage(g_hDevice, &imageInfo, nullptr, &dummyImage);
1937 assert(res == VK_SUCCESS);
1938
1939 VkMemoryRequirements memReq;
1940 vkGetImageMemoryRequirements(g_hDevice, dummyImage, &memReq);
1941 imageMemoryTypeBits = memReq.memoryTypeBits;
1942
1943 vkDestroyImage(g_hDevice, dummyImage, nullptr);
1944 }
1945
1946 uint32_t memoryTypeBits = 0;
1947 if(config.UsesBuffers() && config.UsesImages())
1948 {
1949 memoryTypeBits = bufferMemoryTypeBits & imageMemoryTypeBits;
1950 if(memoryTypeBits == 0)
1951 {
1952 PrintWarning(L"Cannot test buffers + images in the same memory pool on this GPU.");
1953 return;
1954 }
1955 }
1956 else if(config.UsesBuffers())
1957 memoryTypeBits = bufferMemoryTypeBits;
1958 else if(config.UsesImages())
1959 memoryTypeBits = imageMemoryTypeBits;
1960 else
1961 assert(0);
1962
1963 VmaPoolCreateInfo poolCreateInfo = {};
1964 poolCreateInfo.memoryTypeIndex = 0;
1965 poolCreateInfo.minBlockCount = 1;
1966 poolCreateInfo.maxBlockCount = 1;
1967 poolCreateInfo.blockSize = config.PoolSize;
1968 poolCreateInfo.frameInUseCount = 1;
1969
1970 VmaAllocationCreateInfo dummyAllocCreateInfo = {};
1971 dummyAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1972 vmaFindMemoryTypeIndex(g_hAllocator, memoryTypeBits, &dummyAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
1973
1974 VmaPool pool;
1975 VkResult res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
1976 assert(res == VK_SUCCESS);
1977
1978 // Start time measurement - after creating pool and initializing data structures.
1979 time_point timeBeg = std::chrono::high_resolution_clock::now();
1980
1981 ////////////////////////////////////////////////////////////////////////////////
1982 // ThreadProc
1983 auto ThreadProc = [&](
1984 PoolTestThreadResult* outThreadResult,
1985 uint32_t randSeed,
1986 HANDLE frameStartEvent,
1987 HANDLE frameEndEvent) -> void
1988 {
1989 RandomNumberGenerator threadRand{randSeed};
1990
1991 outThreadResult->AllocationTimeMin = duration::max();
1992 outThreadResult->AllocationTimeSum = duration::zero();
1993 outThreadResult->AllocationTimeMax = duration::min();
1994 outThreadResult->DeallocationTimeMin = duration::max();
1995 outThreadResult->DeallocationTimeSum = duration::zero();
1996 outThreadResult->DeallocationTimeMax = duration::min();
1997 outThreadResult->AllocationCount = 0;
1998 outThreadResult->DeallocationCount = 0;
1999 outThreadResult->LostAllocationCount = 0;
2000 outThreadResult->LostAllocationTotalSize = 0;
2001 outThreadResult->FailedAllocationCount = 0;
2002 outThreadResult->FailedAllocationTotalSize = 0;
2003
2004 struct Item
2005 {
2006 VkDeviceSize BufferSize;
2007 VkExtent2D ImageSize;
2008 VkBuffer Buf;
2009 VkImage Image;
2010 VmaAllocation Alloc;
2011
2012 VkDeviceSize CalcSizeBytes() const
2013 {
2014 return BufferSize +
2015 ImageSize.width * ImageSize.height * 4;
2016 }
2017 };
2018 std::vector<Item> unusedItems, usedItems;
2019
2020 const size_t threadTotalItemCount = config.TotalItemCount / config.ThreadCount;
2021
2022 // Create all items - all unused, not yet allocated.
2023 for(size_t i = 0; i < threadTotalItemCount; ++i)
2024 {
2025 Item item = {};
2026
2027 uint32_t allocSizeIndex = 0;
2028 uint32_t r = threadRand.Generate() % allocationSizeProbabilitySum;
2029 while(r >= config.AllocationSizes[allocSizeIndex].Probability)
2030 r -= config.AllocationSizes[allocSizeIndex++].Probability;
2031
2032 const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
2033 if(allocSize.BufferSizeMax > 0)
2034 {
2035 assert(allocSize.BufferSizeMin > 0);
2036 assert(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0);
2037 if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
2038 item.BufferSize = allocSize.BufferSizeMin;
2039 else
2040 {
2041 item.BufferSize = allocSize.BufferSizeMin + threadRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
2042 item.BufferSize = item.BufferSize / 16 * 16;
2043 }
2044 }
2045 else
2046 {
2047 assert(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0);
2048 if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
2049 item.ImageSize.width = item.ImageSize.height = allocSize.ImageSizeMax;
2050 else
2051 {
2052 item.ImageSize.width = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
2053 item.ImageSize.height = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
2054 }
2055 }
2056
2057 unusedItems.push_back(item);
2058 }
2059
2060 auto Allocate = [&](Item& item) -> VkResult
2061 {
2062 VmaAllocationCreateInfo allocCreateInfo = {};
2063 allocCreateInfo.pool = pool;
2064 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
2065 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
2066
2067 if(item.BufferSize)
2068 {
2069 bufferInfo.size = item.BufferSize;
2070 PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2071 return vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocCreateInfo, &item.Buf, &item.Alloc, nullptr);
2072 }
2073 else
2074 {
2075 assert(item.ImageSize.width && item.ImageSize.height);
2076
2077 imageInfo.extent.width = item.ImageSize.width;
2078 imageInfo.extent.height = item.ImageSize.height;
2079 PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2080 return vmaCreateImage(g_hAllocator, &imageInfo, &allocCreateInfo, &item.Image, &item.Alloc, nullptr);
2081 }
2082 };
2083
2084 ////////////////////////////////////////////////////////////////////////////////
2085 // Frames
2086 for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
2087 {
2088 WaitForSingleObject(frameStartEvent, INFINITE);
2089
2090 // Always make some percent of used bufs unused, to choose different used ones.
2091 const size_t bufsToMakeUnused = usedItems.size() * config.ItemsToMakeUnusedPercent / 100;
2092 for(size_t i = 0; i < bufsToMakeUnused; ++i)
2093 {
2094 size_t index = threadRand.Generate() % usedItems.size();
2095 unusedItems.push_back(usedItems[index]);
2096 usedItems.erase(usedItems.begin() + index);
2097 }
2098
2099 // Determine which bufs we want to use in this frame.
2100 const size_t usedBufCount = (threadRand.Generate() % (config.UsedItemCountMax - config.UsedItemCountMin) + config.UsedItemCountMin)
2101 / config.ThreadCount;
2102 assert(usedBufCount < usedItems.size() + unusedItems.size());
2103 // Move some used to unused.
2104 while(usedBufCount < usedItems.size())
2105 {
2106 size_t index = threadRand.Generate() % usedItems.size();
2107 unusedItems.push_back(usedItems[index]);
2108 usedItems.erase(usedItems.begin() + index);
2109 }
2110 // Move some unused to used.
2111 while(usedBufCount > usedItems.size())
2112 {
2113 size_t index = threadRand.Generate() % unusedItems.size();
2114 usedItems.push_back(unusedItems[index]);
2115 unusedItems.erase(unusedItems.begin() + index);
2116 }
2117
2118 uint32_t touchExistingCount = 0;
2119 uint32_t touchLostCount = 0;
2120 uint32_t createSucceededCount = 0;
2121 uint32_t createFailedCount = 0;
2122
2123 // Touch all used bufs. If not created or lost, allocate.
2124 for(size_t i = 0; i < usedItems.size(); ++i)
2125 {
2126 Item& item = usedItems[i];
2127 // Not yet created.
2128 if(item.Alloc == VK_NULL_HANDLE)
2129 {
2130 res = Allocate(item);
2131 ++outThreadResult->AllocationCount;
2132 if(res != VK_SUCCESS)
2133 {
2134 item.Alloc = VK_NULL_HANDLE;
2135 item.Buf = VK_NULL_HANDLE;
2136 ++outThreadResult->FailedAllocationCount;
2137 outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
2138 ++createFailedCount;
2139 }
2140 else
2141 ++createSucceededCount;
2142 }
2143 else
2144 {
2145 // Touch.
2146 VmaAllocationInfo allocInfo;
2147 vmaGetAllocationInfo(g_hAllocator, item.Alloc, &allocInfo);
2148 // Lost.
2149 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
2150 {
2151 ++touchLostCount;
2152
2153 // Destroy.
2154 {
2155 PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2156 if(item.Buf)
2157 vmaDestroyBuffer(g_hAllocator, item.Buf, item.Alloc);
2158 else
2159 vmaDestroyImage(g_hAllocator, item.Image, item.Alloc);
2160 ++outThreadResult->DeallocationCount;
2161 }
2162 item.Alloc = VK_NULL_HANDLE;
2163 item.Buf = VK_NULL_HANDLE;
2164
2165 ++outThreadResult->LostAllocationCount;
2166 outThreadResult->LostAllocationTotalSize += item.CalcSizeBytes();
2167
2168 // Recreate.
2169 res = Allocate(item);
2170 ++outThreadResult->AllocationCount;
2171 // Creation failed.
2172 if(res != VK_SUCCESS)
2173 {
2174 ++outThreadResult->FailedAllocationCount;
2175 outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
2176 ++createFailedCount;
2177 }
2178 else
2179 ++createSucceededCount;
2180 }
2181 else
2182 ++touchExistingCount;
2183 }
2184 }
2185
2186 /*
2187 printf("Thread %u frame %u: Touch existing %u lost %u, create succeeded %u failed %u\n",
2188 randSeed, frameIndex,
2189 touchExistingCount, touchLostCount,
2190 createSucceededCount, createFailedCount);
2191 */
2192
2193 SetEvent(frameEndEvent);
2194 }
2195
2196 // Free all remaining items.
2197 for(size_t i = usedItems.size(); i--; )
2198 {
2199 PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2200 if(usedItems[i].Buf)
2201 vmaDestroyBuffer(g_hAllocator, usedItems[i].Buf, usedItems[i].Alloc);
2202 else
2203 vmaDestroyImage(g_hAllocator, usedItems[i].Image, usedItems[i].Alloc);
2204 ++outThreadResult->DeallocationCount;
2205 }
2206 for(size_t i = unusedItems.size(); i--; )
2207 {
2208 PoolDeallocationTimeRegisterObj timeRegisterOb(*outThreadResult);
2209 if(unusedItems[i].Buf)
2210 vmaDestroyBuffer(g_hAllocator, unusedItems[i].Buf, unusedItems[i].Alloc);
2211 else
2212 vmaDestroyImage(g_hAllocator, unusedItems[i].Image, unusedItems[i].Alloc);
2213 ++outThreadResult->DeallocationCount;
2214 }
2215 };
2216
2217 // Launch threads.
2218 uint32_t threadRandSeed = mainRand.Generate();
2219 std::vector<HANDLE> frameStartEvents{config.ThreadCount};
2220 std::vector<HANDLE> frameEndEvents{config.ThreadCount};
2221 std::vector<std::thread> bkgThreads;
2222 std::vector<PoolTestThreadResult> threadResults{config.ThreadCount};
2223 for(uint32_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
2224 {
2225 frameStartEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
2226 frameEndEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
2227 bkgThreads.emplace_back(std::bind(
2228 ThreadProc,
2229 &threadResults[threadIndex],
2230 threadRandSeed + threadIndex,
2231 frameStartEvents[threadIndex],
2232 frameEndEvents[threadIndex]));
2233 }
2234
2235 // Execute frames.
2236 assert(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS);
2237 for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
2238 {
2239 vmaSetCurrentFrameIndex(g_hAllocator, frameIndex);
2240 for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
2241 SetEvent(frameStartEvents[threadIndex]);
2242 WaitForMultipleObjects(config.ThreadCount, &frameEndEvents[0], TRUE, INFINITE);
2243 }
2244
2245 // Wait for threads finished
2246 for(size_t i = 0; i < bkgThreads.size(); ++i)
2247 {
2248 bkgThreads[i].join();
2249 CloseHandle(frameEndEvents[i]);
2250 CloseHandle(frameStartEvents[i]);
2251 }
2252 bkgThreads.clear();
2253
2254 // Finish time measurement - before destroying pool.
2255 outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;
2256
2257 vmaDestroyPool(g_hAllocator, pool);
2258
2259 outResult.AllocationTimeMin = duration::max();
2260 outResult.AllocationTimeAvg = duration::zero();
2261 outResult.AllocationTimeMax = duration::min();
2262 outResult.DeallocationTimeMin = duration::max();
2263 outResult.DeallocationTimeAvg = duration::zero();
2264 outResult.DeallocationTimeMax = duration::min();
2265 outResult.LostAllocationCount = 0;
2266 outResult.LostAllocationTotalSize = 0;
2267 outResult.FailedAllocationCount = 0;
2268 outResult.FailedAllocationTotalSize = 0;
2269 size_t allocationCount = 0;
2270 size_t deallocationCount = 0;
2271 for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
2272 {
2273 const PoolTestThreadResult& threadResult = threadResults[threadIndex];
2274 outResult.AllocationTimeMin = std::min(outResult.AllocationTimeMin, threadResult.AllocationTimeMin);
2275 outResult.AllocationTimeMax = std::max(outResult.AllocationTimeMax, threadResult.AllocationTimeMax);
2276 outResult.AllocationTimeAvg += threadResult.AllocationTimeSum;
2277 outResult.DeallocationTimeMin = std::min(outResult.DeallocationTimeMin, threadResult.DeallocationTimeMin);
2278 outResult.DeallocationTimeMax = std::max(outResult.DeallocationTimeMax, threadResult.DeallocationTimeMax);
2279 outResult.DeallocationTimeAvg += threadResult.DeallocationTimeSum;
2280 allocationCount += threadResult.AllocationCount;
2281 deallocationCount += threadResult.DeallocationCount;
2282 outResult.FailedAllocationCount += threadResult.FailedAllocationCount;
2283 outResult.FailedAllocationTotalSize += threadResult.FailedAllocationTotalSize;
2284 outResult.LostAllocationCount += threadResult.LostAllocationCount;
2285 outResult.LostAllocationTotalSize += threadResult.LostAllocationTotalSize;
2286 }
2287 if(allocationCount)
2288 outResult.AllocationTimeAvg /= allocationCount;
2289 if(deallocationCount)
2290 outResult.DeallocationTimeAvg /= deallocationCount;
2291}
2292
// Returns true if half-open byte ranges [ptr1, ptr1+size1) and
// [ptr2, ptr2+size2) share at least one byte.
// Fix: a zero-sized region contains no bytes, so it never overlaps anything -
// the previous version reported overlap for two empty regions at the same
// address (equal-pointer case returned true unconditionally).
static inline bool MemoryRegionsOverlap(char* ptr1, size_t size1, char* ptr2, size_t size2)
{
    if(size1 == 0 || size2 == 0)
        return false;
    if(ptr1 < ptr2)
        return ptr1 + size1 > ptr2;
    else if(ptr2 < ptr1)
        return ptr2 + size2 > ptr1;
    else
        return true; // Same start address and both non-empty.
}
2302
// Tests vmaMapMemory / vmaUnmapMemory reference counting and persistently
// mapped allocations (VMA_ALLOCATION_CREATE_MAPPED_BIT). The same scenario is
// run in three setups: default allocations, allocations from a custom pool,
// and dedicated allocations.
static void TestMapping()
{
    wprintf(L"Testing mapping...\n");

    VkResult res;
    // Memory type learned from the first (TEST_NORMAL) pass; required to
    // create the custom pool in the TEST_POOL pass.
    uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            assert(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            assert(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufInfo.size = 0x10000;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.pool = pool; // nullptr except in the TEST_POOL pass.
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        VmaAllocationInfo allocInfo;

        // Mapped manually

        // Create 2 buffers.
        BufferInfo bufferInfos[3];
        for(size_t i = 0; i < 2; ++i)
        {
            res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
                &bufferInfos[i].Buffer, &bufferInfos[i].Allocation, &allocInfo);
            assert(res == VK_SUCCESS);
            // Not created with MAPPED_BIT, so must not be pre-mapped.
            assert(allocInfo.pMappedData == nullptr);
            memTypeIndex = allocInfo.memoryType;
        }

        // Map buffer 0.
        char* data00 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data00);
        assert(res == VK_SUCCESS && data00 != nullptr);
        // Touch first and last byte of the 0x10000-byte buffer.
        data00[0xFFFF] = data00[0];

        // Map buffer 0 second time.
        // Nested mapping must return the same pointer (map is refcounted).
        char* data01 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data01);
        assert(res == VK_SUCCESS && data01 == data00);

        // Map buffer 1.
        char* data1 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[1].Allocation, (void**)&data1);
        assert(res == VK_SUCCESS && data1 != nullptr);
        // Two distinct allocations must map to disjoint memory ranges.
        assert(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size));
        data1[0xFFFF] = data1[0];

        // Unmap buffer 0 two times.
        // The refcount drops to zero, so the allocation reports unmapped.
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[0].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == nullptr);

        // Unmap buffer 1.
        vmaUnmapMemory(g_hAllocator, bufferInfos[1].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[1].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == nullptr);

        // Create 3rd buffer - persistently mapped.
        allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
            &bufferInfos[2].Buffer, &bufferInfos[2].Allocation, &allocInfo);
        assert(res == VK_SUCCESS && allocInfo.pMappedData != nullptr);

        // Map buffer 2.
        // Manual map on a persistently mapped allocation returns the same pointer.
        char* data2 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[2].Allocation, (void**)&data2);
        assert(res == VK_SUCCESS && data2 == allocInfo.pMappedData);
        data2[0xFFFF] = data2[0];

        // Unmap buffer 2.
        // The persistent mapping must survive the manual unmap.
        vmaUnmapMemory(g_hAllocator, bufferInfos[2].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[2].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == data2);

        // Destroy all buffers.
        for(size_t i = 3; i--; )
            vmaDestroyBuffer(g_hAllocator, bufferInfos[i].Buffer, bufferInfos[i].Allocation);

        vmaDestroyPool(g_hAllocator, pool);
    }
}
2407
// Stress-tests mapping from many threads at once: 16 threads each create 64
// buffers, map/unmap them according to a randomly chosen per-buffer mode, and
// read/write the first and last mapped byte. Like TestMapping, the whole
// scenario is repeated for normal, custom-pool, and dedicated allocations.
static void TestMappingMultithreaded()
{
    wprintf(L"Testing mapping multithreaded...\n");

    static const uint32_t threadCount = 16;
    static const uint32_t bufferCount = 1024;
    static const uint32_t threadBufferCount = bufferCount / threadCount;

    VkResult res;
    // Memory type observed by the first allocation, used to create the custom
    // pool in the TEST_POOL pass.
    // NOTE(review): volatile is not a thread-synchronization primitive; the
    // worker threads below write this concurrently, which is a data race -
    // consider std::atomic<uint32_t> instead. TODO confirm intent.
    volatile uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            assert(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            assert(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 0x10000;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.pool = pool; // nullptr except in the TEST_POOL pass.
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        std::thread threads[threadCount];
        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
        {
            // Capture config by value; memTypeIndex by reference so threads
            // can publish it for later passes.
            threads[threadIndex] = std::thread([=, &memTypeIndex](){
                // ======== THREAD FUNCTION ========

                RandomNumberGenerator rand{threadIndex};

                enum class MODE
                {
                    // Don't map this buffer at all.
                    DONT_MAP,
                    // Map and quickly unmap.
                    MAP_FOR_MOMENT,
                    // Map and unmap before destruction.
                    MAP_FOR_LONGER,
                    // Map two times. Quickly unmap, second unmap before destruction.
                    MAP_TWO_TIMES,
                    // Create this buffer as persistently mapped.
                    PERSISTENTLY_MAPPED,
                    COUNT
                };
                std::vector<BufferInfo> bufInfos{threadBufferCount};
                std::vector<MODE> bufModes{threadBufferCount};

                // Phase 1: create each buffer, map per its random mode, touch memory.
                for(uint32_t bufferIndex = 0; bufferIndex < threadBufferCount; ++bufferIndex)
                {
                    BufferInfo& bufInfo = bufInfos[bufferIndex];
                    const MODE mode = (MODE)(rand.Generate() % (uint32_t)MODE::COUNT);
                    bufModes[bufferIndex] = mode;

                    VmaAllocationCreateInfo localAllocCreateInfo = allocCreateInfo;
                    if(mode == MODE::PERSISTENTLY_MAPPED)
                        localAllocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;

                    VmaAllocationInfo allocInfo;
                    VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &localAllocCreateInfo,
                        &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo);
                    assert(res == VK_SUCCESS);

                    // Publish the memory type for the TEST_POOL pass (racy write, see NOTE above).
                    if(memTypeIndex == UINT32_MAX)
                        memTypeIndex = allocInfo.memoryType;

                    char* data = nullptr;

                    if(mode == MODE::PERSISTENTLY_MAPPED)
                    {
                        data = (char*)allocInfo.pMappedData;
                        assert(data != nullptr);
                    }
                    else if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_FOR_LONGER ||
                        mode == MODE::MAP_TWO_TIMES)
                    {
                        assert(data == nullptr);
                        res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data);
                        assert(res == VK_SUCCESS && data != nullptr);

                        if(mode == MODE::MAP_TWO_TIMES)
                        {
                            // Nested map must return the same pointer (refcounted).
                            char* data2 = nullptr;
                            res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data2);
                            assert(res == VK_SUCCESS && data2 == data);
                        }
                    }
                    else if(mode == MODE::DONT_MAP)
                    {
                        assert(allocInfo.pMappedData == nullptr);
                    }
                    else
                        assert(0);

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];

                    if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfo.Allocation);

                        // MAP_FOR_MOMENT drops the last reference -> unmapped;
                        // MAP_TWO_TIMES still holds one map reference.
                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfo.Allocation, &allocInfo);
                        if(mode == MODE::MAP_FOR_MOMENT)
                            assert(allocInfo.pMappedData == nullptr);
                        else
                            assert(allocInfo.pMappedData == data);
                    }

                    // Randomly yield/sleep to shake out thread interleavings.
                    switch(rand.Generate() % 3)
                    {
                    case 0: Sleep(0); break; // Yield.
                    case 1: Sleep(10); break; // 10 ms
                    // default: No sleep.
                    }

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];
                }

                // Phase 2: release remaining map references and destroy buffers
                // in reverse creation order.
                for(size_t bufferIndex = threadBufferCount; bufferIndex--; )
                {
                    if(bufModes[bufferIndex] == MODE::MAP_FOR_LONGER ||
                        bufModes[bufferIndex] == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfos[bufferIndex].Allocation);

                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfos[bufferIndex].Allocation, &allocInfo);
                        assert(allocInfo.pMappedData == nullptr);
                    }

                    vmaDestroyBuffer(g_hAllocator, bufInfos[bufferIndex].Buffer, bufInfos[bufferIndex].Allocation);
                }
            });
        }

        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
            threads[threadIndex].join();

        vmaDestroyPool(g_hAllocator, pool);
    }
}
2570
// Writes the CSV column header row for main-test result rows
// (see WriteMainTestResult for the matching data columns).
static void WriteMainTestResultHeader(FILE* file)
{
    static const char* const HEADER_LINE =
        "Code,Test,Time,"
        "Config,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Total Memory Allocated (B),"
        "Free Range Size Avg (B),"
        "Free Range Size Max (B)\n";
    fputs(HEADER_LINE, file);
}
2587
2588static void WriteMainTestResult(
2589 FILE* file,
2590 const char* codeDescription,
2591 const char* testDescription,
2592 const Config& config, const Result& result)
2593{
2594 float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
2595 float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
2596 float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
2597 float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
2598 float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
2599 float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
2600 float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);
2601
2602 time_t rawTime; time(&rawTime);
2603 struct tm timeInfo; localtime_s(&timeInfo, &rawTime);
2604 char timeStr[128];
2605 strftime(timeStr, _countof(timeStr), "%c", &timeInfo);
2606
2607 fprintf(file,
2608 "%s,%s,%s,"
2609 "BeginBytesToAllocate=%I64u MaxBytesToAllocate=%I64u AdditionalOperationCount=%u ThreadCount=%u FreeOrder=%d,"
2610 "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u\n",
2611 codeDescription,
2612 testDescription,
2613 timeStr,
2614 config.BeginBytesToAllocate, config.MaxBytesToAllocate, config.AdditionalOperationCount, config.ThreadCount, (uint32_t)config.FreeOrder,
2615 totalTimeSeconds * 1e6f,
2616 allocationTimeMinSeconds * 1e6f,
2617 allocationTimeAvgSeconds * 1e6f,
2618 allocationTimeMaxSeconds * 1e6f,
2619 deallocationTimeMinSeconds * 1e6f,
2620 deallocationTimeAvgSeconds * 1e6f,
2621 deallocationTimeMaxSeconds * 1e6f,
2622 result.TotalMemoryAllocated,
2623 result.FreeRangeSizeAvg,
2624 result.FreeRangeSizeMax);
2625}
2626
// Writes the CSV column header row for pool-benchmark result rows
// (see WritePoolTestResult for the matching data columns).
static void WritePoolTestResultHeader(FILE* file)
{
    static const char* const HEADER_LINE =
        "Code,Test,Time,"
        "Config,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Lost Allocation Count,"
        "Lost Allocation Total Size (B),"
        "Failed Allocation Count,"
        "Failed Allocation Total Size (B)\n";
    fputs(HEADER_LINE, file);
}
2644
2645static void WritePoolTestResult(
2646 FILE* file,
2647 const char* codeDescription,
2648 const char* testDescription,
2649 const PoolTestConfig& config,
2650 const PoolTestResult& result)
2651{
2652 float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
2653 float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
2654 float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
2655 float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
2656 float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
2657 float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
2658 float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);
2659
2660 time_t rawTime; time(&rawTime);
2661 struct tm timeInfo; localtime_s(&timeInfo, &rawTime);
2662 char timeStr[128];
2663 strftime(timeStr, _countof(timeStr), "%c", &timeInfo);
2664
2665 fprintf(file,
2666 "%s,%s,%s,"
2667 "ThreadCount=%u PoolSize=%llu FrameCount=%u TotalItemCount=%u UsedItemCount=%u...%u ItemsToMakeUnusedPercent=%u,"
2668 "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u,%I64u\n",
2669 // General
2670 codeDescription,
2671 testDescription,
2672 timeStr,
2673 // Config
2674 config.ThreadCount,
2675 (unsigned long long)config.PoolSize,
2676 config.FrameCount,
2677 config.TotalItemCount,
2678 config.UsedItemCountMin,
2679 config.UsedItemCountMax,
2680 config.ItemsToMakeUnusedPercent,
2681 // Results
2682 totalTimeSeconds * 1e6f,
2683 allocationTimeMinSeconds * 1e6f,
2684 allocationTimeAvgSeconds * 1e6f,
2685 allocationTimeMaxSeconds * 1e6f,
2686 deallocationTimeMinSeconds * 1e6f,
2687 deallocationTimeAvgSeconds * 1e6f,
2688 deallocationTimeMaxSeconds * 1e6f,
2689 result.LostAllocationCount,
2690 result.LostAllocationTotalSize,
2691 result.FailedAllocationCount,
2692 result.FailedAllocationTotalSize);
2693}
2694
2695static void PerformCustomMainTest(FILE* file)
2696{
2697 Config config{};
2698 config.RandSeed = 65735476;
2699 //config.MaxBytesToAllocate = 4ull * 1024 * 1024; // 4 MB
2700 config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
2701 config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
2702 config.FreeOrder = FREE_ORDER::FORWARD;
2703 config.ThreadCount = 16;
2704 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
2705
2706 // Buffers
2707 //config.AllocationSizes.push_back({4, 16, 1024});
2708 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
2709
2710 // Images
2711 //config.AllocationSizes.push_back({4, 0, 0, 4, 32});
2712 //config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
2713
2714 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
2715 config.AdditionalOperationCount = 1024;
2716
2717 Result result{};
2718 VkResult res = MainTest(result, config);
2719 assert(res == VK_SUCCESS);
2720 WriteMainTestResult(file, "Foo", "CustomTest", config, result);
2721}
2722
2723static void PerformCustomPoolTest(FILE* file)
2724{
2725 PoolTestConfig config;
2726 config.PoolSize = 100 * 1024 * 1024;
2727 config.RandSeed = 2345764;
2728 config.ThreadCount = 1;
2729 config.FrameCount = 200;
2730 config.ItemsToMakeUnusedPercent = 2;
2731
2732 AllocationSize allocSize = {};
2733 allocSize.BufferSizeMin = 1024;
2734 allocSize.BufferSizeMax = 1024 * 1024;
2735 allocSize.Probability = 1;
2736 config.AllocationSizes.push_back(allocSize);
2737
2738 allocSize.BufferSizeMin = 0;
2739 allocSize.BufferSizeMax = 0;
2740 allocSize.ImageSizeMin = 128;
2741 allocSize.ImageSizeMax = 1024;
2742 allocSize.Probability = 1;
2743 config.AllocationSizes.push_back(allocSize);
2744
2745 config.PoolSize = config.CalcAvgResourceSize() * 200;
2746 config.UsedItemCountMax = 160;
2747 config.TotalItemCount = config.UsedItemCountMax * 10;
2748 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
2749
2750 g_MemoryAliasingWarningEnabled = false;
2751 PoolTestResult result = {};
2752 TestPool_Benchmark(result, config);
2753 g_MemoryAliasingWarningEnabled = true;
2754
2755 WritePoolTestResult(file, "Code desc", "Test desc", config, result);
2756}
2757
// Size of the test matrix used by the Perform*Tests functions: larger config
// types run more parameter combinations (e.g. more thread-count variants) and,
// at CONFIG_TYPE_MAXIMUM, repeat each test several times.
enum CONFIG_TYPE {
    CONFIG_TYPE_MINIMUM,
    CONFIG_TYPE_SMALL,
    CONFIG_TYPE_AVERAGE,
    CONFIG_TYPE_LARGE,
    CONFIG_TYPE_MAXIMUM,
    CONFIG_TYPE_COUNT
};

// Currently selected test matrix size.
static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_SMALL;
//static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_LARGE;
// Label written to the "Code" CSV column to identify this code version.
static const char* CODE_DESCRIPTION = "Foo";
2770
// Runs the main allocation benchmark over a grid of configurations and writes
// one CSV row per combination to `file` via WriteMainTestResult. The grid has
// five nested axes: thread count/sharing, buffers vs. images, small vs. large
// sizes, varying vs. constant sizes, and initial-allocation percentage. How
// many points of each axis are exercised is governed by the compile-time
// ConfigType constant.
static void PerformMainTests(FILE* file)
{
    // Repeat each configuration several times only in the most exhaustive mode.
    uint32_t repeatCount = 1;
    if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;

    // Base configuration shared by all grid points; the loops below mutate it
    // in place, so every field an inner loop reads is (re)assigned before use.
    Config config{};
    config.RandSeed = 65735476; // Fixed seed for reproducible runs.
    config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
    config.FreeOrder = FREE_ORDER::FORWARD;

    // Axis 1: thread count and percentage of allocations shared among threads.
    size_t threadCountCount = 1;
    switch(ConfigType)
    {
    case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
    case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
    case CONFIG_TYPE_AVERAGE: threadCountCount = 3; break;
    case CONFIG_TYPE_LARGE: threadCountCount = 5; break;
    case CONFIG_TYPE_MAXIMUM: threadCountCount = 7; break;
    default: assert(0);
    }
    for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
    {
        // desc1..desc5 accumulate a human-readable description of the current
        // grid point, one axis per level, ending up in the CSV test column.
        std::string desc1;

        switch(threadCountIndex)
        {
        case 0:
            desc1 += "1_thread";
            config.ThreadCount = 1;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
            break;
        case 1:
            desc1 += "16_threads+0%_common";
            config.ThreadCount = 16;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
            break;
        case 2:
            desc1 += "16_threads+50%_common";
            config.ThreadCount = 16;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
            break;
        case 3:
            desc1 += "16_threads+100%_common";
            config.ThreadCount = 16;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
            break;
        case 4:
            desc1 += "2_threads+0%_common";
            config.ThreadCount = 2;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
            break;
        case 5:
            desc1 += "2_threads+50%_common";
            config.ThreadCount = 2;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
            break;
        case 6:
            desc1 += "2_threads+100%_common";
            config.ThreadCount = 2;
            config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
            break;
        default:
            assert(0);
        }

        // Axis 2: resource kinds.
        // 0 = buffers, 1 = images, 2 = buffers and images
        size_t buffersVsImagesCount = 2;
        if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
        for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
        {
            std::string desc2 = desc1;
            switch(buffersVsImagesIndex)
            {
            case 0: desc2 += " Buffers"; break;
            case 1: desc2 += " Images"; break;
            case 2: desc2 += " Buffers+Images"; break;
            default: assert(0);
            }

            // Axis 3: size class of the resources.
            // 0 = small, 1 = large, 2 = small and large
            size_t smallVsLargeCount = 2;
            if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
            for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
            {
                std::string desc3 = desc2;
                switch(smallVsLargeIndex)
                {
                case 0: desc3 += " Small"; break;
                case 1: desc3 += " Large"; break;
                case 2: desc3 += " Small+Large"; break;
                default: assert(0);
                }

                // Budget scales with the size class so runs stay comparable.
                if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
                    config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
                else
                    config.MaxBytesToAllocate = 4ull * 1024 * 1024;

                // Axis 4: size distribution.
                // 0 = varying sizes min...max, 1 = set of constant sizes
                size_t constantSizesCount = 1;
                if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
                for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
                {
                    std::string desc4 = desc3;
                    switch(constantSizesIndex)
                    {
                    case 0: desc4 += " Varying_sizes"; break;
                    case 1: desc4 += " Constant_sizes"; break;
                    default: assert(0);
                    }

                    // Rebuild the size table for this grid point. AllocationSize
                    // entries with nonzero BufferSize* describe buffers; entries
                    // with nonzero ImageSize* describe images.
                    config.AllocationSizes.clear();
                    // Buffers present
                    if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
                    {
                        // Small
                        if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 16, 1024});
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 16, 16});
                                config.AllocationSizes.push_back({1, 64, 64});
                                config.AllocationSizes.push_back({1, 256, 256});
                                config.AllocationSizes.push_back({1, 1024, 1024});
                            }
                        }
                        // Large
                        if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 0x10000, 0x10000});
                                config.AllocationSizes.push_back({1, 0x80000, 0x80000});
                                config.AllocationSizes.push_back({1, 0x200000, 0x200000});
                                config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
                            }
                        }
                    }
                    // Images present
                    if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
                    {
                        // Small
                        if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 0, 0, 4, 32});
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 0, 0, 4, 4});
                                config.AllocationSizes.push_back({1, 0, 0, 8, 8});
                                config.AllocationSizes.push_back({1, 0, 0, 16, 16});
                                config.AllocationSizes.push_back({1, 0, 0, 32, 32});
                            }
                        }
                        // Large
                        if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
                        {
                            // Varying size
                            if(constantSizesIndex == 0)
                                config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
                            // Constant sizes
                            else
                            {
                                config.AllocationSizes.push_back({1, 0, 0, 256, 256});
                                config.AllocationSizes.push_back({1, 0, 0, 512, 512});
                                config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
                                config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
                            }
                        }
                    }

                    // Axis 5: how much of the budget is allocated up front vs.
                    // exercised through additional alloc/free operations.
                    // 0 = 100%, additional_operations = 0, 1 = 50%, 2 = 5%, 3 = 95% additional_operations = a lot
                    size_t beginBytesToAllocateCount = 1;
                    if(ConfigType >= CONFIG_TYPE_SMALL) ++beginBytesToAllocateCount;
                    if(ConfigType >= CONFIG_TYPE_AVERAGE) ++beginBytesToAllocateCount;
                    if(ConfigType >= CONFIG_TYPE_LARGE) ++beginBytesToAllocateCount;
                    for(size_t beginBytesToAllocateIndex = 0; beginBytesToAllocateIndex < beginBytesToAllocateCount; ++beginBytesToAllocateIndex)
                    {
                        std::string desc5 = desc4;

                        switch(beginBytesToAllocateIndex)
                        {
                        case 0:
                            desc5 += " Allocate_100%";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate;
                            config.AdditionalOperationCount = 0;
                            break;
                        case 1:
                            desc5 += " Allocate_50%+Operations";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate * 50 / 100;
                            config.AdditionalOperationCount = 1024;
                            break;
                        case 2:
                            desc5 += " Allocate_5%+Operations";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
                            config.AdditionalOperationCount = 1024;
                            break;
                        case 3:
                            desc5 += " Allocate_95%+Operations";
                            config.BeginBytesToAllocate = config.MaxBytesToAllocate * 95 / 100;
                            config.AdditionalOperationCount = 1024;
                            break;
                        default:
                            assert(0);
                        }

                        // Pointer stays valid: desc5 outlives every use below.
                        const char* testDescription = desc5.c_str();

                        for(size_t repeat = 0; repeat < repeatCount; ++repeat)
                        {
                            printf("%s Repeat %u\n", testDescription, (uint32_t)repeat);

                            Result result{};
                            VkResult res = MainTest(result, config);
                            assert(res == VK_SUCCESS);
                            WriteMainTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
                        }
                    }
                }
            }
        }
    }
}
3004
3005static void PerformPoolTests(FILE* file)
3006{
3007 const size_t AVG_RESOURCES_PER_POOL = 300;
3008
3009 uint32_t repeatCount = 1;
3010 if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;
3011
3012 PoolTestConfig config{};
3013 config.RandSeed = 2346343;
3014 config.FrameCount = 200;
3015 config.ItemsToMakeUnusedPercent = 2;
3016
3017 size_t threadCountCount = 1;
3018 switch(ConfigType)
3019 {
3020 case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
3021 case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
3022 case CONFIG_TYPE_AVERAGE: threadCountCount = 2; break;
3023 case CONFIG_TYPE_LARGE: threadCountCount = 3; break;
3024 case CONFIG_TYPE_MAXIMUM: threadCountCount = 3; break;
3025 default: assert(0);
3026 }
3027 for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
3028 {
3029 std::string desc1;
3030
3031 switch(threadCountIndex)
3032 {
3033 case 0:
3034 desc1 += "1_thread";
3035 config.ThreadCount = 1;
3036 break;
3037 case 1:
3038 desc1 += "16_threads";
3039 config.ThreadCount = 16;
3040 break;
3041 case 2:
3042 desc1 += "2_threads";
3043 config.ThreadCount = 2;
3044 break;
3045 default:
3046 assert(0);
3047 }
3048
3049 // 0 = buffers, 1 = images, 2 = buffers and images
3050 size_t buffersVsImagesCount = 2;
3051 if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
3052 for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
3053 {
3054 std::string desc2 = desc1;
3055 switch(buffersVsImagesIndex)
3056 {
3057 case 0: desc2 += " Buffers"; break;
3058 case 1: desc2 += " Images"; break;
3059 case 2: desc2 += " Buffers+Images"; break;
3060 default: assert(0);
3061 }
3062
3063 // 0 = small, 1 = large, 2 = small and large
3064 size_t smallVsLargeCount = 2;
3065 if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
3066 for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
3067 {
3068 std::string desc3 = desc2;
3069 switch(smallVsLargeIndex)
3070 {
3071 case 0: desc3 += " Small"; break;
3072 case 1: desc3 += " Large"; break;
3073 case 2: desc3 += " Small+Large"; break;
3074 default: assert(0);
3075 }
3076
3077 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3078 config.PoolSize = 6ull * 1024 * 1024 * 1024; // 6 GB
3079 else
3080 config.PoolSize = 4ull * 1024 * 1024;
3081
3082 // 0 = varying sizes min...max, 1 = set of constant sizes
3083 size_t constantSizesCount = 1;
3084 if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
3085 for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
3086 {
3087 std::string desc4 = desc3;
3088 switch(constantSizesIndex)
3089 {
3090 case 0: desc4 += " Varying_sizes"; break;
3091 case 1: desc4 += " Constant_sizes"; break;
3092 default: assert(0);
3093 }
3094
3095 config.AllocationSizes.clear();
3096 // Buffers present
3097 if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
3098 {
3099 // Small
3100 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3101 {
3102 // Varying size
3103 if(constantSizesIndex == 0)
3104 config.AllocationSizes.push_back({4, 16, 1024});
3105 // Constant sizes
3106 else
3107 {
3108 config.AllocationSizes.push_back({1, 16, 16});
3109 config.AllocationSizes.push_back({1, 64, 64});
3110 config.AllocationSizes.push_back({1, 256, 256});
3111 config.AllocationSizes.push_back({1, 1024, 1024});
3112 }
3113 }
3114 // Large
3115 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3116 {
3117 // Varying size
3118 if(constantSizesIndex == 0)
3119 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
3120 // Constant sizes
3121 else
3122 {
3123 config.AllocationSizes.push_back({1, 0x10000, 0x10000});
3124 config.AllocationSizes.push_back({1, 0x80000, 0x80000});
3125 config.AllocationSizes.push_back({1, 0x200000, 0x200000});
3126 config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
3127 }
3128 }
3129 }
3130 // Images present
3131 if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
3132 {
3133 // Small
3134 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3135 {
3136 // Varying size
3137 if(constantSizesIndex == 0)
3138 config.AllocationSizes.push_back({4, 0, 0, 4, 32});
3139 // Constant sizes
3140 else
3141 {
3142 config.AllocationSizes.push_back({1, 0, 0, 4, 4});
3143 config.AllocationSizes.push_back({1, 0, 0, 8, 8});
3144 config.AllocationSizes.push_back({1, 0, 0, 16, 16});
3145 config.AllocationSizes.push_back({1, 0, 0, 32, 32});
3146 }
3147 }
3148 // Large
3149 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3150 {
3151 // Varying size
3152 if(constantSizesIndex == 0)
3153 config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
3154 // Constant sizes
3155 else
3156 {
3157 config.AllocationSizes.push_back({1, 0, 0, 256, 256});
3158 config.AllocationSizes.push_back({1, 0, 0, 512, 512});
3159 config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
3160 config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
3161 }
3162 }
3163 }
3164
3165 const VkDeviceSize avgResourceSize = config.CalcAvgResourceSize();
3166 config.PoolSize = avgResourceSize * AVG_RESOURCES_PER_POOL;
3167
3168 // 0 = 66%, 1 = 133%, 2 = 100%, 3 = 33%, 4 = 166%
3169 size_t subscriptionModeCount;
3170 switch(ConfigType)
3171 {
3172 case CONFIG_TYPE_MINIMUM: subscriptionModeCount = 2; break;
3173 case CONFIG_TYPE_SMALL: subscriptionModeCount = 2; break;
3174 case CONFIG_TYPE_AVERAGE: subscriptionModeCount = 3; break;
3175 case CONFIG_TYPE_LARGE: subscriptionModeCount = 5; break;
3176 case CONFIG_TYPE_MAXIMUM: subscriptionModeCount = 5; break;
3177 default: assert(0);
3178 }
3179 for(size_t subscriptionModeIndex = 0; subscriptionModeIndex < subscriptionModeCount; ++subscriptionModeIndex)
3180 {
3181 std::string desc5 = desc4;
3182
3183 switch(subscriptionModeIndex)
3184 {
3185 case 0:
3186 desc5 += " Subscription_66%";
3187 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 66 / 100;
3188 break;
3189 case 1:
3190 desc5 += " Subscription_133%";
3191 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 133 / 100;
3192 break;
3193 case 2:
3194 desc5 += " Subscription_100%";
3195 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL;
3196 break;
3197 case 3:
3198 desc5 += " Subscription_33%";
3199 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 33 / 100;
3200 break;
3201 case 4:
3202 desc5 += " Subscription_166%";
3203 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 166 / 100;
3204 break;
3205 default:
3206 assert(0);
3207 }
3208
3209 config.TotalItemCount = config.UsedItemCountMax * 5;
3210 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
3211
3212 const char* testDescription = desc5.c_str();
3213
3214 for(size_t repeat = 0; repeat < repeatCount; ++repeat)
3215 {
3216 printf("%s Repeat %u\n", testDescription, (uint32_t)repeat);
3217
3218 PoolTestResult result{};
3219 g_MemoryAliasingWarningEnabled = false;
3220 TestPool_Benchmark(result, config);
3221 g_MemoryAliasingWarningEnabled = true;
3222 WritePoolTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
3223 }
3224 }
3225 }
3226 }
3227 }
3228 }
3229}
3230
3231void Test()
3232{
3233 wprintf(L"TESTING:\n");
3234
Adam Sawicki212a4a62018-06-14 15:44:45 +02003235 // TEMP tests
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003236TestLinearAllocator();
3237return;
Adam Sawicki212a4a62018-06-14 15:44:45 +02003238
Adam Sawickib8333fb2018-03-13 16:15:53 +01003239 // # Simple tests
3240
3241 TestBasics();
Adam Sawicki212a4a62018-06-14 15:44:45 +02003242#if VMA_DEBUG_MARGIN
3243 TestDebugMargin();
3244#else
3245 TestPool_SameSize();
3246 TestHeapSizeLimit();
3247#endif
Adam Sawickie44c6262018-06-15 14:30:39 +02003248#if VMA_DEBUG_INITIALIZE_ALLOCATIONS
3249 TestAllocationsInitialization();
3250#endif
Adam Sawickib8333fb2018-03-13 16:15:53 +01003251 TestMapping();
3252 TestMappingMultithreaded();
Adam Sawicki0876c0d2018-06-20 15:18:11 +02003253 TestLinearAllocator();
Adam Sawickib8333fb2018-03-13 16:15:53 +01003254 TestDefragmentationSimple();
3255 TestDefragmentationFull();
3256
3257 // # Detailed tests
3258 FILE* file;
3259 fopen_s(&file, "Results.csv", "w");
3260 assert(file != NULL);
3261
3262 WriteMainTestResultHeader(file);
3263 PerformMainTests(file);
3264 //PerformCustomMainTest(file);
3265
3266 WritePoolTestResultHeader(file);
3267 PerformPoolTests(file);
3268 //PerformCustomPoolTest(file);
3269
3270 fclose(file);
3271
3272 wprintf(L"Done.\n");
3273}
3274
Adam Sawickif1a793c2018-03-13 15:42:22 +01003275#endif // #ifdef _WIN32