blob: 21b82f2ba0c03c7c8c75d5ad6068d861751fd80d [file] [log] [blame]
Adam Sawickif1a793c2018-03-13 15:42:22 +01001#include "Tests.h"
2#include "VmaUsage.h"
3#include "Common.h"
Adam Sawickib8333fb2018-03-13 16:15:53 +01004#include <atomic>
5#include <thread>
6#include <mutex>
Adam Sawickif1a793c2018-03-13 15:42:22 +01007
8#ifdef _WIN32
9
// Free-form label written into reports/dumps to identify this test build.
static const char* CODE_DESCRIPTION = "Foo";
11
// Overall "size" of the test run: scales thread counts, allocation counts,
// number of allocation strategies exercised, etc.
enum CONFIG_TYPE {
    CONFIG_TYPE_MINIMUM,
    CONFIG_TYPE_SMALL,
    CONFIG_TYPE_AVERAGE,
    CONFIG_TYPE_LARGE,
    CONFIG_TYPE_MAXIMUM,
    CONFIG_TYPE_COUNT
};

// Compile-time selection of the test size. Switch the commented line to run
// the heavier configuration.
static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_SMALL;
//static constexpr CONFIG_TYPE ConfigType = CONFIG_TYPE_LARGE;
23
// Order in which surviving allocations are destroyed at the end of a test.
// COUNT is a sentinel used for iteration, not a real order.
enum class FREE_ORDER { FORWARD, BACKWARD, RANDOM, COUNT };

// Printable names for FREE_ORDER values; indexed by the enum's integer value
// (deliberately no entry for COUNT).
static const char* FREE_ORDER_NAMES[] = {
    "FORWARD",
    "BACKWARD",
    "RANDOM",
};
31
// One entry of a size distribution for randomly generated resources.
// Exactly one of the buffer/image ranges is expected to be non-zero:
// BufferSizeMin/Max in bytes for buffers, ImageSizeMin/Max in pixels
// (width = height) for 2D images. Probability is a relative weight.
struct AllocationSize
{
    uint32_t Probability;
    VkDeviceSize BufferSizeMin, BufferSizeMax;
    uint32_t ImageSizeMin, ImageSizeMax;
};
38
// Parameters of one MainTest() run.
struct Config
{
    uint32_t RandSeed;
    // Bytes to allocate up-front (split evenly among threads).
    VkDeviceSize BeginBytesToAllocate;
    // Number of random alloc/free operations performed after the initial fill.
    uint32_t AdditionalOperationCount;
    // Upper bound on bytes held at any time (split evenly among threads).
    VkDeviceSize MaxBytesToAllocate;
    uint32_t MemUsageProbability[4]; // For VMA_MEMORY_USAGE_*
    std::vector<AllocationSize> AllocationSizes;
    uint32_t ThreadCount;
    // Chance (0..100) that a new allocation goes into the shared pool
    // instead of the creating thread's private list.
    uint32_t ThreadsUsingCommonAllocationsProbabilityPercent;
    FREE_ORDER FreeOrder;
    VmaAllocationCreateFlags AllocationStrategy; // For VMA_ALLOCATION_CREATE_STRATEGY_*
};
52
// Aggregated measurements produced by MainTest(). `duration` is the
// chrono-based alias declared in Common.h.
struct Result
{
    duration TotalTime;
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax;
    VkDeviceSize TotalMemoryAllocated;
    VkDeviceSize FreeRangeSizeAvg, FreeRangeSizeMax;
};
61
// Defragmentation test entry points (defined later in this file).
void TestDefragmentationSimple();
void TestDefragmentationFull();
64
65struct PoolTestConfig
66{
67 uint32_t RandSeed;
68 uint32_t ThreadCount;
69 VkDeviceSize PoolSize;
70 uint32_t FrameCount;
71 uint32_t TotalItemCount;
72 // Range for number of items used in each frame.
73 uint32_t UsedItemCountMin, UsedItemCountMax;
74 // Percent of items to make unused, and possibly make some others used in each frame.
75 uint32_t ItemsToMakeUnusedPercent;
76 std::vector<AllocationSize> AllocationSizes;
77
78 VkDeviceSize CalcAvgResourceSize() const
79 {
80 uint32_t probabilitySum = 0;
81 VkDeviceSize sizeSum = 0;
82 for(size_t i = 0; i < AllocationSizes.size(); ++i)
83 {
84 const AllocationSize& allocSize = AllocationSizes[i];
85 if(allocSize.BufferSizeMax > 0)
86 sizeSum += (allocSize.BufferSizeMin + allocSize.BufferSizeMax) / 2 * allocSize.Probability;
87 else
88 {
89 const VkDeviceSize avgDimension = (allocSize.ImageSizeMin + allocSize.ImageSizeMax) / 2;
90 sizeSum += avgDimension * avgDimension * 4 * allocSize.Probability;
91 }
92 probabilitySum += allocSize.Probability;
93 }
94 return sizeSum / probabilitySum;
95 }
96
97 bool UsesBuffers() const
98 {
99 for(size_t i = 0; i < AllocationSizes.size(); ++i)
100 if(AllocationSizes[i].BufferSizeMax > 0)
101 return true;
102 return false;
103 }
104
105 bool UsesImages() const
106 {
107 for(size_t i = 0; i < AllocationSizes.size(); ++i)
108 if(AllocationSizes[i].ImageSizeMax > 0)
109 return true;
110 return false;
111 }
112};
113
// Aggregated measurements produced by the pool benchmark.
struct PoolTestResult
{
    duration TotalTime;
    duration AllocationTimeMin, AllocationTimeAvg, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeAvg, DeallocationTimeMax;
    // Allocations lost via VMA's "lost allocation" mechanism, and their bytes.
    size_t LostAllocationCount, LostAllocationTotalSize;
    // Allocations that failed outright, and their bytes.
    size_t FailedAllocationCount, FailedAllocationTotalSize;
};
122
// Estimated bytes per pixel used when budgeting image allocations in MainTest.
static const uint32_t IMAGE_BYTES_PER_PIXEL = 1;

// Global frame counter used by frame-based tests (e.g. lost-allocation tests).
static uint32_t g_FrameIndex = 0;
126
// A Vulkan buffer paired with the VMA allocation that backs it.
struct BufferInfo
{
    VkBuffer Buffer = VK_NULL_HANDLE;
    VmaAllocation Allocation = VK_NULL_HANDLE;
};
132
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +0200133static uint32_t GetAllocationStrategyCount()
134{
135 uint32_t strategyCount = 0;
136 switch(ConfigType)
137 {
138 case CONFIG_TYPE_MINIMUM: strategyCount = 1; break;
139 case CONFIG_TYPE_SMALL: strategyCount = 1; break;
140 case CONFIG_TYPE_AVERAGE: strategyCount = 2; break;
141 case CONFIG_TYPE_LARGE: strategyCount = 2; break;
142 case CONFIG_TYPE_MAXIMUM: strategyCount = 3; break;
143 default: assert(0);
144 }
145 return strategyCount;
146}
147
148static const char* GetAllocationStrategyName(VmaAllocationCreateFlags allocStrategy)
149{
150 switch(allocStrategy)
151 {
152 case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT: return "BEST_FIT"; break;
153 case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT: return "WORST_FIT"; break;
154 case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT: return "FIRST_FIT"; break;
155 case 0: return "Default"; break;
156 default: assert(0); return "";
157 }
158}
159
Adam Sawickib8333fb2018-03-13 16:15:53 +0100160static void InitResult(Result& outResult)
161{
162 outResult.TotalTime = duration::zero();
163 outResult.AllocationTimeMin = duration::max();
164 outResult.AllocationTimeAvg = duration::zero();
165 outResult.AllocationTimeMax = duration::min();
166 outResult.DeallocationTimeMin = duration::max();
167 outResult.DeallocationTimeAvg = duration::zero();
168 outResult.DeallocationTimeMax = duration::min();
169 outResult.TotalMemoryAllocated = 0;
170 outResult.FreeRangeSizeAvg = 0;
171 outResult.FreeRangeSizeMax = 0;
172}
173
174class TimeRegisterObj
175{
176public:
177 TimeRegisterObj(duration& min, duration& sum, duration& max) :
178 m_Min(min),
179 m_Sum(sum),
180 m_Max(max),
181 m_TimeBeg(std::chrono::high_resolution_clock::now())
182 {
183 }
184
185 ~TimeRegisterObj()
186 {
187 duration d = std::chrono::high_resolution_clock::now() - m_TimeBeg;
188 m_Sum += d;
189 if(d < m_Min) m_Min = d;
190 if(d > m_Max) m_Max = d;
191 }
192
193private:
194 duration& m_Min;
195 duration& m_Sum;
196 duration& m_Max;
197 time_point m_TimeBeg;
198};
199
// Per-thread measurements from the pool benchmark; merged into PoolTestResult
// by the caller (sums here, averages computed later).
struct PoolTestThreadResult
{
    duration AllocationTimeMin, AllocationTimeSum, AllocationTimeMax;
    duration DeallocationTimeMin, DeallocationTimeSum, DeallocationTimeMax;
    size_t AllocationCount, DeallocationCount;
    size_t LostAllocationCount, LostAllocationTotalSize;
    size_t FailedAllocationCount, FailedAllocationTotalSize;
};
208
// Scoped timer that accumulates into the allocation-time fields of a Result.
// NOTE: AllocationTimeAvg is used as a running SUM here; it is divided by the
// allocation count at the end of MainTest to become an average.
class AllocationTimeRegisterObj : public TimeRegisterObj
{
public:
    AllocationTimeRegisterObj(Result& result) :
        TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeAvg, result.AllocationTimeMax)
    {
    }
};
217
// Scoped timer that accumulates into the deallocation-time fields of a Result.
// As with allocation, DeallocationTimeAvg holds a running sum until MainTest
// divides it at the end.
class DeallocationTimeRegisterObj : public TimeRegisterObj
{
public:
    DeallocationTimeRegisterObj(Result& result) :
        TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeAvg, result.DeallocationTimeMax)
    {
    }
};
226
// Scoped timer for the allocation-time fields of a PoolTestThreadResult.
class PoolAllocationTimeRegisterObj : public TimeRegisterObj
{
public:
    PoolAllocationTimeRegisterObj(PoolTestThreadResult& result) :
        TimeRegisterObj(result.AllocationTimeMin, result.AllocationTimeSum, result.AllocationTimeMax)
    {
    }
};
235
// Scoped timer for the deallocation-time fields of a PoolTestThreadResult.
class PoolDeallocationTimeRegisterObj : public TimeRegisterObj
{
public:
    PoolDeallocationTimeRegisterObj(PoolTestThreadResult& result) :
        TimeRegisterObj(result.DeallocationTimeMin, result.DeallocationTimeSum, result.DeallocationTimeMax)
    {
    }
};
244
Adam Sawicki33d2ce72018-08-27 13:59:13 +0200245static void CurrentTimeToStr(std::string& out)
246{
247 time_t rawTime; time(&rawTime);
248 struct tm timeInfo; localtime_s(&timeInfo, &rawTime);
249 char timeStr[128];
250 strftime(timeStr, _countof(timeStr), "%c", &timeInfo);
251 out = timeStr;
252}
253
Adam Sawickib8333fb2018-03-13 16:15:53 +0100254VkResult MainTest(Result& outResult, const Config& config)
255{
256 assert(config.ThreadCount > 0);
257
258 InitResult(outResult);
259
260 RandomNumberGenerator mainRand{config.RandSeed};
261
262 time_point timeBeg = std::chrono::high_resolution_clock::now();
263
264 std::atomic<size_t> allocationCount = 0;
265 VkResult res = VK_SUCCESS;
266
267 uint32_t memUsageProbabilitySum =
268 config.MemUsageProbability[0] + config.MemUsageProbability[1] +
269 config.MemUsageProbability[2] + config.MemUsageProbability[3];
270 assert(memUsageProbabilitySum > 0);
271
272 uint32_t allocationSizeProbabilitySum = std::accumulate(
273 config.AllocationSizes.begin(),
274 config.AllocationSizes.end(),
275 0u,
276 [](uint32_t sum, const AllocationSize& allocSize) {
277 return sum + allocSize.Probability;
278 });
279
280 struct Allocation
281 {
282 VkBuffer Buffer;
283 VkImage Image;
284 VmaAllocation Alloc;
285 };
286
287 std::vector<Allocation> commonAllocations;
288 std::mutex commonAllocationsMutex;
289
290 auto Allocate = [&](
291 VkDeviceSize bufferSize,
292 const VkExtent2D imageExtent,
293 RandomNumberGenerator& localRand,
294 VkDeviceSize& totalAllocatedBytes,
295 std::vector<Allocation>& allocations) -> VkResult
296 {
297 assert((bufferSize == 0) != (imageExtent.width == 0 && imageExtent.height == 0));
298
299 uint32_t memUsageIndex = 0;
300 uint32_t memUsageRand = localRand.Generate() % memUsageProbabilitySum;
301 while(memUsageRand >= config.MemUsageProbability[memUsageIndex])
302 memUsageRand -= config.MemUsageProbability[memUsageIndex++];
303
304 VmaAllocationCreateInfo memReq = {};
305 memReq.usage = (VmaMemoryUsage)(VMA_MEMORY_USAGE_GPU_ONLY + memUsageIndex);
Adam Sawicki0667e332018-08-24 17:26:44 +0200306 memReq.flags |= config.AllocationStrategy;
Adam Sawickib8333fb2018-03-13 16:15:53 +0100307
308 Allocation allocation = {};
309 VmaAllocationInfo allocationInfo;
310
311 // Buffer
312 if(bufferSize > 0)
313 {
314 assert(imageExtent.width == 0);
315 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
316 bufferInfo.size = bufferSize;
317 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
318
319 {
320 AllocationTimeRegisterObj timeRegisterObj{outResult};
321 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &memReq, &allocation.Buffer, &allocation.Alloc, &allocationInfo);
322 }
323 }
324 // Image
325 else
326 {
327 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
328 imageInfo.imageType = VK_IMAGE_TYPE_2D;
329 imageInfo.extent.width = imageExtent.width;
330 imageInfo.extent.height = imageExtent.height;
331 imageInfo.extent.depth = 1;
332 imageInfo.mipLevels = 1;
333 imageInfo.arrayLayers = 1;
334 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
335 imageInfo.tiling = memReq.usage == VMA_MEMORY_USAGE_GPU_ONLY ?
336 VK_IMAGE_TILING_OPTIMAL :
337 VK_IMAGE_TILING_LINEAR;
338 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
339 switch(memReq.usage)
340 {
341 case VMA_MEMORY_USAGE_GPU_ONLY:
342 switch(localRand.Generate() % 3)
343 {
344 case 0:
345 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
346 break;
347 case 1:
348 imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
349 break;
350 case 2:
351 imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
352 break;
353 }
354 break;
355 case VMA_MEMORY_USAGE_CPU_ONLY:
356 case VMA_MEMORY_USAGE_CPU_TO_GPU:
357 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
358 break;
359 case VMA_MEMORY_USAGE_GPU_TO_CPU:
360 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
361 break;
362 }
363 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
364 imageInfo.flags = 0;
365
366 {
367 AllocationTimeRegisterObj timeRegisterObj{outResult};
368 res = vmaCreateImage(g_hAllocator, &imageInfo, &memReq, &allocation.Image, &allocation.Alloc, &allocationInfo);
369 }
370 }
371
372 if(res == VK_SUCCESS)
373 {
374 ++allocationCount;
375 totalAllocatedBytes += allocationInfo.size;
376 bool useCommonAllocations = localRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
377 if(useCommonAllocations)
378 {
379 std::unique_lock<std::mutex> lock(commonAllocationsMutex);
380 commonAllocations.push_back(allocation);
381 }
382 else
383 allocations.push_back(allocation);
384 }
385 else
386 {
387 assert(0);
388 }
389 return res;
390 };
391
392 auto GetNextAllocationSize = [&](
393 VkDeviceSize& outBufSize,
394 VkExtent2D& outImageSize,
395 RandomNumberGenerator& localRand)
396 {
397 outBufSize = 0;
398 outImageSize = {0, 0};
399
400 uint32_t allocSizeIndex = 0;
401 uint32_t r = localRand.Generate() % allocationSizeProbabilitySum;
402 while(r >= config.AllocationSizes[allocSizeIndex].Probability)
403 r -= config.AllocationSizes[allocSizeIndex++].Probability;
404
405 const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
406 if(allocSize.BufferSizeMax > 0)
407 {
408 assert(allocSize.ImageSizeMax == 0);
409 if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
410 outBufSize = allocSize.BufferSizeMin;
411 else
412 {
413 outBufSize = allocSize.BufferSizeMin + localRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
414 outBufSize = outBufSize / 16 * 16;
415 }
416 }
417 else
418 {
419 if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
420 outImageSize.width = outImageSize.height = allocSize.ImageSizeMax;
421 else
422 {
423 outImageSize.width = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
424 outImageSize.height = allocSize.ImageSizeMin + localRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
425 }
426 }
427 };
428
429 std::atomic<uint32_t> numThreadsReachedMaxAllocations = 0;
430 HANDLE threadsFinishEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
431
432 auto ThreadProc = [&](uint32_t randSeed) -> void
433 {
434 RandomNumberGenerator threadRand(randSeed);
435 VkDeviceSize threadTotalAllocatedBytes = 0;
436 std::vector<Allocation> threadAllocations;
437 VkDeviceSize threadBeginBytesToAllocate = config.BeginBytesToAllocate / config.ThreadCount;
438 VkDeviceSize threadMaxBytesToAllocate = config.MaxBytesToAllocate / config.ThreadCount;
439 uint32_t threadAdditionalOperationCount = config.AdditionalOperationCount / config.ThreadCount;
440
441 // BEGIN ALLOCATIONS
442 for(;;)
443 {
444 VkDeviceSize bufferSize = 0;
445 VkExtent2D imageExtent = {};
446 GetNextAllocationSize(bufferSize, imageExtent, threadRand);
447 if(threadTotalAllocatedBytes + bufferSize + imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
448 threadBeginBytesToAllocate)
449 {
450 if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
451 break;
452 }
453 else
454 break;
455 }
456
457 // ADDITIONAL ALLOCATIONS AND FREES
458 for(size_t i = 0; i < threadAdditionalOperationCount; ++i)
459 {
460 VkDeviceSize bufferSize = 0;
461 VkExtent2D imageExtent = {};
462 GetNextAllocationSize(bufferSize, imageExtent, threadRand);
463
464 // true = allocate, false = free
465 bool allocate = threadRand.Generate() % 2 != 0;
466
467 if(allocate)
468 {
469 if(threadTotalAllocatedBytes +
470 bufferSize +
471 imageExtent.width * imageExtent.height * IMAGE_BYTES_PER_PIXEL <
472 threadMaxBytesToAllocate)
473 {
474 if(Allocate(bufferSize, imageExtent, threadRand, threadTotalAllocatedBytes, threadAllocations) != VK_SUCCESS)
475 break;
476 }
477 }
478 else
479 {
480 bool useCommonAllocations = threadRand.Generate() % 100 < config.ThreadsUsingCommonAllocationsProbabilityPercent;
481 if(useCommonAllocations)
482 {
483 std::unique_lock<std::mutex> lock(commonAllocationsMutex);
484 if(!commonAllocations.empty())
485 {
486 size_t indexToFree = threadRand.Generate() % commonAllocations.size();
487 VmaAllocationInfo allocationInfo;
488 vmaGetAllocationInfo(g_hAllocator, commonAllocations[indexToFree].Alloc, &allocationInfo);
489 if(threadTotalAllocatedBytes >= allocationInfo.size)
490 {
491 DeallocationTimeRegisterObj timeRegisterObj{outResult};
492 if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
493 vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
494 else
495 vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
496 threadTotalAllocatedBytes -= allocationInfo.size;
497 commonAllocations.erase(commonAllocations.begin() + indexToFree);
498 }
499 }
500 }
501 else
502 {
503 if(!threadAllocations.empty())
504 {
505 size_t indexToFree = threadRand.Generate() % threadAllocations.size();
506 VmaAllocationInfo allocationInfo;
507 vmaGetAllocationInfo(g_hAllocator, threadAllocations[indexToFree].Alloc, &allocationInfo);
508 if(threadTotalAllocatedBytes >= allocationInfo.size)
509 {
510 DeallocationTimeRegisterObj timeRegisterObj{outResult};
511 if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
512 vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
513 else
514 vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
515 threadTotalAllocatedBytes -= allocationInfo.size;
516 threadAllocations.erase(threadAllocations.begin() + indexToFree);
517 }
518 }
519 }
520 }
521 }
522
523 ++numThreadsReachedMaxAllocations;
524
525 WaitForSingleObject(threadsFinishEvent, INFINITE);
526
527 // DEALLOCATION
528 while(!threadAllocations.empty())
529 {
530 size_t indexToFree = 0;
531 switch(config.FreeOrder)
532 {
533 case FREE_ORDER::FORWARD:
534 indexToFree = 0;
535 break;
536 case FREE_ORDER::BACKWARD:
537 indexToFree = threadAllocations.size() - 1;
538 break;
539 case FREE_ORDER::RANDOM:
540 indexToFree = mainRand.Generate() % threadAllocations.size();
541 break;
542 }
543
544 {
545 DeallocationTimeRegisterObj timeRegisterObj{outResult};
546 if(threadAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
547 vmaDestroyBuffer(g_hAllocator, threadAllocations[indexToFree].Buffer, threadAllocations[indexToFree].Alloc);
548 else
549 vmaDestroyImage(g_hAllocator, threadAllocations[indexToFree].Image, threadAllocations[indexToFree].Alloc);
550 }
551 threadAllocations.erase(threadAllocations.begin() + indexToFree);
552 }
553 };
554
555 uint32_t threadRandSeed = mainRand.Generate();
556 std::vector<std::thread> bkgThreads;
557 for(size_t i = 0; i < config.ThreadCount; ++i)
558 {
559 bkgThreads.emplace_back(std::bind(ThreadProc, threadRandSeed + (uint32_t)i));
560 }
561
562 // Wait for threads reached max allocations
563 while(numThreadsReachedMaxAllocations < config.ThreadCount)
564 Sleep(0);
565
566 // CALCULATE MEMORY STATISTICS ON FINAL USAGE
567 VmaStats vmaStats = {};
568 vmaCalculateStats(g_hAllocator, &vmaStats);
569 outResult.TotalMemoryAllocated = vmaStats.total.usedBytes + vmaStats.total.unusedBytes;
570 outResult.FreeRangeSizeMax = vmaStats.total.unusedRangeSizeMax;
571 outResult.FreeRangeSizeAvg = vmaStats.total.unusedRangeSizeAvg;
572
573 // Signal threads to deallocate
574 SetEvent(threadsFinishEvent);
575
576 // Wait for threads finished
577 for(size_t i = 0; i < bkgThreads.size(); ++i)
578 bkgThreads[i].join();
579 bkgThreads.clear();
580
581 CloseHandle(threadsFinishEvent);
582
583 // Deallocate remaining common resources
584 while(!commonAllocations.empty())
585 {
586 size_t indexToFree = 0;
587 switch(config.FreeOrder)
588 {
589 case FREE_ORDER::FORWARD:
590 indexToFree = 0;
591 break;
592 case FREE_ORDER::BACKWARD:
593 indexToFree = commonAllocations.size() - 1;
594 break;
595 case FREE_ORDER::RANDOM:
596 indexToFree = mainRand.Generate() % commonAllocations.size();
597 break;
598 }
599
600 {
601 DeallocationTimeRegisterObj timeRegisterObj{outResult};
602 if(commonAllocations[indexToFree].Buffer != VK_NULL_HANDLE)
603 vmaDestroyBuffer(g_hAllocator, commonAllocations[indexToFree].Buffer, commonAllocations[indexToFree].Alloc);
604 else
605 vmaDestroyImage(g_hAllocator, commonAllocations[indexToFree].Image, commonAllocations[indexToFree].Alloc);
606 }
607 commonAllocations.erase(commonAllocations.begin() + indexToFree);
608 }
609
610 if(allocationCount)
611 {
612 outResult.AllocationTimeAvg /= allocationCount;
613 outResult.DeallocationTimeAvg /= allocationCount;
614 }
615
616 outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;
617
618 return res;
619}
620
Adam Sawickie44c6262018-06-15 14:30:39 +0200621static void SaveAllocatorStatsToFile(const wchar_t* filePath)
Adam Sawickib8333fb2018-03-13 16:15:53 +0100622{
623 char* stats;
Adam Sawickie44c6262018-06-15 14:30:39 +0200624 vmaBuildStatsString(g_hAllocator, &stats, VK_TRUE);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100625 SaveFile(filePath, stats, strlen(stats));
Adam Sawickie44c6262018-06-15 14:30:39 +0200626 vmaFreeStatsString(g_hAllocator, stats);
Adam Sawickib8333fb2018-03-13 16:15:53 +0100627}
628
// One test resource: exactly one of m_Buffer / m_Image is valid, together
// with its VMA allocation. m_StartValue seeds the sequential uint32_t fill
// pattern used by ValidateAllocationData. The union keeps whichever create
// info matches the resource kind so it can be recreated after defragmentation.
struct AllocInfo
{
    VmaAllocation m_Allocation;
    VkBuffer m_Buffer;
    VkImage m_Image;
    uint32_t m_StartValue;
    union
    {
        VkBufferCreateInfo m_BufferInfo;
        VkImageCreateInfo m_ImageInfo;
    };
};
641
642static void GetMemReq(VmaAllocationCreateInfo& outMemReq)
643{
644 outMemReq = {};
645 outMemReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
646 //outMemReq.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
647}
648
649static void CreateBuffer(
650 VmaPool pool,
651 const VkBufferCreateInfo& bufCreateInfo,
652 bool persistentlyMapped,
653 AllocInfo& outAllocInfo)
654{
655 outAllocInfo = {};
656 outAllocInfo.m_BufferInfo = bufCreateInfo;
657
658 VmaAllocationCreateInfo allocCreateInfo = {};
659 allocCreateInfo.pool = pool;
660 if(persistentlyMapped)
661 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
662
663 VmaAllocationInfo vmaAllocInfo = {};
664 ERR_GUARD_VULKAN( vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &outAllocInfo.m_Buffer, &outAllocInfo.m_Allocation, &vmaAllocInfo) );
665
666 // Setup StartValue and fill.
667 {
668 outAllocInfo.m_StartValue = (uint32_t)rand();
669 uint32_t* data = (uint32_t*)vmaAllocInfo.pMappedData;
670 assert((data != nullptr) == persistentlyMapped);
671 if(!persistentlyMapped)
672 {
673 ERR_GUARD_VULKAN( vmaMapMemory(g_hAllocator, outAllocInfo.m_Allocation, (void**)&data) );
674 }
675
676 uint32_t value = outAllocInfo.m_StartValue;
677 assert(bufCreateInfo.size % 4 == 0);
678 for(size_t i = 0; i < bufCreateInfo.size / sizeof(uint32_t); ++i)
679 data[i] = value++;
680
681 if(!persistentlyMapped)
682 vmaUnmapMemory(g_hAllocator, outAllocInfo.m_Allocation);
683 }
684}
685
// Creates a random test resource (currently always a buffer — the image path
// is disabled via `isBuffer = true`) with the given allocator, then fills its
// memory with a sequential uint32_t pattern starting at m_StartValue.
// Left byte-identical: the result depends on the exact order and count of
// rand() calls, so restructuring would change the generated sequence.
static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator)
{
    outAllocation.m_Allocation = nullptr;
    outAllocation.m_Buffer = nullptr;
    outAllocation.m_Image = nullptr;
    outAllocation.m_StartValue = (uint32_t)rand();

    VmaAllocationCreateInfo vmaMemReq;
    GetMemReq(vmaMemReq);

    VmaAllocationInfo allocInfo;

    // Image branch intentionally disabled; ~1/16 of resources are "large".
    const bool isBuffer = true;//(rand() & 0x1) != 0;
    const bool isLarge = (rand() % 16) == 0;
    if(isBuffer)
    {
        const uint32_t bufferSize = isLarge ?
            (rand() % 10 + 1) * (1024 * 1024) : // 1 MB ... 10 MB
            (rand() % 1024 + 1) * 1024; // 1 KB ... 1 MB

        VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufferInfo.size = bufferSize;
        bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &vmaMemReq, &outAllocation.m_Buffer, &outAllocation.m_Allocation, &allocInfo);
        // Keep the create info so the resource can be recreated later.
        outAllocation.m_BufferInfo = bufferInfo;
        assert(res == VK_SUCCESS);
    }
    else
    {
        const uint32_t imageSizeX = isLarge ?
            1024 + rand() % (4096 - 1024) : // 1024 ... 4096
            rand() % 1024 + 1; // 1 ... 1024
        const uint32_t imageSizeY = isLarge ?
            1024 + rand() % (4096 - 1024) : // 1024 ... 4096
            rand() % 1024 + 1; // 1 ... 1024

        VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
        imageInfo.imageType = VK_IMAGE_TYPE_2D;
        imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
        imageInfo.extent.width = imageSizeX;
        imageInfo.extent.height = imageSizeY;
        imageInfo.extent.depth = 1;
        imageInfo.mipLevels = 1;
        imageInfo.arrayLayers = 1;
        imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
        imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
        imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

        VkResult res = vmaCreateImage(allocator, &imageInfo, &vmaMemReq, &outAllocation.m_Image, &outAllocation.m_Allocation, &allocInfo);
        outAllocation.m_ImageInfo = imageInfo;
        assert(res == VK_SUCCESS);
    }

    // Map on demand if the allocation is not persistently mapped.
    uint32_t* data = (uint32_t*)allocInfo.pMappedData;
    if(allocInfo.pMappedData == nullptr)
    {
        VkResult res = vmaMapMemory(allocator, outAllocation.m_Allocation, (void**)&data);
        assert(res == VK_SUCCESS);
    }

    // Write the sequential verification pattern.
    uint32_t value = outAllocation.m_StartValue;
    assert(allocInfo.size % 4 == 0);
    for(size_t i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
        data[i] = value++;

    if(allocInfo.pMappedData == nullptr)
        vmaUnmapMemory(allocator, outAllocation.m_Allocation);
}
756
757static void DestroyAllocation(const AllocInfo& allocation)
758{
759 if(allocation.m_Buffer)
760 vmaDestroyBuffer(g_hAllocator, allocation.m_Buffer, allocation.m_Allocation);
761 else
762 vmaDestroyImage(g_hAllocator, allocation.m_Image, allocation.m_Allocation);
763}
764
765static void DestroyAllAllocations(std::vector<AllocInfo>& allocations)
766{
767 for(size_t i = allocations.size(); i--; )
768 DestroyAllocation(allocations[i]);
769 allocations.clear();
770}
771
772static void ValidateAllocationData(const AllocInfo& allocation)
773{
774 VmaAllocationInfo allocInfo;
775 vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);
776
777 uint32_t* data = (uint32_t*)allocInfo.pMappedData;
778 if(allocInfo.pMappedData == nullptr)
779 {
780 VkResult res = vmaMapMemory(g_hAllocator, allocation.m_Allocation, (void**)&data);
781 assert(res == VK_SUCCESS);
782 }
783
784 uint32_t value = allocation.m_StartValue;
785 bool ok = true;
786 size_t i;
787 assert(allocInfo.size % 4 == 0);
788 for(i = 0; i < allocInfo.size / sizeof(uint32_t); ++i)
789 {
790 if(data[i] != value++)
791 {
792 ok = false;
793 break;
794 }
795 }
796 assert(ok);
797
798 if(allocInfo.pMappedData == nullptr)
799 vmaUnmapMemory(g_hAllocator, allocation.m_Allocation);
800}
801
// After defragmentation has moved an allocation's memory, the old VkBuffer /
// VkImage is still bound to the old location. This destroys the resource,
// recreates it from the stored create info, and binds it to the allocation's
// new deviceMemory+offset. Left byte-identical: the destroy/create/query/bind
// ordering is required by Vulkan and must not be reshuffled.
static void RecreateAllocationResource(AllocInfo& allocation)
{
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(g_hAllocator, allocation.m_Allocation, &allocInfo);

    if(allocation.m_Buffer)
    {
        vkDestroyBuffer(g_hDevice, allocation.m_Buffer, nullptr);

        VkResult res = vkCreateBuffer(g_hDevice, &allocation.m_BufferInfo, nullptr, &allocation.m_Buffer);
        assert(res == VK_SUCCESS);

        // Just to silence validation layer warnings.
        VkMemoryRequirements vkMemReq;
        vkGetBufferMemoryRequirements(g_hDevice, allocation.m_Buffer, &vkMemReq);
        assert(vkMemReq.size == allocation.m_BufferInfo.size);

        res = vkBindBufferMemory(g_hDevice, allocation.m_Buffer, allocInfo.deviceMemory, allocInfo.offset);
        assert(res == VK_SUCCESS);
    }
    else
    {
        vkDestroyImage(g_hDevice, allocation.m_Image, nullptr);

        VkResult res = vkCreateImage(g_hDevice, &allocation.m_ImageInfo, nullptr, &allocation.m_Image);
        assert(res == VK_SUCCESS);

        // Just to silence validation layer warnings.
        VkMemoryRequirements vkMemReq;
        vkGetImageMemoryRequirements(g_hDevice, allocation.m_Image, &vkMemReq);

        res = vkBindImageMemory(g_hDevice, allocation.m_Image, allocInfo.deviceMemory, allocInfo.offset);
        assert(res == VK_SUCCESS);
    }
}
837
838static void Defragment(AllocInfo* allocs, size_t allocCount,
839 const VmaDefragmentationInfo* defragmentationInfo = nullptr,
840 VmaDefragmentationStats* defragmentationStats = nullptr)
841{
842 std::vector<VmaAllocation> vmaAllocs(allocCount);
843 for(size_t i = 0; i < allocCount; ++i)
844 vmaAllocs[i] = allocs[i].m_Allocation;
845
846 std::vector<VkBool32> allocChanged(allocCount);
847
848 ERR_GUARD_VULKAN( vmaDefragment(g_hAllocator, vmaAllocs.data(), allocCount, allocChanged.data(),
849 defragmentationInfo, defragmentationStats) );
850
851 for(size_t i = 0; i < allocCount; ++i)
852 {
853 if(allocChanged[i])
854 {
855 RecreateAllocationResource(allocs[i]);
856 }
857 }
858}
859
860static void ValidateAllocationsData(const AllocInfo* allocs, size_t allocCount)
861{
862 std::for_each(allocs, allocs + allocCount, [](const AllocInfo& allocInfo) {
863 ValidateAllocationData(allocInfo);
864 });
865}
866
867void TestDefragmentationSimple()
868{
869 wprintf(L"Test defragmentation simple\n");
870
871 RandomNumberGenerator rand(667);
872
873 const VkDeviceSize BUF_SIZE = 0x10000;
874 const VkDeviceSize BLOCK_SIZE = BUF_SIZE * 8;
875
876 const VkDeviceSize MIN_BUF_SIZE = 32;
877 const VkDeviceSize MAX_BUF_SIZE = BUF_SIZE * 4;
878 auto RandomBufSize = [&]() -> VkDeviceSize {
879 return align_up<VkDeviceSize>(rand.Generate() % (MAX_BUF_SIZE - MIN_BUF_SIZE + 1) + MIN_BUF_SIZE, 32);
880 };
881
882 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
883 bufCreateInfo.size = BUF_SIZE;
884 bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
885
886 VmaAllocationCreateInfo exampleAllocCreateInfo = {};
887 exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
888
889 uint32_t memTypeIndex = UINT32_MAX;
890 vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
891
892 VmaPoolCreateInfo poolCreateInfo = {};
893 poolCreateInfo.blockSize = BLOCK_SIZE;
894 poolCreateInfo.memoryTypeIndex = memTypeIndex;
895
896 VmaPool pool;
897 ERR_GUARD_VULKAN( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) );
898
899 std::vector<AllocInfo> allocations;
900
901 // persistentlyMappedOption = 0 - not persistently mapped.
902 // persistentlyMappedOption = 1 - persistently mapped.
903 for(uint32_t persistentlyMappedOption = 0; persistentlyMappedOption < 2; ++persistentlyMappedOption)
904 {
905 wprintf(L" Persistently mapped option = %u\n", persistentlyMappedOption);
906 const bool persistentlyMapped = persistentlyMappedOption != 0;
907
908 // # Test 1
909 // Buffers of fixed size.
910 // Fill 2 blocks. Remove odd buffers. Defragment everything.
911 // Expected result: at least 1 block freed.
912 {
913 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
914 {
915 AllocInfo allocInfo;
916 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
917 allocations.push_back(allocInfo);
918 }
919
920 for(size_t i = 1; i < allocations.size(); ++i)
921 {
922 DestroyAllocation(allocations[i]);
923 allocations.erase(allocations.begin() + i);
924 }
925
926 VmaDefragmentationStats defragStats;
927 Defragment(allocations.data(), allocations.size(), nullptr, &defragStats);
928 assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
929 assert(defragStats.deviceMemoryBlocksFreed >= 1);
930
931 ValidateAllocationsData(allocations.data(), allocations.size());
932
933 DestroyAllAllocations(allocations);
934 }
935
936 // # Test 2
937 // Buffers of fixed size.
938 // Fill 2 blocks. Remove odd buffers. Defragment one buffer at time.
939 // Expected result: Each of 4 interations makes some progress.
940 {
941 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE * 2; ++i)
942 {
943 AllocInfo allocInfo;
944 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
945 allocations.push_back(allocInfo);
946 }
947
948 for(size_t i = 1; i < allocations.size(); ++i)
949 {
950 DestroyAllocation(allocations[i]);
951 allocations.erase(allocations.begin() + i);
952 }
953
954 VmaDefragmentationInfo defragInfo = {};
955 defragInfo.maxAllocationsToMove = 1;
956 defragInfo.maxBytesToMove = BUF_SIZE;
957
958 for(size_t i = 0; i < BLOCK_SIZE / BUF_SIZE / 2; ++i)
959 {
960 VmaDefragmentationStats defragStats;
961 Defragment(allocations.data(), allocations.size(), &defragInfo, &defragStats);
962 assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0);
963 }
964
965 ValidateAllocationsData(allocations.data(), allocations.size());
966
967 DestroyAllAllocations(allocations);
968 }
969
970 // # Test 3
971 // Buffers of variable size.
972 // Create a number of buffers. Remove some percent of them.
973 // Defragment while having some percent of them unmovable.
974 // Expected result: Just simple validation.
975 {
976 for(size_t i = 0; i < 100; ++i)
977 {
978 VkBufferCreateInfo localBufCreateInfo = bufCreateInfo;
979 localBufCreateInfo.size = RandomBufSize();
980
981 AllocInfo allocInfo;
982 CreateBuffer(pool, bufCreateInfo, persistentlyMapped, allocInfo);
983 allocations.push_back(allocInfo);
984 }
985
986 const uint32_t percentToDelete = 60;
987 const size_t numberToDelete = allocations.size() * percentToDelete / 100;
988 for(size_t i = 0; i < numberToDelete; ++i)
989 {
990 size_t indexToDelete = rand.Generate() % (uint32_t)allocations.size();
991 DestroyAllocation(allocations[indexToDelete]);
992 allocations.erase(allocations.begin() + indexToDelete);
993 }
994
995 // Non-movable allocations will be at the beginning of allocations array.
996 const uint32_t percentNonMovable = 20;
997 const size_t numberNonMovable = allocations.size() * percentNonMovable / 100;
998 for(size_t i = 0; i < numberNonMovable; ++i)
999 {
1000 size_t indexNonMovable = i + rand.Generate() % (uint32_t)(allocations.size() - i);
1001 if(indexNonMovable != i)
1002 std::swap(allocations[i], allocations[indexNonMovable]);
1003 }
1004
1005 VmaDefragmentationStats defragStats;
1006 Defragment(
1007 allocations.data() + numberNonMovable,
1008 allocations.size() - numberNonMovable,
1009 nullptr, &defragStats);
1010
1011 ValidateAllocationsData(allocations.data(), allocations.size());
1012
1013 DestroyAllAllocations(allocations);
1014 }
1015 }
1016
1017 vmaDestroyPool(g_hAllocator, pool);
1018}
1019
1020void TestDefragmentationFull()
1021{
1022 std::vector<AllocInfo> allocations;
1023
1024 // Create initial allocations.
1025 for(size_t i = 0; i < 400; ++i)
1026 {
1027 AllocInfo allocation;
1028 CreateAllocation(allocation, g_hAllocator);
1029 allocations.push_back(allocation);
1030 }
1031
1032 // Delete random allocations
1033 const size_t allocationsToDeletePercent = 80;
1034 size_t allocationsToDelete = allocations.size() * allocationsToDeletePercent / 100;
1035 for(size_t i = 0; i < allocationsToDelete; ++i)
1036 {
1037 size_t index = (size_t)rand() % allocations.size();
1038 DestroyAllocation(allocations[index]);
1039 allocations.erase(allocations.begin() + index);
1040 }
1041
1042 for(size_t i = 0; i < allocations.size(); ++i)
1043 ValidateAllocationData(allocations[i]);
1044
Adam Sawicki0667e332018-08-24 17:26:44 +02001045 //SaveAllocatorStatsToFile(L"Before.csv");
Adam Sawickib8333fb2018-03-13 16:15:53 +01001046
1047 {
1048 std::vector<VmaAllocation> vmaAllocations(allocations.size());
1049 for(size_t i = 0; i < allocations.size(); ++i)
1050 vmaAllocations[i] = allocations[i].m_Allocation;
1051
1052 const size_t nonMovablePercent = 0;
1053 size_t nonMovableCount = vmaAllocations.size() * nonMovablePercent / 100;
1054 for(size_t i = 0; i < nonMovableCount; ++i)
1055 {
1056 size_t index = (size_t)rand() % vmaAllocations.size();
1057 vmaAllocations.erase(vmaAllocations.begin() + index);
1058 }
1059
1060 const uint32_t defragCount = 1;
1061 for(uint32_t defragIndex = 0; defragIndex < defragCount; ++defragIndex)
1062 {
1063 std::vector<VkBool32> allocationsChanged(vmaAllocations.size());
1064
1065 VmaDefragmentationInfo defragmentationInfo;
1066 defragmentationInfo.maxAllocationsToMove = UINT_MAX;
1067 defragmentationInfo.maxBytesToMove = SIZE_MAX;
1068
1069 wprintf(L"Defragmentation #%u\n", defragIndex);
1070
1071 time_point begTime = std::chrono::high_resolution_clock::now();
1072
1073 VmaDefragmentationStats stats;
1074 VkResult res = vmaDefragment(g_hAllocator, vmaAllocations.data(), vmaAllocations.size(), allocationsChanged.data(), &defragmentationInfo, &stats);
1075 assert(res >= 0);
1076
1077 float defragmentDuration = ToFloatSeconds(std::chrono::high_resolution_clock::now() - begTime);
1078
1079 wprintf(L"Moved allocations %u, bytes %llu\n", stats.allocationsMoved, stats.bytesMoved);
1080 wprintf(L"Freed blocks %u, bytes %llu\n", stats.deviceMemoryBlocksFreed, stats.bytesFreed);
1081 wprintf(L"Time: %.2f s\n", defragmentDuration);
1082
1083 for(size_t i = 0; i < vmaAllocations.size(); ++i)
1084 {
1085 if(allocationsChanged[i])
1086 {
1087 RecreateAllocationResource(allocations[i]);
1088 }
1089 }
1090
1091 for(size_t i = 0; i < allocations.size(); ++i)
1092 ValidateAllocationData(allocations[i]);
1093
Adam Sawicki0667e332018-08-24 17:26:44 +02001094 //wchar_t fileName[MAX_PATH];
1095 //swprintf(fileName, MAX_PATH, L"After_%02u.csv", defragIndex);
1096 //SaveAllocatorStatsToFile(fileName);
Adam Sawickib8333fb2018-03-13 16:15:53 +01001097 }
1098 }
1099
1100 // Destroy all remaining allocations.
1101 DestroyAllAllocations(allocations);
1102}
1103
1104static void TestUserData()
1105{
1106 VkResult res;
1107
1108 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1109 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
1110 bufCreateInfo.size = 0x10000;
1111
1112 for(uint32_t testIndex = 0; testIndex < 2; ++testIndex)
1113 {
1114 // Opaque pointer
1115 {
1116
1117 void* numberAsPointer = (void*)(size_t)0xC2501FF3u;
1118 void* pointerToSomething = &res;
1119
1120 VmaAllocationCreateInfo allocCreateInfo = {};
1121 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1122 allocCreateInfo.pUserData = numberAsPointer;
1123 if(testIndex == 1)
1124 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1125
1126 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1127 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1128 assert(res == VK_SUCCESS);
1129 assert(allocInfo.pUserData = numberAsPointer);
1130
1131 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1132 assert(allocInfo.pUserData == numberAsPointer);
1133
1134 vmaSetAllocationUserData(g_hAllocator, alloc, pointerToSomething);
1135 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1136 assert(allocInfo.pUserData == pointerToSomething);
1137
1138 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1139 }
1140
1141 // String
1142 {
1143 const char* name1 = "Buffer name \\\"\'<>&% \nSecond line .,;=";
1144 const char* name2 = "2";
1145 const size_t name1Len = strlen(name1);
1146
1147 char* name1Buf = new char[name1Len + 1];
1148 strcpy_s(name1Buf, name1Len + 1, name1);
1149
1150 VmaAllocationCreateInfo allocCreateInfo = {};
1151 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1152 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
1153 allocCreateInfo.pUserData = name1Buf;
1154 if(testIndex == 1)
1155 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1156
1157 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1158 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1159 assert(res == VK_SUCCESS);
1160 assert(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf);
1161 assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
1162
1163 delete[] name1Buf;
1164
1165 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1166 assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0);
1167
1168 vmaSetAllocationUserData(g_hAllocator, alloc, (void*)name2);
1169 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1170 assert(strcmp(name2, (const char*)allocInfo.pUserData) == 0);
1171
1172 vmaSetAllocationUserData(g_hAllocator, alloc, nullptr);
1173 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1174 assert(allocInfo.pUserData == nullptr);
1175
1176 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1177 }
1178 }
1179}
1180
1181static void TestMemoryRequirements()
1182{
1183 VkResult res;
1184 VkBuffer buf;
1185 VmaAllocation alloc;
1186 VmaAllocationInfo allocInfo;
1187
1188 const VkPhysicalDeviceMemoryProperties* memProps;
1189 vmaGetMemoryProperties(g_hAllocator, &memProps);
1190
1191 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1192 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1193 bufInfo.size = 128;
1194
1195 VmaAllocationCreateInfo allocCreateInfo = {};
1196
1197 // No requirements.
1198 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1199 assert(res == VK_SUCCESS);
1200 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1201
1202 // Usage.
1203 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1204 allocCreateInfo.requiredFlags = 0;
1205 allocCreateInfo.preferredFlags = 0;
1206 allocCreateInfo.memoryTypeBits = UINT32_MAX;
1207
1208 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1209 assert(res == VK_SUCCESS);
1210 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
1211 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1212
1213 // Required flags, preferred flags.
1214 allocCreateInfo.usage = VMA_MEMORY_USAGE_UNKNOWN;
1215 allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1216 allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
1217 allocCreateInfo.memoryTypeBits = 0;
1218
1219 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1220 assert(res == VK_SUCCESS);
1221 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
1222 assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
1223 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1224
1225 // memoryTypeBits.
1226 const uint32_t memType = allocInfo.memoryType;
1227 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
1228 allocCreateInfo.requiredFlags = 0;
1229 allocCreateInfo.preferredFlags = 0;
1230 allocCreateInfo.memoryTypeBits = 1u << memType;
1231
1232 res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1233 assert(res == VK_SUCCESS);
1234 assert(allocInfo.memoryType == memType);
1235 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1236
1237}
1238
1239static void TestBasics()
1240{
1241 VkResult res;
1242
1243 TestMemoryRequirements();
1244
1245 // Lost allocation
1246 {
1247 VmaAllocation alloc = VK_NULL_HANDLE;
1248 vmaCreateLostAllocation(g_hAllocator, &alloc);
1249 assert(alloc != VK_NULL_HANDLE);
1250
1251 VmaAllocationInfo allocInfo;
1252 vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo);
1253 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
1254 assert(allocInfo.size == 0);
1255
1256 vmaFreeMemory(g_hAllocator, alloc);
1257 }
1258
1259 // Allocation that is MAPPED and not necessarily HOST_VISIBLE.
1260 {
1261 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1262 bufCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
1263 bufCreateInfo.size = 128;
1264
1265 VmaAllocationCreateInfo allocCreateInfo = {};
1266 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1267 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
1268
1269 VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
1270 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1271 assert(res == VK_SUCCESS);
1272
1273 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1274
1275 // Same with OWN_MEMORY.
1276 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1277
1278 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
1279 assert(res == VK_SUCCESS);
1280
1281 vmaDestroyBuffer(g_hAllocator, buf, alloc);
1282 }
1283
1284 TestUserData();
1285}
1286
1287void TestHeapSizeLimit()
1288{
1289 const VkDeviceSize HEAP_SIZE_LIMIT = 1ull * 1024 * 1024 * 1024; // 1 GB
1290 const VkDeviceSize BLOCK_SIZE = 128ull * 1024 * 1024; // 128 MB
1291
1292 VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
1293 for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
1294 {
1295 heapSizeLimit[i] = HEAP_SIZE_LIMIT;
1296 }
1297
1298 VmaAllocatorCreateInfo allocatorCreateInfo = {};
1299 allocatorCreateInfo.physicalDevice = g_hPhysicalDevice;
1300 allocatorCreateInfo.device = g_hDevice;
1301 allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
1302
1303 VmaAllocator hAllocator;
1304 VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &hAllocator);
1305 assert(res == VK_SUCCESS);
1306
1307 struct Item
1308 {
1309 VkBuffer hBuf;
1310 VmaAllocation hAlloc;
1311 };
1312 std::vector<Item> items;
1313
1314 VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1315 bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1316
1317 // 1. Allocate two blocks of Own Memory, half the size of BLOCK_SIZE.
1318 VmaAllocationInfo ownAllocInfo;
1319 {
1320 VmaAllocationCreateInfo allocCreateInfo = {};
1321 allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1322 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
1323
1324 bufCreateInfo.size = BLOCK_SIZE / 2;
1325
1326 for(size_t i = 0; i < 2; ++i)
1327 {
1328 Item item;
1329 res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, &ownAllocInfo);
1330 assert(res == VK_SUCCESS);
1331 items.push_back(item);
1332 }
1333 }
1334
1335 // Create pool to make sure allocations must be out of this memory type.
1336 VmaPoolCreateInfo poolCreateInfo = {};
1337 poolCreateInfo.memoryTypeIndex = ownAllocInfo.memoryType;
1338 poolCreateInfo.blockSize = BLOCK_SIZE;
1339
1340 VmaPool hPool;
1341 res = vmaCreatePool(hAllocator, &poolCreateInfo, &hPool);
1342 assert(res == VK_SUCCESS);
1343
1344 // 2. Allocate normal buffers from all the remaining memory.
1345 {
1346 VmaAllocationCreateInfo allocCreateInfo = {};
1347 allocCreateInfo.pool = hPool;
1348
1349 bufCreateInfo.size = BLOCK_SIZE / 2;
1350
1351 const size_t bufCount = ((HEAP_SIZE_LIMIT / BLOCK_SIZE) - 1) * 2;
1352 for(size_t i = 0; i < bufCount; ++i)
1353 {
1354 Item item;
1355 res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, nullptr);
1356 assert(res == VK_SUCCESS);
1357 items.push_back(item);
1358 }
1359 }
1360
1361 // 3. Allocation of one more (even small) buffer should fail.
1362 {
1363 VmaAllocationCreateInfo allocCreateInfo = {};
1364 allocCreateInfo.pool = hPool;
1365
1366 bufCreateInfo.size = 128;
1367
1368 VkBuffer hBuf;
1369 VmaAllocation hAlloc;
1370 res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &hBuf, &hAlloc, nullptr);
1371 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1372 }
1373
1374 // Destroy everything.
1375 for(size_t i = items.size(); i--; )
1376 {
1377 vmaDestroyBuffer(hAllocator, items[i].hBuf, items[i].hAlloc);
1378 }
1379
1380 vmaDestroyPool(hAllocator, hPool);
1381
1382 vmaDestroyAllocator(hAllocator);
1383}
1384
Adam Sawicki212a4a62018-06-14 15:44:45 +02001385#if VMA_DEBUG_MARGIN
Adam Sawicki73b16652018-06-11 16:39:25 +02001386static void TestDebugMargin()
1387{
1388 if(VMA_DEBUG_MARGIN == 0)
1389 {
1390 return;
1391 }
1392
1393 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
Adam Sawicki212a4a62018-06-14 15:44:45 +02001394 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
Adam Sawicki73b16652018-06-11 16:39:25 +02001395
1396 VmaAllocationCreateInfo allocCreateInfo = {};
Adam Sawicki212a4a62018-06-14 15:44:45 +02001397 allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
Adam Sawicki73b16652018-06-11 16:39:25 +02001398
1399 // Create few buffers of different size.
1400 const size_t BUF_COUNT = 10;
1401 BufferInfo buffers[BUF_COUNT];
1402 VmaAllocationInfo allocInfo[BUF_COUNT];
1403 for(size_t i = 0; i < 10; ++i)
1404 {
1405 bufInfo.size = (VkDeviceSize)(i + 1) * 64;
Adam Sawicki212a4a62018-06-14 15:44:45 +02001406 // Last one will be mapped.
1407 allocCreateInfo.flags = (i == BUF_COUNT - 1) ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
Adam Sawicki73b16652018-06-11 16:39:25 +02001408
1409 VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buffers[i].Buffer, &buffers[i].Allocation, &allocInfo[i]);
1410 assert(res == VK_SUCCESS);
1411 // Margin is preserved also at the beginning of a block.
1412 assert(allocInfo[i].offset >= VMA_DEBUG_MARGIN);
Adam Sawicki212a4a62018-06-14 15:44:45 +02001413
1414 if(i == BUF_COUNT - 1)
1415 {
1416 // Fill with data.
1417 assert(allocInfo[i].pMappedData != nullptr);
1418 // Uncomment this "+ 1" to overwrite past end of allocation and check corruption detection.
1419 memset(allocInfo[i].pMappedData, 0xFF, bufInfo.size /* + 1 */);
1420 }
Adam Sawicki73b16652018-06-11 16:39:25 +02001421 }
1422
1423 // Check if their offsets preserve margin between them.
1424 std::sort(allocInfo, allocInfo + BUF_COUNT, [](const VmaAllocationInfo& lhs, const VmaAllocationInfo& rhs) -> bool
1425 {
1426 if(lhs.deviceMemory != rhs.deviceMemory)
1427 {
1428 return lhs.deviceMemory < rhs.deviceMemory;
1429 }
1430 return lhs.offset < rhs.offset;
1431 });
1432 for(size_t i = 1; i < BUF_COUNT; ++i)
1433 {
1434 if(allocInfo[i].deviceMemory == allocInfo[i - 1].deviceMemory)
1435 {
1436 assert(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN);
1437 }
1438 }
1439
Adam Sawicki212a4a62018-06-14 15:44:45 +02001440 VkResult res = vmaCheckCorruption(g_hAllocator, UINT32_MAX);
1441 assert(res == VK_SUCCESS);
1442
Adam Sawicki73b16652018-06-11 16:39:25 +02001443 // Destroy all buffers.
1444 for(size_t i = BUF_COUNT; i--; )
1445 {
1446 vmaDestroyBuffer(g_hAllocator, buffers[i].Buffer, buffers[i].Allocation);
1447 }
1448}
Adam Sawicki212a4a62018-06-14 15:44:45 +02001449#endif
Adam Sawicki73b16652018-06-11 16:39:25 +02001450
Adam Sawicki0876c0d2018-06-20 15:18:11 +02001451static void TestLinearAllocator()
1452{
1453 wprintf(L"Test linear allocator\n");
1454
1455 RandomNumberGenerator rand{645332};
1456
1457 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
1458 sampleBufCreateInfo.size = 1024; // Whatever.
1459 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1460
1461 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
1462 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
1463
1464 VmaPoolCreateInfo poolCreateInfo = {};
1465 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
1466 assert(res == VK_SUCCESS);
1467
Adam Sawickiee082772018-06-20 17:45:49 +02001468 poolCreateInfo.blockSize = 1024 * 300;
Adam Sawicki0876c0d2018-06-20 15:18:11 +02001469 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
1470 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
1471
1472 VmaPool pool = nullptr;
1473 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
1474 assert(res == VK_SUCCESS);
1475
1476 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
1477
1478 VmaAllocationCreateInfo allocCreateInfo = {};
1479 allocCreateInfo.pool = pool;
1480
1481 constexpr size_t maxBufCount = 100;
1482 std::vector<BufferInfo> bufInfo;
1483
1484 constexpr VkDeviceSize bufSizeMin = 16;
1485 constexpr VkDeviceSize bufSizeMax = 1024;
1486 VmaAllocationInfo allocInfo;
1487 VkDeviceSize prevOffset = 0;
1488
1489 // Test one-time free.
1490 for(size_t i = 0; i < 2; ++i)
1491 {
1492 // Allocate number of buffers of varying size that surely fit into this block.
1493 VkDeviceSize bufSumSize = 0;
1494 for(size_t i = 0; i < maxBufCount; ++i)
1495 {
1496 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1497 BufferInfo newBufInfo;
1498 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1499 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1500 assert(res == VK_SUCCESS);
1501 assert(i == 0 || allocInfo.offset > prevOffset);
1502 bufInfo.push_back(newBufInfo);
1503 prevOffset = allocInfo.offset;
1504 bufSumSize += bufCreateInfo.size;
1505 }
1506
1507 // Validate pool stats.
1508 VmaPoolStats stats;
1509 vmaGetPoolStats(g_hAllocator, pool, &stats);
1510 assert(stats.size == poolCreateInfo.blockSize);
1511 assert(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize);
1512 assert(stats.allocationCount == bufInfo.size());
1513
1514 // Destroy the buffers in random order.
1515 while(!bufInfo.empty())
1516 {
1517 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
1518 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
1519 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1520 bufInfo.erase(bufInfo.begin() + indexToDestroy);
1521 }
1522 }
1523
1524 // Test stack.
1525 {
1526 // Allocate number of buffers of varying size that surely fit into this block.
1527 for(size_t i = 0; i < maxBufCount; ++i)
1528 {
1529 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1530 BufferInfo newBufInfo;
1531 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1532 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1533 assert(res == VK_SUCCESS);
1534 assert(i == 0 || allocInfo.offset > prevOffset);
1535 bufInfo.push_back(newBufInfo);
1536 prevOffset = allocInfo.offset;
1537 }
1538
1539 // Destroy few buffers from top of the stack.
1540 for(size_t i = 0; i < maxBufCount / 5; ++i)
1541 {
1542 const BufferInfo& currBufInfo = bufInfo.back();
1543 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1544 bufInfo.pop_back();
1545 }
1546
1547 // Create some more
1548 for(size_t i = 0; i < maxBufCount / 5; ++i)
1549 {
1550 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1551 BufferInfo newBufInfo;
1552 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1553 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1554 assert(res == VK_SUCCESS);
1555 assert(i == 0 || allocInfo.offset > prevOffset);
1556 bufInfo.push_back(newBufInfo);
1557 prevOffset = allocInfo.offset;
1558 }
1559
1560 // Destroy the buffers in reverse order.
1561 while(!bufInfo.empty())
1562 {
1563 const BufferInfo& currBufInfo = bufInfo.back();
1564 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1565 bufInfo.pop_back();
1566 }
1567 }
1568
Adam Sawickiee082772018-06-20 17:45:49 +02001569 // Test ring buffer.
1570 {
1571 // Allocate number of buffers that surely fit into this block.
1572 bufCreateInfo.size = bufSizeMax;
1573 for(size_t i = 0; i < maxBufCount; ++i)
1574 {
1575 BufferInfo newBufInfo;
1576 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1577 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1578 assert(res == VK_SUCCESS);
1579 assert(i == 0 || allocInfo.offset > prevOffset);
1580 bufInfo.push_back(newBufInfo);
1581 prevOffset = allocInfo.offset;
1582 }
1583
1584 // Free and allocate new buffers so many times that we make sure we wrap-around at least once.
1585 const size_t buffersPerIter = maxBufCount / 10 - 1;
1586 const size_t iterCount = poolCreateInfo.blockSize / bufCreateInfo.size / buffersPerIter * 2;
1587 for(size_t iter = 0; iter < iterCount; ++iter)
1588 {
1589 for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)
1590 {
1591 const BufferInfo& currBufInfo = bufInfo.front();
1592 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1593 bufInfo.erase(bufInfo.begin());
1594 }
1595 for(size_t bufPerIter = 0; bufPerIter < buffersPerIter; ++bufPerIter)
1596 {
1597 BufferInfo newBufInfo;
1598 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1599 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1600 assert(res == VK_SUCCESS);
1601 bufInfo.push_back(newBufInfo);
1602 }
1603 }
1604
1605 // Allocate buffers until we reach out-of-memory.
1606 uint32_t debugIndex = 0;
1607 while(res == VK_SUCCESS)
1608 {
1609 BufferInfo newBufInfo;
1610 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1611 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1612 if(res == VK_SUCCESS)
1613 {
1614 bufInfo.push_back(newBufInfo);
1615 }
1616 else
1617 {
1618 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
1619 }
1620 ++debugIndex;
1621 }
1622
1623 // Destroy the buffers in random order.
1624 while(!bufInfo.empty())
1625 {
1626 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
1627 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
1628 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1629 bufInfo.erase(bufInfo.begin() + indexToDestroy);
1630 }
1631 }
1632
Adam Sawicki680b2252018-08-22 14:47:32 +02001633 // Test double stack.
1634 {
1635 // Allocate number of buffers of varying size that surely fit into this block, alternate from bottom/top.
1636 VkDeviceSize prevOffsetLower = 0;
1637 VkDeviceSize prevOffsetUpper = poolCreateInfo.blockSize;
1638 for(size_t i = 0; i < maxBufCount; ++i)
1639 {
1640 const bool upperAddress = (i % 2) != 0;
1641 if(upperAddress)
1642 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1643 else
1644 allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1645 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1646 BufferInfo newBufInfo;
1647 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1648 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1649 assert(res == VK_SUCCESS);
1650 if(upperAddress)
1651 {
1652 assert(allocInfo.offset < prevOffsetUpper);
1653 prevOffsetUpper = allocInfo.offset;
1654 }
1655 else
1656 {
1657 assert(allocInfo.offset >= prevOffsetLower);
1658 prevOffsetLower = allocInfo.offset;
1659 }
1660 assert(prevOffsetLower < prevOffsetUpper);
1661 bufInfo.push_back(newBufInfo);
1662 }
1663
1664 // Destroy few buffers from top of the stack.
1665 for(size_t i = 0; i < maxBufCount / 5; ++i)
1666 {
1667 const BufferInfo& currBufInfo = bufInfo.back();
1668 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1669 bufInfo.pop_back();
1670 }
1671
1672 // Create some more
1673 for(size_t i = 0; i < maxBufCount / 5; ++i)
1674 {
1675 const bool upperAddress = (i % 2) != 0;
1676 if(upperAddress)
1677 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1678 else
1679 allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1680 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1681 BufferInfo newBufInfo;
1682 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1683 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1684 assert(res == VK_SUCCESS);
1685 bufInfo.push_back(newBufInfo);
1686 }
1687
1688 // Destroy the buffers in reverse order.
1689 while(!bufInfo.empty())
1690 {
1691 const BufferInfo& currBufInfo = bufInfo.back();
1692 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1693 bufInfo.pop_back();
1694 }
1695
1696 // Create buffers on both sides until we reach out of memory.
1697 prevOffsetLower = 0;
1698 prevOffsetUpper = poolCreateInfo.blockSize;
1699 res = VK_SUCCESS;
1700 for(size_t i = 0; res == VK_SUCCESS; ++i)
1701 {
1702 const bool upperAddress = (i % 2) != 0;
1703 if(upperAddress)
1704 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1705 else
1706 allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1707 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1708 BufferInfo newBufInfo;
1709 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1710 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1711 if(res == VK_SUCCESS)
1712 {
1713 if(upperAddress)
1714 {
1715 assert(allocInfo.offset < prevOffsetUpper);
1716 prevOffsetUpper = allocInfo.offset;
1717 }
1718 else
1719 {
1720 assert(allocInfo.offset >= prevOffsetLower);
1721 prevOffsetLower = allocInfo.offset;
1722 }
1723 assert(prevOffsetLower < prevOffsetUpper);
1724 bufInfo.push_back(newBufInfo);
1725 }
1726 }
1727
1728 // Destroy the buffers in random order.
1729 while(!bufInfo.empty())
1730 {
1731 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
1732 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
1733 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1734 bufInfo.erase(bufInfo.begin() + indexToDestroy);
1735 }
1736
1737 // Create buffers on upper side only, constant size, until we reach out of memory.
1738 prevOffsetUpper = poolCreateInfo.blockSize;
1739 res = VK_SUCCESS;
1740 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
1741 bufCreateInfo.size = bufSizeMax;
1742 for(size_t i = 0; res == VK_SUCCESS; ++i)
1743 {
1744 BufferInfo newBufInfo;
1745 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1746 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1747 if(res == VK_SUCCESS)
1748 {
1749 assert(allocInfo.offset < prevOffsetUpper);
1750 prevOffsetUpper = allocInfo.offset;
1751 bufInfo.push_back(newBufInfo);
1752 }
1753 }
1754
1755 // Destroy the buffers in reverse order.
1756 while(!bufInfo.empty())
1757 {
1758 const BufferInfo& currBufInfo = bufInfo.back();
1759 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
1760 bufInfo.pop_back();
1761 }
1762 }
1763
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02001764 // Test ring buffer with lost allocations.
1765 {
1766 // Allocate number of buffers until pool is full.
1767 // Notice CAN_BECOME_LOST flag and call to vmaSetCurrentFrameIndex.
1768 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
1769 res = VK_SUCCESS;
1770 for(size_t i = 0; res == VK_SUCCESS; ++i)
1771 {
1772 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
1773
1774 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1775
1776 BufferInfo newBufInfo;
1777 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1778 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1779 if(res == VK_SUCCESS)
1780 bufInfo.push_back(newBufInfo);
1781 }
1782
1783 // Free first half of it.
1784 {
1785 const size_t buffersToDelete = bufInfo.size() / 2;
1786 for(size_t i = 0; i < buffersToDelete; ++i)
1787 {
1788 vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
1789 }
1790 bufInfo.erase(bufInfo.begin(), bufInfo.begin() + buffersToDelete);
1791 }
1792
1793 // Allocate number of buffers until pool is full again.
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02001794 // This way we make sure ring buffers wraps around, front in in the middle.
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02001795 res = VK_SUCCESS;
1796 for(size_t i = 0; res == VK_SUCCESS; ++i)
1797 {
1798 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
1799
1800 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1801
1802 BufferInfo newBufInfo;
1803 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1804 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1805 if(res == VK_SUCCESS)
1806 bufInfo.push_back(newBufInfo);
1807 }
1808
1809 VkDeviceSize firstNewOffset;
1810 {
1811 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
1812
1813 // Allocate a large buffer with CAN_MAKE_OTHER_LOST.
1814 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
1815 bufCreateInfo.size = bufSizeMax;
1816
1817 BufferInfo newBufInfo;
1818 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1819 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1820 assert(res == VK_SUCCESS);
1821 bufInfo.push_back(newBufInfo);
1822 firstNewOffset = allocInfo.offset;
1823
1824 // Make sure at least one buffer from the beginning became lost.
1825 vmaGetAllocationInfo(g_hAllocator, bufInfo[0].Allocation, &allocInfo);
1826 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
1827 }
1828
1829 // Allocate more buffers that CAN_MAKE_OTHER_LOST until we wrap-around with this.
1830 size_t newCount = 1;
1831 for(;;)
1832 {
1833 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
1834
1835 bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
1836
1837 BufferInfo newBufInfo;
1838 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
1839 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
1840 assert(res == VK_SUCCESS);
1841 bufInfo.push_back(newBufInfo);
1842 ++newCount;
1843 if(allocInfo.offset < firstNewOffset)
1844 break;
1845 }
1846
Adam Sawicki0ebdf0c2018-08-22 17:02:44 +02001847 // Delete buffers that are lost.
1848 for(size_t i = bufInfo.size(); i--; )
1849 {
1850 vmaGetAllocationInfo(g_hAllocator, bufInfo[i].Allocation, &allocInfo);
1851 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
1852 {
1853 vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
1854 bufInfo.erase(bufInfo.begin() + i);
1855 }
1856 }
1857
1858 // Test vmaMakePoolAllocationsLost
1859 {
1860 vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
1861
1862 size_t lostAllocCount = SIZE_MAX;
1863 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostAllocCount);
1864 assert(lostAllocCount > 0);
1865
1866 size_t realLostAllocCount = 0;
1867 for(size_t i = 0; i < bufInfo.size(); ++i)
1868 {
1869 vmaGetAllocationInfo(g_hAllocator, bufInfo[i].Allocation, &allocInfo);
1870 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
1871 ++realLostAllocCount;
1872 }
1873 assert(realLostAllocCount == lostAllocCount);
1874 }
1875
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02001876 // Destroy all the buffers in forward order.
1877 for(size_t i = 0; i < bufInfo.size(); ++i)
1878 vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
1879 bufInfo.clear();
1880 }
1881
Adam Sawicki70a683e2018-08-24 15:36:32 +02001882 vmaDestroyPool(g_hAllocator, pool);
1883}
Adam Sawickif799c4f2018-08-23 10:40:30 +02001884
// Tests a linear-algorithm pool that is allowed to grow to multiple memory blocks.
// Verifies two scenarios: (1) one-time free in random order collapses the pool
// back to at most one block, and (2) stack-like (LIFO) usage frees the second
// block once allocations retreat back into the first one.
static void TestLinearAllocatorMultiBlock()
{
    wprintf(L"Test linear allocator multi block\n");

    RandomNumberGenerator rand{345673};

    VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    sampleBufCreateInfo.size = 1024 * 1024;
    sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo sampleAllocCreateInfo = {};
    sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    // No explicit blockSize / maxBlockCount: the pool may allocate additional
    // blocks of default size, which is what this test relies on.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    assert(res == VK_SUCCESS);

    VmaPool pool = nullptr;
    res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
    assert(res == VK_SUCCESS);

    VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;

    std::vector<BufferInfo> bufInfo;
    VmaAllocationInfo allocInfo;

    // Test one-time free.
    {
        // Allocate buffers until we move to a second block.
        // Detected by observing a change in VkDeviceMemory of consecutive allocations.
        VkDeviceMemory lastMem = VK_NULL_HANDLE;
        for(uint32_t i = 0; ; ++i)
        {
            BufferInfo newBufInfo;
            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
            assert(res == VK_SUCCESS);
            bufInfo.push_back(newBufInfo);
            if(lastMem && allocInfo.deviceMemory != lastMem)
            {
                break;
            }
            lastMem = allocInfo.deviceMemory;
        }

        assert(bufInfo.size() > 2);

        // Make sure that pool has now two blocks.
        VmaPoolStats poolStats = {};
        vmaGetPoolStats(g_hAllocator, pool, &poolStats);
        assert(poolStats.blockCount == 2);

        // Destroy all the buffers in random order.
        while(!bufInfo.empty())
        {
            const size_t indexToDestroy = rand.Generate() % bufInfo.size();
            const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
            bufInfo.erase(bufInfo.begin() + indexToDestroy);
        }

        // Make sure that pool has now at most one block.
        // (Empty blocks beyond the first should have been released.)
        vmaGetPoolStats(g_hAllocator, pool, &poolStats);
        assert(poolStats.blockCount <= 1);
    }

    // Test stack.
    {
        // Allocate buffers until we move to a second block.
        VkDeviceMemory lastMem = VK_NULL_HANDLE;
        for(uint32_t i = 0; ; ++i)
        {
            BufferInfo newBufInfo;
            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
            assert(res == VK_SUCCESS);
            bufInfo.push_back(newBufInfo);
            if(lastMem && allocInfo.deviceMemory != lastMem)
            {
                break;
            }
            lastMem = allocInfo.deviceMemory;
        }

        assert(bufInfo.size() > 2);

        // Add few more buffers.
        for(uint32_t i = 0; i < 5; ++i)
        {
            BufferInfo newBufInfo;
            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
            assert(res == VK_SUCCESS);
            bufInfo.push_back(newBufInfo);
        }

        // Make sure that pool has now two blocks.
        VmaPoolStats poolStats = {};
        vmaGetPoolStats(g_hAllocator, pool, &poolStats);
        assert(poolStats.blockCount == 2);

        // Delete half of buffers, LIFO.
        for(size_t i = 0, countToDelete = bufInfo.size() / 2; i < countToDelete; ++i)
        {
            const BufferInfo& currBufInfo = bufInfo.back();
            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
            bufInfo.pop_back();
        }

        // Add one more buffer.
        // It should go into the first block now that the top of the stack retreated there.
        BufferInfo newBufInfo;
        res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
            &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
        assert(res == VK_SUCCESS);
        bufInfo.push_back(newBufInfo);

        // Make sure that pool has now one block.
        vmaGetPoolStats(g_hAllocator, pool, &poolStats);
        assert(poolStats.blockCount == 1);

        // Delete all the remaining buffers, LIFO.
        while(!bufInfo.empty())
        {
            const BufferInfo& currBufInfo = bufInfo.back();
            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
            bufInfo.pop_back();
        }
    }

    vmaDestroyPool(g_hAllocator, pool);
}
2019
Adam Sawickifd11d752018-08-22 15:02:10 +02002020static void ManuallyTestLinearAllocator()
2021{
2022 VmaStats origStats;
2023 vmaCalculateStats(g_hAllocator, &origStats);
2024
2025 wprintf(L"Manually test linear allocator\n");
2026
2027 RandomNumberGenerator rand{645332};
2028
2029 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2030 sampleBufCreateInfo.size = 1024; // Whatever.
2031 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
2032
2033 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
2034 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
2035
2036 VmaPoolCreateInfo poolCreateInfo = {};
2037 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
2038 assert(res == VK_SUCCESS);
2039
2040 poolCreateInfo.blockSize = 10 * 1024;
2041 poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
2042 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
2043
2044 VmaPool pool = nullptr;
2045 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
2046 assert(res == VK_SUCCESS);
2047
2048 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
2049
2050 VmaAllocationCreateInfo allocCreateInfo = {};
2051 allocCreateInfo.pool = pool;
2052
2053 std::vector<BufferInfo> bufInfo;
2054 VmaAllocationInfo allocInfo;
2055 BufferInfo newBufInfo;
2056
2057 // Test double stack.
2058 {
2059 /*
2060 Lower: Buffer 32 B, Buffer 1024 B, Buffer 32 B
2061 Upper: Buffer 16 B, Buffer 1024 B, Buffer 128 B
2062
2063 Totally:
2064 1 block allocated
2065 10240 Vulkan bytes
2066 6 new allocations
2067 2256 bytes in allocations
2068 */
2069
2070 bufCreateInfo.size = 32;
2071 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2072 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
2073 assert(res == VK_SUCCESS);
2074 bufInfo.push_back(newBufInfo);
2075
2076 bufCreateInfo.size = 1024;
2077 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2078 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
2079 assert(res == VK_SUCCESS);
2080 bufInfo.push_back(newBufInfo);
2081
2082 bufCreateInfo.size = 32;
2083 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2084 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
2085 assert(res == VK_SUCCESS);
2086 bufInfo.push_back(newBufInfo);
2087
2088 allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
2089
2090 bufCreateInfo.size = 128;
2091 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2092 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
2093 assert(res == VK_SUCCESS);
2094 bufInfo.push_back(newBufInfo);
2095
2096 bufCreateInfo.size = 1024;
2097 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2098 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
2099 assert(res == VK_SUCCESS);
2100 bufInfo.push_back(newBufInfo);
2101
2102 bufCreateInfo.size = 16;
2103 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
2104 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
2105 assert(res == VK_SUCCESS);
2106 bufInfo.push_back(newBufInfo);
2107
2108 VmaStats currStats;
2109 vmaCalculateStats(g_hAllocator, &currStats);
2110 VmaPoolStats poolStats;
2111 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
2112
2113 char* statsStr = nullptr;
2114 vmaBuildStatsString(g_hAllocator, &statsStr, VK_TRUE);
2115
2116 // PUT BREAKPOINT HERE TO CHECK.
2117 // Inspect: currStats versus origStats, poolStats, statsStr.
2118 int I = 0;
2119
2120 vmaFreeStatsString(g_hAllocator, statsStr);
2121
2122 // Destroy the buffers in reverse order.
2123 while(!bufInfo.empty())
2124 {
2125 const BufferInfo& currBufInfo = bufInfo.back();
2126 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
2127 bufInfo.pop_back();
2128 }
2129 }
2130
2131 vmaDestroyPool(g_hAllocator, pool);
2132}
2133
// Runs one benchmark configuration: measures total allocation and deallocation
// time over `iterationCount` rounds of allocCount allocations in a single-block
// pool, then prints the result and optionally appends a CSV row to `file`.
//   linear        - use VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT for the pool.
//   empty         - if false, the pool is pre-fragmented: filled to half
//                   capacity, then half of those allocations freed randomly.
//   allocStrategy - VMA_ALLOCATION_CREATE_STRATEGY_* bits to benchmark.
//   freeOrder     - order in which each round's allocations are freed.
static void BenchmarkLinearAllocatorCase(FILE* file,
    bool linear,
    bool empty,
    VmaAllocationCreateFlags allocStrategy,
    FREE_ORDER freeOrder)
{
    RandomNumberGenerator rand{16223};

    const VkDeviceSize bufSizeMin = 32;
    const VkDeviceSize bufSizeMax = 1024;
    const size_t maxBufCapacity = 10000;
    const uint32_t iterationCount = 10;

    VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    sampleBufCreateInfo.size = bufSizeMax;
    sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaAllocationCreateInfo sampleAllocCreateInfo = {};
    sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaPoolCreateInfo poolCreateInfo = {};
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    assert(res == VK_SUCCESS);

    // One block big enough for the whole benchmark, so timing never includes
    // creation of additional VkDeviceMemory blocks.
    poolCreateInfo.blockSize = bufSizeMax * maxBufCapacity;
    if(linear)
        poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;

    VmaPool pool = nullptr;
    res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
    assert(res == VK_SUCCESS);

    // Buffer created just to get memory requirements. Never bound to any memory.
    VkBuffer dummyBuffer = VK_NULL_HANDLE;
    res = vkCreateBuffer(g_hDevice, &sampleBufCreateInfo, nullptr, &dummyBuffer);
    assert(res == VK_SUCCESS && dummyBuffer);

    VkMemoryRequirements memReq = {};
    vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);

    vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    allocCreateInfo.flags = allocStrategy;

    VmaAllocation alloc;
    std::vector<VmaAllocation> baseAllocations;

    if(!empty)
    {
        // Make allocations up to half of pool size.
        VkDeviceSize totalSize = 0;
        while(totalSize < poolCreateInfo.blockSize / 2)
        {
            // memReq.alignment/memoryTypeBits from the dummy buffer are reused;
            // only size varies per allocation.
            memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
            res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
            assert(res == VK_SUCCESS);
            baseAllocations.push_back(alloc);
            totalSize += memReq.size;
        }

        // Delete half of them, choose randomly.
        size_t allocsToDelete = baseAllocations.size() / 2;
        for(size_t i = 0; i < allocsToDelete; ++i)
        {
            const size_t index = (size_t)rand.Generate() % baseAllocations.size();
            vmaFreeMemory(g_hAllocator, baseAllocations[index]);
            baseAllocations.erase(baseAllocations.begin() + index);
        }
    }

    // BENCHMARK
    const size_t allocCount = maxBufCapacity / 2;
    std::vector<VmaAllocation> testAllocations;
    testAllocations.reserve(allocCount);
    duration allocTotalDuration = duration::zero();
    duration freeTotalDuration = duration::zero();
    for(uint32_t iterationIndex = 0; iterationIndex < iterationCount; ++iterationIndex)
    {
        // Allocations
        time_point allocTimeBeg = std::chrono::high_resolution_clock::now();
        for(size_t i = 0; i < allocCount; ++i)
        {
            memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
            res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
            assert(res == VK_SUCCESS);
            testAllocations.push_back(alloc);
        }
        allocTotalDuration += std::chrono::high_resolution_clock::now() - allocTimeBeg;

        // Deallocations
        // Reordering happens outside the timed region; only the frees are measured.
        switch(freeOrder)
        {
        case FREE_ORDER::FORWARD:
            // Leave testAllocations unchanged.
            break;
        case FREE_ORDER::BACKWARD:
            std::reverse(testAllocations.begin(), testAllocations.end());
            break;
        case FREE_ORDER::RANDOM:
            std::shuffle(testAllocations.begin(), testAllocations.end(), MyUniformRandomNumberGenerator(rand));
            break;
        default: assert(0);
        }

        time_point freeTimeBeg = std::chrono::high_resolution_clock::now();
        for(size_t i = 0; i < allocCount; ++i)
            vmaFreeMemory(g_hAllocator, testAllocations[i]);
        freeTotalDuration += std::chrono::high_resolution_clock::now() - freeTimeBeg;

        testAllocations.clear();
    }

    // Delete baseAllocations
    while(!baseAllocations.empty())
    {
        vmaFreeMemory(g_hAllocator, baseAllocations.back());
        baseAllocations.pop_back();
    }

    vmaDestroyPool(g_hAllocator, pool);

    const float allocTotalSeconds = ToFloatSeconds(allocTotalDuration);
    const float freeTotalSeconds = ToFloatSeconds(freeTotalDuration);

    printf("    LinearAlgorithm=%u %s Allocation=%s FreeOrder=%s: allocations %g s, free %g s\n",
        linear ? 1 : 0,
        empty ? "Empty" : "Not empty",
        GetAllocationStrategyName(allocStrategy),
        FREE_ORDER_NAMES[(size_t)freeOrder],
        allocTotalSeconds,
        freeTotalSeconds);

    if(file)
    {
        std::string currTime;
        CurrentTimeToStr(currTime);

        fprintf(file, "%s,%s,%u,%u,%s,%s,%g,%g\n",
            CODE_DESCRIPTION, currTime.c_str(),
            linear ? 1 : 0,
            empty ? 1 : 0,
            GetAllocationStrategyName(allocStrategy),
            FREE_ORDER_NAMES[(uint32_t)freeOrder],
            allocTotalSeconds,
            freeTotalSeconds);
    }
}
2284
Adam Sawicki33d2ce72018-08-27 13:59:13 +02002285static void BenchmarkLinearAllocator(FILE* file)
Adam Sawicki0a607132018-08-24 11:18:41 +02002286{
2287 wprintf(L"Benchmark linear allocator\n");
2288
Adam Sawicki33d2ce72018-08-27 13:59:13 +02002289 if(file)
2290 {
2291 fprintf(file,
2292 "Code,Time,"
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02002293 "Linear,Empty,Allocation strategy,Free order,"
Adam Sawicki33d2ce72018-08-27 13:59:13 +02002294 "Allocation time (s),Deallocation time (s)\n");
2295 }
2296
Adam Sawicki0a607132018-08-24 11:18:41 +02002297 uint32_t freeOrderCount = 1;
2298 if(ConfigType >= CONFIG_TYPE::CONFIG_TYPE_LARGE)
2299 freeOrderCount = 3;
2300 else if(ConfigType >= CONFIG_TYPE::CONFIG_TYPE_SMALL)
2301 freeOrderCount = 2;
2302
2303 const uint32_t emptyCount = ConfigType >= CONFIG_TYPE::CONFIG_TYPE_SMALL ? 2 : 1;
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02002304 const uint32_t allocStrategyCount = GetAllocationStrategyCount();
Adam Sawicki0a607132018-08-24 11:18:41 +02002305
2306 for(uint32_t freeOrderIndex = 0; freeOrderIndex < freeOrderCount; ++freeOrderIndex)
2307 {
2308 FREE_ORDER freeOrder = FREE_ORDER::COUNT;
2309 switch(freeOrderIndex)
2310 {
2311 case 0: freeOrder = FREE_ORDER::BACKWARD; break;
2312 case 1: freeOrder = FREE_ORDER::FORWARD; break;
2313 case 2: freeOrder = FREE_ORDER::RANDOM; break;
2314 default: assert(0);
2315 }
2316
2317 for(uint32_t emptyIndex = 0; emptyIndex < emptyCount; ++emptyIndex)
2318 {
2319 for(uint32_t linearIndex = 0; linearIndex < 2; ++linearIndex)
2320 {
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02002321 const bool linear = linearIndex ? 1 : 0;
2322
2323 uint32_t currAllocStrategyCount = linear ? 1 : allocStrategyCount;
2324 for(uint32_t allocStrategyIndex = 0; allocStrategyIndex < currAllocStrategyCount; ++allocStrategyIndex)
2325 {
2326 VmaAllocatorCreateFlags strategy = 0;
2327 if(!linear)
2328 {
2329 switch(allocStrategyIndex)
2330 {
2331 case 0: strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; break;
2332 case 1: strategy = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT; break;
2333 case 2: strategy = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT; break;
2334 default: assert(0);
2335 }
2336 }
2337
2338 BenchmarkLinearAllocatorCase(
2339 file,
2340 linear, // linear
2341 emptyIndex ? 0 : 1, // empty
2342 strategy,
2343 freeOrder); // freeOrder
2344 }
Adam Sawicki0a607132018-08-24 11:18:41 +02002345 }
2346 }
2347 }
2348}
2349
Adam Sawickib8333fb2018-03-13 16:15:53 +01002350static void TestPool_SameSize()
2351{
2352 const VkDeviceSize BUF_SIZE = 1024 * 1024;
2353 const size_t BUF_COUNT = 100;
2354 VkResult res;
2355
2356 RandomNumberGenerator rand{123};
2357
2358 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2359 bufferInfo.size = BUF_SIZE;
2360 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
2361
2362 uint32_t memoryTypeBits = UINT32_MAX;
2363 {
2364 VkBuffer dummyBuffer;
2365 res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer);
2366 assert(res == VK_SUCCESS);
2367
2368 VkMemoryRequirements memReq;
2369 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
2370 memoryTypeBits = memReq.memoryTypeBits;
2371
2372 vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);
2373 }
2374
2375 VmaAllocationCreateInfo poolAllocInfo = {};
2376 poolAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
2377 uint32_t memTypeIndex;
2378 res = vmaFindMemoryTypeIndex(
2379 g_hAllocator,
2380 memoryTypeBits,
2381 &poolAllocInfo,
2382 &memTypeIndex);
2383
2384 VmaPoolCreateInfo poolCreateInfo = {};
2385 poolCreateInfo.memoryTypeIndex = memTypeIndex;
2386 poolCreateInfo.blockSize = BUF_SIZE * BUF_COUNT / 4;
2387 poolCreateInfo.minBlockCount = 1;
2388 poolCreateInfo.maxBlockCount = 4;
2389 poolCreateInfo.frameInUseCount = 0;
2390
2391 VmaPool pool;
2392 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
2393 assert(res == VK_SUCCESS);
2394
2395 vmaSetCurrentFrameIndex(g_hAllocator, 1);
2396
2397 VmaAllocationCreateInfo allocInfo = {};
2398 allocInfo.pool = pool;
2399 allocInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
2400 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
2401
2402 struct BufItem
2403 {
2404 VkBuffer Buf;
2405 VmaAllocation Alloc;
2406 };
2407 std::vector<BufItem> items;
2408
2409 // Fill entire pool.
2410 for(size_t i = 0; i < BUF_COUNT; ++i)
2411 {
2412 BufItem item;
2413 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2414 assert(res == VK_SUCCESS);
2415 items.push_back(item);
2416 }
2417
2418 // Make sure that another allocation would fail.
2419 {
2420 BufItem item;
2421 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2422 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
2423 }
2424
2425 // Validate that no buffer is lost. Also check that they are not mapped.
2426 for(size_t i = 0; i < items.size(); ++i)
2427 {
2428 VmaAllocationInfo allocInfo;
2429 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
2430 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
2431 assert(allocInfo.pMappedData == nullptr);
2432 }
2433
2434 // Free some percent of random items.
2435 {
2436 const size_t PERCENT_TO_FREE = 10;
2437 size_t itemsToFree = items.size() * PERCENT_TO_FREE / 100;
2438 for(size_t i = 0; i < itemsToFree; ++i)
2439 {
2440 size_t index = (size_t)rand.Generate() % items.size();
2441 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
2442 items.erase(items.begin() + index);
2443 }
2444 }
2445
2446 // Randomly allocate and free items.
2447 {
2448 const size_t OPERATION_COUNT = BUF_COUNT;
2449 for(size_t i = 0; i < OPERATION_COUNT; ++i)
2450 {
2451 bool allocate = rand.Generate() % 2 != 0;
2452 if(allocate)
2453 {
2454 if(items.size() < BUF_COUNT)
2455 {
2456 BufItem item;
2457 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2458 assert(res == VK_SUCCESS);
2459 items.push_back(item);
2460 }
2461 }
2462 else // Free
2463 {
2464 if(!items.empty())
2465 {
2466 size_t index = (size_t)rand.Generate() % items.size();
2467 vmaDestroyBuffer(g_hAllocator, items[index].Buf, items[index].Alloc);
2468 items.erase(items.begin() + index);
2469 }
2470 }
2471 }
2472 }
2473
2474 // Allocate up to maximum.
2475 while(items.size() < BUF_COUNT)
2476 {
2477 BufItem item;
2478 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2479 assert(res == VK_SUCCESS);
2480 items.push_back(item);
2481 }
2482
2483 // Validate that no buffer is lost.
2484 for(size_t i = 0; i < items.size(); ++i)
2485 {
2486 VmaAllocationInfo allocInfo;
2487 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
2488 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
2489 }
2490
2491 // Next frame.
2492 vmaSetCurrentFrameIndex(g_hAllocator, 2);
2493
2494 // Allocate another BUF_COUNT buffers.
2495 for(size_t i = 0; i < BUF_COUNT; ++i)
2496 {
2497 BufItem item;
2498 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2499 assert(res == VK_SUCCESS);
2500 items.push_back(item);
2501 }
2502
2503 // Make sure the first BUF_COUNT is lost. Delete them.
2504 for(size_t i = 0; i < BUF_COUNT; ++i)
2505 {
2506 VmaAllocationInfo allocInfo;
2507 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
2508 assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
2509 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
2510 }
2511 items.erase(items.begin(), items.begin() + BUF_COUNT);
2512
2513 // Validate that no buffer is lost.
2514 for(size_t i = 0; i < items.size(); ++i)
2515 {
2516 VmaAllocationInfo allocInfo;
2517 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
2518 assert(allocInfo.deviceMemory != VK_NULL_HANDLE);
2519 }
2520
2521 // Free one item.
2522 vmaDestroyBuffer(g_hAllocator, items.back().Buf, items.back().Alloc);
2523 items.pop_back();
2524
2525 // Validate statistics.
2526 {
2527 VmaPoolStats poolStats = {};
2528 vmaGetPoolStats(g_hAllocator, pool, &poolStats);
2529 assert(poolStats.allocationCount == items.size());
2530 assert(poolStats.size = BUF_COUNT * BUF_SIZE);
2531 assert(poolStats.unusedRangeCount == 1);
2532 assert(poolStats.unusedRangeSizeMax == BUF_SIZE);
2533 assert(poolStats.unusedSize == BUF_SIZE);
2534 }
2535
2536 // Free all remaining items.
2537 for(size_t i = items.size(); i--; )
2538 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
2539 items.clear();
2540
2541 // Allocate maximum items again.
2542 for(size_t i = 0; i < BUF_COUNT; ++i)
2543 {
2544 BufItem item;
2545 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2546 assert(res == VK_SUCCESS);
2547 items.push_back(item);
2548 }
2549
2550 // Delete every other item.
2551 for(size_t i = 0; i < BUF_COUNT / 2; ++i)
2552 {
2553 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
2554 items.erase(items.begin() + i);
2555 }
2556
2557 // Defragment!
2558 {
2559 std::vector<VmaAllocation> allocationsToDefragment(items.size());
2560 for(size_t i = 0; i < items.size(); ++i)
2561 allocationsToDefragment[i] = items[i].Alloc;
2562
2563 VmaDefragmentationStats defragmentationStats;
2564 res = vmaDefragment(g_hAllocator, allocationsToDefragment.data(), items.size(), nullptr, nullptr, &defragmentationStats);
2565 assert(res == VK_SUCCESS);
2566 assert(defragmentationStats.deviceMemoryBlocksFreed == 2);
2567 }
2568
2569 // Free all remaining items.
2570 for(size_t i = items.size(); i--; )
2571 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
2572 items.clear();
2573
2574 ////////////////////////////////////////////////////////////////////////////////
2575 // Test for vmaMakePoolAllocationsLost
2576
2577 // Allocate 4 buffers on frame 10.
2578 vmaSetCurrentFrameIndex(g_hAllocator, 10);
2579 for(size_t i = 0; i < 4; ++i)
2580 {
2581 BufItem item;
2582 res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr);
2583 assert(res == VK_SUCCESS);
2584 items.push_back(item);
2585 }
2586
2587 // Touch first 2 of them on frame 11.
2588 vmaSetCurrentFrameIndex(g_hAllocator, 11);
2589 for(size_t i = 0; i < 2; ++i)
2590 {
2591 VmaAllocationInfo allocInfo;
2592 vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo);
2593 }
2594
2595 // vmaMakePoolAllocationsLost. Only remaining 2 should be lost.
2596 size_t lostCount = 0xDEADC0DE;
2597 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
2598 assert(lostCount == 2);
2599
2600 // Make another call. Now 0 should be lost.
2601 vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount);
2602 assert(lostCount == 0);
2603
2604 // Make another call, with null count. Should not crash.
2605 vmaMakePoolAllocationsLost(g_hAllocator, pool, nullptr);
2606
2607 // END: Free all remaining items.
2608 for(size_t i = items.size(); i--; )
2609 vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc);
2610
2611 items.clear();
2612
Adam Sawickid2924172018-06-11 12:48:46 +02002613 ////////////////////////////////////////////////////////////////////////////////
2614 // Test for allocation too large for pool
2615
2616 {
2617 VmaAllocationCreateInfo allocCreateInfo = {};
2618 allocCreateInfo.pool = pool;
2619
2620 VkMemoryRequirements memReq;
2621 memReq.memoryTypeBits = UINT32_MAX;
2622 memReq.alignment = 1;
2623 memReq.size = poolCreateInfo.blockSize + 4;
2624
2625 VmaAllocation alloc = nullptr;
2626 res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr);
2627 assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr);
2628 }
2629
Adam Sawickib8333fb2018-03-13 16:15:53 +01002630 vmaDestroyPool(g_hAllocator, pool);
2631}
2632
// Returns true when every byte in [pMemory, pMemory + size) equals `pattern`.
// An empty range (size == 0) trivially validates as true.
static bool ValidatePattern(const void* pMemory, size_t size, uint8_t pattern)
{
    const uint8_t* curr = (const uint8_t*)pMemory;
    const uint8_t* const end = curr + size;
    while(curr != end)
    {
        if(*curr != pattern)
            return false;
        ++curr;
    }
    return true;
}
2645
// Verifies debug fill patterns of allocations: newly created allocations should
// contain 0xDC bytes and freed ones 0xEF, for both persistently mapped and
// manually mapped buffers.
// NOTE(review): the 0xDC/0xEF checks presumably require the library to be built
// with debug allocation-initialization enabled (VMA_DEBUG_INITIALIZE_ALLOCATIONS
// or equivalent) - confirm build configuration before relying on this test.
static void TestAllocationsInitialization()
{
    VkResult res;

    const size_t BUF_SIZE = 1024;

    // Create pool.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = BUF_SIZE;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo dummyBufAllocCreateInfo = {};
    dummyBufAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.blockSize = BUF_SIZE * 10;
    poolCreateInfo.minBlockCount = 1; // To keep memory alive while pool exists.
    poolCreateInfo.maxBlockCount = 1;
    res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufInfo, &dummyBufAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    assert(res == VK_SUCCESS);

    VmaAllocationCreateInfo bufAllocCreateInfo = {};
    res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &bufAllocCreateInfo.pool);
    assert(res == VK_SUCCESS);

    // Create one persistently mapped buffer to keep memory of this block mapped,
    // so that pointer to mapped data will remain (more or less...) valid even
    // after destruction of other allocations.

    bufAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
    VkBuffer firstBuf;
    VmaAllocation firstAlloc;
    res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &firstBuf, &firstAlloc, nullptr);
    assert(res == VK_SUCCESS);

    // Test buffers.

    // Iteration 0: persistently mapped buffer; iteration 1: manual map/unmap.
    for(uint32_t i = 0; i < 2; ++i)
    {
        const bool persistentlyMapped = i == 0;
        bufAllocCreateInfo.flags = persistentlyMapped ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0;
        VkBuffer buf;
        VmaAllocation alloc;
        VmaAllocationInfo allocInfo;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &buf, &alloc, &allocInfo);
        assert(res == VK_SUCCESS);

        void* pMappedData;
        if(!persistentlyMapped)
        {
            res = vmaMapMemory(g_hAllocator, alloc, &pMappedData);
            assert(res == VK_SUCCESS);
        }
        else
        {
            pMappedData = allocInfo.pMappedData;
        }

        // Validate initialized content
        bool valid = ValidatePattern(pMappedData, BUF_SIZE, 0xDC);
        assert(valid);

        if(!persistentlyMapped)
        {
            vmaUnmapMemory(g_hAllocator, alloc);
        }

        vmaDestroyBuffer(g_hAllocator, buf, alloc);

        // Validate freed content
        // pMappedData stays dereferenceable only because firstBuf keeps the
        // whole block alive and mapped.
        valid = ValidatePattern(pMappedData, BUF_SIZE, 0xEF);
        assert(valid);
    }

    vmaDestroyBuffer(g_hAllocator, firstBuf, firstAlloc);
    vmaDestroyPool(g_hAllocator, bufAllocCreateInfo.pool);
}
2724
Adam Sawickib8333fb2018-03-13 16:15:53 +01002725static void TestPool_Benchmark(
2726 PoolTestResult& outResult,
2727 const PoolTestConfig& config)
2728{
2729 assert(config.ThreadCount > 0);
2730
2731 RandomNumberGenerator mainRand{config.RandSeed};
2732
2733 uint32_t allocationSizeProbabilitySum = std::accumulate(
2734 config.AllocationSizes.begin(),
2735 config.AllocationSizes.end(),
2736 0u,
2737 [](uint32_t sum, const AllocationSize& allocSize) {
2738 return sum + allocSize.Probability;
2739 });
2740
2741 VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
2742 bufferInfo.size = 256; // Whatever.
2743 bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
2744
2745 VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
2746 imageInfo.imageType = VK_IMAGE_TYPE_2D;
2747 imageInfo.extent.width = 256; // Whatever.
2748 imageInfo.extent.height = 256; // Whatever.
2749 imageInfo.extent.depth = 1;
2750 imageInfo.mipLevels = 1;
2751 imageInfo.arrayLayers = 1;
2752 imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
2753 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // LINEAR if CPU memory.
2754 imageInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
2755 imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // TRANSFER_SRC if CPU memory.
2756 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
2757
2758 uint32_t bufferMemoryTypeBits = UINT32_MAX;
2759 {
2760 VkBuffer dummyBuffer;
2761 VkResult res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer);
2762 assert(res == VK_SUCCESS);
2763
2764 VkMemoryRequirements memReq;
2765 vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq);
2766 bufferMemoryTypeBits = memReq.memoryTypeBits;
2767
2768 vkDestroyBuffer(g_hDevice, dummyBuffer, nullptr);
2769 }
2770
2771 uint32_t imageMemoryTypeBits = UINT32_MAX;
2772 {
2773 VkImage dummyImage;
2774 VkResult res = vkCreateImage(g_hDevice, &imageInfo, nullptr, &dummyImage);
2775 assert(res == VK_SUCCESS);
2776
2777 VkMemoryRequirements memReq;
2778 vkGetImageMemoryRequirements(g_hDevice, dummyImage, &memReq);
2779 imageMemoryTypeBits = memReq.memoryTypeBits;
2780
2781 vkDestroyImage(g_hDevice, dummyImage, nullptr);
2782 }
2783
2784 uint32_t memoryTypeBits = 0;
2785 if(config.UsesBuffers() && config.UsesImages())
2786 {
2787 memoryTypeBits = bufferMemoryTypeBits & imageMemoryTypeBits;
2788 if(memoryTypeBits == 0)
2789 {
2790 PrintWarning(L"Cannot test buffers + images in the same memory pool on this GPU.");
2791 return;
2792 }
2793 }
2794 else if(config.UsesBuffers())
2795 memoryTypeBits = bufferMemoryTypeBits;
2796 else if(config.UsesImages())
2797 memoryTypeBits = imageMemoryTypeBits;
2798 else
2799 assert(0);
2800
2801 VmaPoolCreateInfo poolCreateInfo = {};
2802 poolCreateInfo.memoryTypeIndex = 0;
2803 poolCreateInfo.minBlockCount = 1;
2804 poolCreateInfo.maxBlockCount = 1;
2805 poolCreateInfo.blockSize = config.PoolSize;
2806 poolCreateInfo.frameInUseCount = 1;
2807
2808 VmaAllocationCreateInfo dummyAllocCreateInfo = {};
2809 dummyAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
2810 vmaFindMemoryTypeIndex(g_hAllocator, memoryTypeBits, &dummyAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
2811
2812 VmaPool pool;
2813 VkResult res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
2814 assert(res == VK_SUCCESS);
2815
2816 // Start time measurement - after creating pool and initializing data structures.
2817 time_point timeBeg = std::chrono::high_resolution_clock::now();
2818
2819 ////////////////////////////////////////////////////////////////////////////////
2820 // ThreadProc
2821 auto ThreadProc = [&](
2822 PoolTestThreadResult* outThreadResult,
2823 uint32_t randSeed,
2824 HANDLE frameStartEvent,
2825 HANDLE frameEndEvent) -> void
2826 {
2827 RandomNumberGenerator threadRand{randSeed};
2828
2829 outThreadResult->AllocationTimeMin = duration::max();
2830 outThreadResult->AllocationTimeSum = duration::zero();
2831 outThreadResult->AllocationTimeMax = duration::min();
2832 outThreadResult->DeallocationTimeMin = duration::max();
2833 outThreadResult->DeallocationTimeSum = duration::zero();
2834 outThreadResult->DeallocationTimeMax = duration::min();
2835 outThreadResult->AllocationCount = 0;
2836 outThreadResult->DeallocationCount = 0;
2837 outThreadResult->LostAllocationCount = 0;
2838 outThreadResult->LostAllocationTotalSize = 0;
2839 outThreadResult->FailedAllocationCount = 0;
2840 outThreadResult->FailedAllocationTotalSize = 0;
2841
2842 struct Item
2843 {
2844 VkDeviceSize BufferSize;
2845 VkExtent2D ImageSize;
2846 VkBuffer Buf;
2847 VkImage Image;
2848 VmaAllocation Alloc;
2849
2850 VkDeviceSize CalcSizeBytes() const
2851 {
2852 return BufferSize +
2853 ImageSize.width * ImageSize.height * 4;
2854 }
2855 };
2856 std::vector<Item> unusedItems, usedItems;
2857
2858 const size_t threadTotalItemCount = config.TotalItemCount / config.ThreadCount;
2859
2860 // Create all items - all unused, not yet allocated.
2861 for(size_t i = 0; i < threadTotalItemCount; ++i)
2862 {
2863 Item item = {};
2864
2865 uint32_t allocSizeIndex = 0;
2866 uint32_t r = threadRand.Generate() % allocationSizeProbabilitySum;
2867 while(r >= config.AllocationSizes[allocSizeIndex].Probability)
2868 r -= config.AllocationSizes[allocSizeIndex++].Probability;
2869
2870 const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex];
2871 if(allocSize.BufferSizeMax > 0)
2872 {
2873 assert(allocSize.BufferSizeMin > 0);
2874 assert(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0);
2875 if(allocSize.BufferSizeMax == allocSize.BufferSizeMin)
2876 item.BufferSize = allocSize.BufferSizeMin;
2877 else
2878 {
2879 item.BufferSize = allocSize.BufferSizeMin + threadRand.Generate() % (allocSize.BufferSizeMax - allocSize.BufferSizeMin);
2880 item.BufferSize = item.BufferSize / 16 * 16;
2881 }
2882 }
2883 else
2884 {
2885 assert(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0);
2886 if(allocSize.ImageSizeMax == allocSize.ImageSizeMin)
2887 item.ImageSize.width = item.ImageSize.height = allocSize.ImageSizeMax;
2888 else
2889 {
2890 item.ImageSize.width = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
2891 item.ImageSize.height = allocSize.ImageSizeMin + threadRand.Generate() % (allocSize.ImageSizeMax - allocSize.ImageSizeMin);
2892 }
2893 }
2894
2895 unusedItems.push_back(item);
2896 }
2897
2898 auto Allocate = [&](Item& item) -> VkResult
2899 {
2900 VmaAllocationCreateInfo allocCreateInfo = {};
2901 allocCreateInfo.pool = pool;
2902 allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
2903 VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
2904
2905 if(item.BufferSize)
2906 {
2907 bufferInfo.size = item.BufferSize;
2908 PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2909 return vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocCreateInfo, &item.Buf, &item.Alloc, nullptr);
2910 }
2911 else
2912 {
2913 assert(item.ImageSize.width && item.ImageSize.height);
2914
2915 imageInfo.extent.width = item.ImageSize.width;
2916 imageInfo.extent.height = item.ImageSize.height;
2917 PoolAllocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2918 return vmaCreateImage(g_hAllocator, &imageInfo, &allocCreateInfo, &item.Image, &item.Alloc, nullptr);
2919 }
2920 };
2921
2922 ////////////////////////////////////////////////////////////////////////////////
2923 // Frames
2924 for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
2925 {
2926 WaitForSingleObject(frameStartEvent, INFINITE);
2927
2928 // Always make some percent of used bufs unused, to choose different used ones.
2929 const size_t bufsToMakeUnused = usedItems.size() * config.ItemsToMakeUnusedPercent / 100;
2930 for(size_t i = 0; i < bufsToMakeUnused; ++i)
2931 {
2932 size_t index = threadRand.Generate() % usedItems.size();
2933 unusedItems.push_back(usedItems[index]);
2934 usedItems.erase(usedItems.begin() + index);
2935 }
2936
2937 // Determine which bufs we want to use in this frame.
2938 const size_t usedBufCount = (threadRand.Generate() % (config.UsedItemCountMax - config.UsedItemCountMin) + config.UsedItemCountMin)
2939 / config.ThreadCount;
2940 assert(usedBufCount < usedItems.size() + unusedItems.size());
2941 // Move some used to unused.
2942 while(usedBufCount < usedItems.size())
2943 {
2944 size_t index = threadRand.Generate() % usedItems.size();
2945 unusedItems.push_back(usedItems[index]);
2946 usedItems.erase(usedItems.begin() + index);
2947 }
2948 // Move some unused to used.
2949 while(usedBufCount > usedItems.size())
2950 {
2951 size_t index = threadRand.Generate() % unusedItems.size();
2952 usedItems.push_back(unusedItems[index]);
2953 unusedItems.erase(unusedItems.begin() + index);
2954 }
2955
2956 uint32_t touchExistingCount = 0;
2957 uint32_t touchLostCount = 0;
2958 uint32_t createSucceededCount = 0;
2959 uint32_t createFailedCount = 0;
2960
2961 // Touch all used bufs. If not created or lost, allocate.
2962 for(size_t i = 0; i < usedItems.size(); ++i)
2963 {
2964 Item& item = usedItems[i];
2965 // Not yet created.
2966 if(item.Alloc == VK_NULL_HANDLE)
2967 {
2968 res = Allocate(item);
2969 ++outThreadResult->AllocationCount;
2970 if(res != VK_SUCCESS)
2971 {
2972 item.Alloc = VK_NULL_HANDLE;
2973 item.Buf = VK_NULL_HANDLE;
2974 ++outThreadResult->FailedAllocationCount;
2975 outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
2976 ++createFailedCount;
2977 }
2978 else
2979 ++createSucceededCount;
2980 }
2981 else
2982 {
2983 // Touch.
2984 VmaAllocationInfo allocInfo;
2985 vmaGetAllocationInfo(g_hAllocator, item.Alloc, &allocInfo);
2986 // Lost.
2987 if(allocInfo.deviceMemory == VK_NULL_HANDLE)
2988 {
2989 ++touchLostCount;
2990
2991 // Destroy.
2992 {
2993 PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
2994 if(item.Buf)
2995 vmaDestroyBuffer(g_hAllocator, item.Buf, item.Alloc);
2996 else
2997 vmaDestroyImage(g_hAllocator, item.Image, item.Alloc);
2998 ++outThreadResult->DeallocationCount;
2999 }
3000 item.Alloc = VK_NULL_HANDLE;
3001 item.Buf = VK_NULL_HANDLE;
3002
3003 ++outThreadResult->LostAllocationCount;
3004 outThreadResult->LostAllocationTotalSize += item.CalcSizeBytes();
3005
3006 // Recreate.
3007 res = Allocate(item);
3008 ++outThreadResult->AllocationCount;
3009 // Creation failed.
3010 if(res != VK_SUCCESS)
3011 {
3012 ++outThreadResult->FailedAllocationCount;
3013 outThreadResult->FailedAllocationTotalSize += item.CalcSizeBytes();
3014 ++createFailedCount;
3015 }
3016 else
3017 ++createSucceededCount;
3018 }
3019 else
3020 ++touchExistingCount;
3021 }
3022 }
3023
3024 /*
3025 printf("Thread %u frame %u: Touch existing %u lost %u, create succeeded %u failed %u\n",
3026 randSeed, frameIndex,
3027 touchExistingCount, touchLostCount,
3028 createSucceededCount, createFailedCount);
3029 */
3030
3031 SetEvent(frameEndEvent);
3032 }
3033
3034 // Free all remaining items.
3035 for(size_t i = usedItems.size(); i--; )
3036 {
3037 PoolDeallocationTimeRegisterObj timeRegisterObj(*outThreadResult);
3038 if(usedItems[i].Buf)
3039 vmaDestroyBuffer(g_hAllocator, usedItems[i].Buf, usedItems[i].Alloc);
3040 else
3041 vmaDestroyImage(g_hAllocator, usedItems[i].Image, usedItems[i].Alloc);
3042 ++outThreadResult->DeallocationCount;
3043 }
3044 for(size_t i = unusedItems.size(); i--; )
3045 {
3046 PoolDeallocationTimeRegisterObj timeRegisterOb(*outThreadResult);
3047 if(unusedItems[i].Buf)
3048 vmaDestroyBuffer(g_hAllocator, unusedItems[i].Buf, unusedItems[i].Alloc);
3049 else
3050 vmaDestroyImage(g_hAllocator, unusedItems[i].Image, unusedItems[i].Alloc);
3051 ++outThreadResult->DeallocationCount;
3052 }
3053 };
3054
3055 // Launch threads.
3056 uint32_t threadRandSeed = mainRand.Generate();
3057 std::vector<HANDLE> frameStartEvents{config.ThreadCount};
3058 std::vector<HANDLE> frameEndEvents{config.ThreadCount};
3059 std::vector<std::thread> bkgThreads;
3060 std::vector<PoolTestThreadResult> threadResults{config.ThreadCount};
3061 for(uint32_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
3062 {
3063 frameStartEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
3064 frameEndEvents[threadIndex] = CreateEvent(NULL, FALSE, FALSE, NULL);
3065 bkgThreads.emplace_back(std::bind(
3066 ThreadProc,
3067 &threadResults[threadIndex],
3068 threadRandSeed + threadIndex,
3069 frameStartEvents[threadIndex],
3070 frameEndEvents[threadIndex]));
3071 }
3072
3073 // Execute frames.
3074 assert(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS);
3075 for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex)
3076 {
3077 vmaSetCurrentFrameIndex(g_hAllocator, frameIndex);
3078 for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
3079 SetEvent(frameStartEvents[threadIndex]);
3080 WaitForMultipleObjects(config.ThreadCount, &frameEndEvents[0], TRUE, INFINITE);
3081 }
3082
3083 // Wait for threads finished
3084 for(size_t i = 0; i < bkgThreads.size(); ++i)
3085 {
3086 bkgThreads[i].join();
3087 CloseHandle(frameEndEvents[i]);
3088 CloseHandle(frameStartEvents[i]);
3089 }
3090 bkgThreads.clear();
3091
3092 // Finish time measurement - before destroying pool.
3093 outResult.TotalTime = std::chrono::high_resolution_clock::now() - timeBeg;
3094
3095 vmaDestroyPool(g_hAllocator, pool);
3096
3097 outResult.AllocationTimeMin = duration::max();
3098 outResult.AllocationTimeAvg = duration::zero();
3099 outResult.AllocationTimeMax = duration::min();
3100 outResult.DeallocationTimeMin = duration::max();
3101 outResult.DeallocationTimeAvg = duration::zero();
3102 outResult.DeallocationTimeMax = duration::min();
3103 outResult.LostAllocationCount = 0;
3104 outResult.LostAllocationTotalSize = 0;
3105 outResult.FailedAllocationCount = 0;
3106 outResult.FailedAllocationTotalSize = 0;
3107 size_t allocationCount = 0;
3108 size_t deallocationCount = 0;
3109 for(size_t threadIndex = 0; threadIndex < config.ThreadCount; ++threadIndex)
3110 {
3111 const PoolTestThreadResult& threadResult = threadResults[threadIndex];
3112 outResult.AllocationTimeMin = std::min(outResult.AllocationTimeMin, threadResult.AllocationTimeMin);
3113 outResult.AllocationTimeMax = std::max(outResult.AllocationTimeMax, threadResult.AllocationTimeMax);
3114 outResult.AllocationTimeAvg += threadResult.AllocationTimeSum;
3115 outResult.DeallocationTimeMin = std::min(outResult.DeallocationTimeMin, threadResult.DeallocationTimeMin);
3116 outResult.DeallocationTimeMax = std::max(outResult.DeallocationTimeMax, threadResult.DeallocationTimeMax);
3117 outResult.DeallocationTimeAvg += threadResult.DeallocationTimeSum;
3118 allocationCount += threadResult.AllocationCount;
3119 deallocationCount += threadResult.DeallocationCount;
3120 outResult.FailedAllocationCount += threadResult.FailedAllocationCount;
3121 outResult.FailedAllocationTotalSize += threadResult.FailedAllocationTotalSize;
3122 outResult.LostAllocationCount += threadResult.LostAllocationCount;
3123 outResult.LostAllocationTotalSize += threadResult.LostAllocationTotalSize;
3124 }
3125 if(allocationCount)
3126 outResult.AllocationTimeAvg /= allocationCount;
3127 if(deallocationCount)
3128 outResult.DeallocationTimeAvg /= deallocationCount;
3129}
3130
// Returns true if the half-open memory ranges [ptr1, ptr1+size1) and
// [ptr2, ptr2+size2) share at least one byte. Two ranges starting at the
// same address are always reported as overlapping, even when a size is 0
// (this matches the original contract of the function).
static inline bool MemoryRegionsOverlap(char* ptr1, size_t size1, char* ptr2, size_t size2)
{
    if(ptr1 == ptr2)
        return true;
    // Normalize so we only compare the lower range's end against the
    // upper range's start.
    const bool firstIsLower = ptr1 < ptr2;
    char* const lowerBegin = firstIsLower ? ptr1 : ptr2;
    const size_t lowerSize = firstIsLower ? size1 : size2;
    char* const upperBegin = firstIsLower ? ptr2 : ptr1;
    return lowerBegin + lowerSize > upperBegin;
}
3140
// Single-threaded test of vmaMapMemory/vmaUnmapMemory semantics:
// reference-counted double mapping, persistent mapping via
// VMA_ALLOCATION_CREATE_MAPPED_BIT, and pMappedData reporting.
// The same scenario runs three times: against default memory (TEST_NORMAL),
// a custom pool (TEST_POOL), and dedicated allocations (TEST_DEDICATED).
static void TestMapping()
{
    wprintf(L"Testing mapping...\n");

    VkResult res;
    // Recorded during the TEST_NORMAL iteration and reused to create the
    // pool in the TEST_POOL iteration (iteration order matters here).
    uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            assert(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            assert(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufInfo.size = 0x10000;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        // CPU_ONLY guarantees host-visible memory so mapping can succeed.
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.pool = pool;
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        VmaAllocationInfo allocInfo;

        // Mapped manually

        // Create 2 buffers.
        BufferInfo bufferInfos[3];
        for(size_t i = 0; i < 2; ++i)
        {
            res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
                &bufferInfos[i].Buffer, &bufferInfos[i].Allocation, &allocInfo);
            assert(res == VK_SUCCESS);
            // Not persistently mapped, so no mapped pointer is reported yet.
            assert(allocInfo.pMappedData == nullptr);
            memTypeIndex = allocInfo.memoryType;
        }

        // Map buffer 0.
        char* data00 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data00);
        assert(res == VK_SUCCESS && data00 != nullptr);
        // Touch first and last byte of the 0x10000-byte mapping.
        data00[0xFFFF] = data00[0];

        // Map buffer 0 second time - must return the same pointer (refcounted).
        char* data01 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data01);
        assert(res == VK_SUCCESS && data01 == data00);

        // Map buffer 1 - its mapping must not alias buffer 0's.
        char* data1 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[1].Allocation, (void**)&data1);
        assert(res == VK_SUCCESS && data1 != nullptr);
        assert(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size));
        data1[0xFFFF] = data1[0];

        // Unmap buffer 0 two times - one unmap per map call.
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[0].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == nullptr);

        // Unmap buffer 1.
        vmaUnmapMemory(g_hAllocator, bufferInfos[1].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[1].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == nullptr);

        // Create 3rd buffer - persistently mapped.
        allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
        res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo,
            &bufferInfos[2].Buffer, &bufferInfos[2].Allocation, &allocInfo);
        assert(res == VK_SUCCESS && allocInfo.pMappedData != nullptr);

        // Map buffer 2 - manual map on top of persistent map returns the same pointer.
        char* data2 = nullptr;
        res = vmaMapMemory(g_hAllocator, bufferInfos[2].Allocation, (void**)&data2);
        assert(res == VK_SUCCESS && data2 == allocInfo.pMappedData);
        data2[0xFFFF] = data2[0];

        // Unmap buffer 2 - persistent mapping must survive the manual unmap.
        vmaUnmapMemory(g_hAllocator, bufferInfos[2].Allocation);
        vmaGetAllocationInfo(g_hAllocator, bufferInfos[2].Allocation, &allocInfo);
        assert(allocInfo.pMappedData == data2);

        // Destroy all buffers.
        for(size_t i = 3; i--; )
            vmaDestroyBuffer(g_hAllocator, bufferInfos[i].Buffer, bufferInfos[i].Allocation);

        // pool is nullptr except in the TEST_POOL iteration; presumably
        // vmaDestroyPool accepts a null pool as a no-op - confirm.
        vmaDestroyPool(g_hAllocator, pool);
    }
}
3245
// Concurrent stress test of vmaMapMemory/vmaUnmapMemory: threadCount threads
// each create threadBufferCount buffers, map them in a randomly chosen mode
// (never / briefly / until destruction / twice / persistently), touch the
// mapped memory, and destroy everything. Repeated against default memory,
// a custom pool, and dedicated allocations.
static void TestMappingMultithreaded()
{
    wprintf(L"Testing mapping multithreaded...\n");

    static const uint32_t threadCount = 16;
    static const uint32_t bufferCount = 1024;
    static const uint32_t threadBufferCount = bufferCount / threadCount;

    VkResult res;
    // Written by whichever worker thread allocates first (during TEST_NORMAL),
    // read when creating the pool for TEST_POOL.
    // NOTE(review): `volatile` is not a thread-safety primitive -
    // std::atomic<uint32_t> would express this more safely; confirm the
    // check-then-write race here is considered acceptable for a test.
    volatile uint32_t memTypeIndex = UINT32_MAX;

    enum TEST
    {
        TEST_NORMAL,
        TEST_POOL,
        TEST_DEDICATED,
        TEST_COUNT
    };
    for(uint32_t testIndex = 0; testIndex < TEST_COUNT; ++testIndex)
    {
        VmaPool pool = nullptr;
        if(testIndex == TEST_POOL)
        {
            assert(memTypeIndex != UINT32_MAX);
            VmaPoolCreateInfo poolInfo = {};
            poolInfo.memoryTypeIndex = memTypeIndex;
            res = vmaCreatePool(g_hAllocator, &poolInfo, &pool);
            assert(res == VK_SUCCESS);
        }

        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 0x10000;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        // CPU_ONLY guarantees host-visible memory so mapping can succeed.
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.pool = pool;
        if(testIndex == TEST_DEDICATED)
            allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

        std::thread threads[threadCount];
        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
        {
            // Capture everything by copy except memTypeIndex, which is shared.
            threads[threadIndex] = std::thread([=, &memTypeIndex](){
                // ======== THREAD FUNCTION ========

                RandomNumberGenerator rand{threadIndex};

                enum class MODE
                {
                    // Don't map this buffer at all.
                    DONT_MAP,
                    // Map and quickly unmap.
                    MAP_FOR_MOMENT,
                    // Map and unmap before destruction.
                    MAP_FOR_LONGER,
                    // Map two times. Quickly unmap, second unmap before destruction.
                    MAP_TWO_TIMES,
                    // Create this buffer as persistently mapped.
                    PERSISTENTLY_MAPPED,
                    COUNT
                };
                std::vector<BufferInfo> bufInfos{threadBufferCount};
                std::vector<MODE> bufModes{threadBufferCount};

                for(uint32_t bufferIndex = 0; bufferIndex < threadBufferCount; ++bufferIndex)
                {
                    BufferInfo& bufInfo = bufInfos[bufferIndex];
                    const MODE mode = (MODE)(rand.Generate() % (uint32_t)MODE::COUNT);
                    bufModes[bufferIndex] = mode;

                    VmaAllocationCreateInfo localAllocCreateInfo = allocCreateInfo;
                    if(mode == MODE::PERSISTENTLY_MAPPED)
                        localAllocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;

                    VmaAllocationInfo allocInfo;
                    VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &localAllocCreateInfo,
                        &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo);
                    assert(res == VK_SUCCESS);

                    // First successful allocation publishes the memory type
                    // for the TEST_POOL pass (racy check-then-write, see above).
                    if(memTypeIndex == UINT32_MAX)
                        memTypeIndex = allocInfo.memoryType;

                    char* data = nullptr;

                    if(mode == MODE::PERSISTENTLY_MAPPED)
                    {
                        data = (char*)allocInfo.pMappedData;
                        assert(data != nullptr);
                    }
                    else if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_FOR_LONGER ||
                        mode == MODE::MAP_TWO_TIMES)
                    {
                        assert(data == nullptr);
                        res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data);
                        assert(res == VK_SUCCESS && data != nullptr);

                        if(mode == MODE::MAP_TWO_TIMES)
                        {
                            // Second map must return the same pointer (refcounted).
                            char* data2 = nullptr;
                            res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data2);
                            assert(res == VK_SUCCESS && data2 == data);
                        }
                    }
                    else if(mode == MODE::DONT_MAP)
                    {
                        assert(allocInfo.pMappedData == nullptr);
                    }
                    else
                        assert(0);

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];

                    if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfo.Allocation);

                        // After one unmap: MAP_FOR_MOMENT is fully unmapped,
                        // MAP_TWO_TIMES still holds one map reference.
                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfo.Allocation, &allocInfo);
                        if(mode == MODE::MAP_FOR_MOMENT)
                            assert(allocInfo.pMappedData == nullptr);
                        else
                            assert(allocInfo.pMappedData == data);
                    }

                    // Randomized scheduling jitter to shake out races.
                    switch(rand.Generate() % 3)
                    {
                    case 0: Sleep(0); break; // Yield.
                    case 1: Sleep(10); break; // 10 ms
                    // default: No sleep.
                    }

                    // Test if reading and writing from the beginning and end of mapped memory doesn't crash.
                    if(data)
                        data[0xFFFF] = data[0];
                }

                // Tear down in reverse order; modes that still hold a map
                // reference are unmapped first.
                for(size_t bufferIndex = threadBufferCount; bufferIndex--; )
                {
                    if(bufModes[bufferIndex] == MODE::MAP_FOR_LONGER ||
                        bufModes[bufferIndex] == MODE::MAP_TWO_TIMES)
                    {
                        vmaUnmapMemory(g_hAllocator, bufInfos[bufferIndex].Allocation);

                        VmaAllocationInfo allocInfo;
                        vmaGetAllocationInfo(g_hAllocator, bufInfos[bufferIndex].Allocation, &allocInfo);
                        assert(allocInfo.pMappedData == nullptr);
                    }

                    vmaDestroyBuffer(g_hAllocator, bufInfos[bufferIndex].Buffer, bufInfos[bufferIndex].Allocation);
                }
            });
        }

        for(uint32_t threadIndex = 0; threadIndex < threadCount; ++threadIndex)
            threads[threadIndex].join();

        vmaDestroyPool(g_hAllocator, pool);
    }
}
3408
// Writes the CSV column header line for main test results to `file`.
// Must stay in sync with the row format produced by WriteMainTestResult.
static void WriteMainTestResultHeader(FILE* file)
{
    // Column names in output order. "Code" and "Time" are written as separate
    // row fields; the following six arrive pre-joined inside the
    // testDescription argument of WriteMainTestResult.
    static const char* const COLUMN_NAMES[] = {
        "Code",
        "Time",
        "Threads",
        "Buffers and images",
        "Sizes",
        "Operations",
        "Allocation strategy",
        "Free order",
        "Total Time (us)",
        "Allocation Time Min (us)",
        "Allocation Time Avg (us)",
        "Allocation Time Max (us)",
        "Deallocation Time Min (us)",
        "Deallocation Time Avg (us)",
        "Deallocation Time Max (us)",
        "Total Memory Allocated (B)",
        "Free Range Size Avg (B)",
        "Free Range Size Max (B)",
    };
    const size_t columnCount = sizeof(COLUMN_NAMES) / sizeof(COLUMN_NAMES[0]);
    for(size_t i = 0; i < columnCount; ++i)
    {
        fputs(COLUMN_NAMES[i], file);
        fputc(i + 1 < columnCount ? ',' : '\n', file);
    }
}
3425
// Appends one CSV row with the results of one main test run to `file`.
// The column layout must match WriteMainTestResultHeader:
// `testDescription` is expected to already contain the six configuration
// columns (threads, buffers/images, sizes, operations, strategy, free order)
// joined with commas.
// NOTE(review): the `config` parameter is currently unused in this function -
// all configuration data comes pre-formatted via testDescription.
static void WriteMainTestResult(
    FILE* file,
    const char* codeDescription,
    const char* testDescription,
    const Config& config, const Result& result)
{
    // Durations converted to seconds here, printed below in microseconds.
    float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
    float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
    float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
    float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
    float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
    float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
    float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);

    std::string currTime;
    CurrentTimeToStr(currTime);

    // %I64u is the MSVC-specific length modifier for unsigned 64-bit values
    // (this file is Windows-only - see the #ifdef _WIN32 at the top).
    fprintf(file,
        "%s,%s,%s,"
        "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u\n",
        codeDescription,
        currTime.c_str(),
        testDescription,
        totalTimeSeconds * 1e6f,
        allocationTimeMinSeconds * 1e6f,
        allocationTimeAvgSeconds * 1e6f,
        allocationTimeMaxSeconds * 1e6f,
        deallocationTimeMinSeconds * 1e6f,
        deallocationTimeAvgSeconds * 1e6f,
        deallocationTimeMaxSeconds * 1e6f,
        result.TotalMemoryAllocated,
        result.FreeRangeSizeAvg,
        result.FreeRangeSizeMax);
}
3460
// Writes the CSV column header line for pool test results to `file`.
// Must stay in sync with the row format produced by WritePoolTestResult.
static void WritePoolTestResultHeader(FILE* file)
{
    // Kept as a single string constant so the column list is easy to compare
    // against WritePoolTestResult's format string.
    static const char HEADER_LINE[] =
        "Code,Test,Time,"
        "Config,"
        "Total Time (us),"
        "Allocation Time Min (us),"
        "Allocation Time Avg (us),"
        "Allocation Time Max (us),"
        "Deallocation Time Min (us),"
        "Deallocation Time Avg (us),"
        "Deallocation Time Max (us),"
        "Lost Allocation Count,"
        "Lost Allocation Total Size (B),"
        "Failed Allocation Count,"
        "Failed Allocation Total Size (B)\n";
    fputs(HEADER_LINE, file);
}
3478
// Appends one CSV row with the results of one pool benchmark run to `file`.
// The column layout must match WritePoolTestResultHeader; the "Config" column
// is a single space-separated field describing the PoolTestConfig.
static void WritePoolTestResult(
    FILE* file,
    const char* codeDescription,
    const char* testDescription,
    const PoolTestConfig& config,
    const PoolTestResult& result)
{
    // Durations converted to seconds here, printed below in microseconds.
    float totalTimeSeconds = ToFloatSeconds(result.TotalTime);
    float allocationTimeMinSeconds = ToFloatSeconds(result.AllocationTimeMin);
    float allocationTimeAvgSeconds = ToFloatSeconds(result.AllocationTimeAvg);
    float allocationTimeMaxSeconds = ToFloatSeconds(result.AllocationTimeMax);
    float deallocationTimeMinSeconds = ToFloatSeconds(result.DeallocationTimeMin);
    float deallocationTimeAvgSeconds = ToFloatSeconds(result.DeallocationTimeAvg);
    float deallocationTimeMaxSeconds = ToFloatSeconds(result.DeallocationTimeMax);

    std::string currTime;
    CurrentTimeToStr(currTime);

    // %llu for the casted PoolSize; %I64u is the MSVC-specific 64-bit modifier
    // used for the result counters (this file is Windows-only).
    fprintf(file,
        "%s,%s,%s,"
        "ThreadCount=%u PoolSize=%llu FrameCount=%u TotalItemCount=%u UsedItemCount=%u...%u ItemsToMakeUnusedPercent=%u,"
        "%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%I64u,%I64u,%I64u,%I64u\n",
        // General
        codeDescription,
        testDescription,
        currTime.c_str(),
        // Config
        config.ThreadCount,
        (unsigned long long)config.PoolSize,
        config.FrameCount,
        config.TotalItemCount,
        config.UsedItemCountMin,
        config.UsedItemCountMax,
        config.ItemsToMakeUnusedPercent,
        // Results
        totalTimeSeconds * 1e6f,
        allocationTimeMinSeconds * 1e6f,
        allocationTimeAvgSeconds * 1e6f,
        allocationTimeMaxSeconds * 1e6f,
        deallocationTimeMinSeconds * 1e6f,
        deallocationTimeAvgSeconds * 1e6f,
        deallocationTimeMaxSeconds * 1e6f,
        result.LostAllocationCount,
        result.LostAllocationTotalSize,
        result.FailedAllocationCount,
        result.FailedAllocationTotalSize);
}
3526
// Runs MainTest once with a single hand-tuned configuration and appends the
// result as one CSV row to `file`. Edit the values below to experiment.
static void PerformCustomMainTest(FILE* file)
{
    Config config{};
    config.RandSeed = 65735476;
    //config.MaxBytesToAllocate = 4ull * 1024 * 1024; // 4 MB
    config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
    config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
    config.FreeOrder = FREE_ORDER::FORWARD;
    config.ThreadCount = 16;
    config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
    config.AllocationStrategy = 0; // Default allocation strategy.

    // Buffers
    //config.AllocationSizes.push_back({4, 16, 1024});
    config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB

    // Images
    //config.AllocationSizes.push_back({4, 0, 0, 4, 32});
    //config.AllocationSizes.push_back({4, 0, 0, 256, 2048});

    // Pre-allocate 5% of the budget before the randomized operations start.
    config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
    config.AdditionalOperationCount = 1024;

    Result result{};
    VkResult res = MainTest(result, config);
    assert(res == VK_SUCCESS);
    // NOTE(review): "Foo" duplicates the file-level CODE_DESCRIPTION constant
    // (same value) - consider passing CODE_DESCRIPTION here for consistency.
    WriteMainTestResult(file, "Foo", "CustomTest", config, result);
}
3555
// Runs TestPool_Benchmark once with a single hand-tuned configuration and
// appends the result as one CSV row to `file`. Edit the values below to
// experiment.
static void PerformCustomPoolTest(FILE* file)
{
    PoolTestConfig config;
    // Initial value only - overwritten below once the average resource size
    // is known.
    config.PoolSize = 100 * 1024 * 1024;
    config.RandSeed = 2345764;
    config.ThreadCount = 1;
    config.FrameCount = 200;
    config.ItemsToMakeUnusedPercent = 2;

    // Buffers: 1 KB ... 1 MB.
    AllocationSize allocSize = {};
    allocSize.BufferSizeMin = 1024;
    allocSize.BufferSizeMax = 1024 * 1024;
    allocSize.Probability = 1;
    config.AllocationSizes.push_back(allocSize);

    // Images: 128 ... 1024 pixels per side, equal probability with buffers.
    allocSize.BufferSizeMin = 0;
    allocSize.BufferSizeMax = 0;
    allocSize.ImageSizeMin = 128;
    allocSize.ImageSizeMax = 1024;
    allocSize.Probability = 1;
    config.AllocationSizes.push_back(allocSize);

    // Size the pool to hold ~200 average resources; use fewer simultaneously
    // so allocations can be made lost and recreated.
    config.PoolSize = config.CalcAvgResourceSize() * 200;
    config.UsedItemCountMax = 160;
    config.TotalItemCount = config.UsedItemCountMax * 10;
    config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;

    // Silence the aliasing warning for the duration of the benchmark -
    // presumably lost-allocation memory reuse would trigger it; confirm.
    g_MemoryAliasingWarningEnabled = false;
    PoolTestResult result = {};
    TestPool_Benchmark(result, config);
    g_MemoryAliasingWarningEnabled = true;

    // NOTE(review): placeholder descriptions - other call sites use
    // CODE_DESCRIPTION and a real test name.
    WritePoolTestResult(file, "Code desc", "Test desc", config, result);
}
3590
Adam Sawickib8333fb2018-03-13 16:15:53 +01003591static void PerformMainTests(FILE* file)
3592{
3593 uint32_t repeatCount = 1;
3594 if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;
3595
3596 Config config{};
3597 config.RandSeed = 65735476;
3598 config.MemUsageProbability[0] = 1; // VMA_MEMORY_USAGE_GPU_ONLY
3599 config.FreeOrder = FREE_ORDER::FORWARD;
3600
3601 size_t threadCountCount = 1;
3602 switch(ConfigType)
3603 {
3604 case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
3605 case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
3606 case CONFIG_TYPE_AVERAGE: threadCountCount = 3; break;
3607 case CONFIG_TYPE_LARGE: threadCountCount = 5; break;
3608 case CONFIG_TYPE_MAXIMUM: threadCountCount = 7; break;
3609 default: assert(0);
3610 }
Adam Sawicki0667e332018-08-24 17:26:44 +02003611
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02003612 const size_t strategyCount = GetAllocationStrategyCount();
Adam Sawicki0667e332018-08-24 17:26:44 +02003613
Adam Sawickib8333fb2018-03-13 16:15:53 +01003614 for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
3615 {
3616 std::string desc1;
3617
3618 switch(threadCountIndex)
3619 {
3620 case 0:
3621 desc1 += "1_thread";
3622 config.ThreadCount = 1;
3623 config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
3624 break;
3625 case 1:
3626 desc1 += "16_threads+0%_common";
3627 config.ThreadCount = 16;
3628 config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
3629 break;
3630 case 2:
3631 desc1 += "16_threads+50%_common";
3632 config.ThreadCount = 16;
3633 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
3634 break;
3635 case 3:
3636 desc1 += "16_threads+100%_common";
3637 config.ThreadCount = 16;
3638 config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
3639 break;
3640 case 4:
3641 desc1 += "2_threads+0%_common";
3642 config.ThreadCount = 2;
3643 config.ThreadsUsingCommonAllocationsProbabilityPercent = 0;
3644 break;
3645 case 5:
3646 desc1 += "2_threads+50%_common";
3647 config.ThreadCount = 2;
3648 config.ThreadsUsingCommonAllocationsProbabilityPercent = 50;
3649 break;
3650 case 6:
3651 desc1 += "2_threads+100%_common";
3652 config.ThreadCount = 2;
3653 config.ThreadsUsingCommonAllocationsProbabilityPercent = 100;
3654 break;
3655 default:
3656 assert(0);
3657 }
3658
3659 // 0 = buffers, 1 = images, 2 = buffers and images
3660 size_t buffersVsImagesCount = 2;
3661 if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
3662 for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
3663 {
3664 std::string desc2 = desc1;
3665 switch(buffersVsImagesIndex)
3666 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02003667 case 0: desc2 += ",Buffers"; break;
3668 case 1: desc2 += ",Images"; break;
3669 case 2: desc2 += ",Buffers+Images"; break;
Adam Sawickib8333fb2018-03-13 16:15:53 +01003670 default: assert(0);
3671 }
3672
3673 // 0 = small, 1 = large, 2 = small and large
3674 size_t smallVsLargeCount = 2;
3675 if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
3676 for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
3677 {
3678 std::string desc3 = desc2;
3679 switch(smallVsLargeIndex)
3680 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02003681 case 0: desc3 += ",Small"; break;
3682 case 1: desc3 += ",Large"; break;
3683 case 2: desc3 += ",Small+Large"; break;
Adam Sawickib8333fb2018-03-13 16:15:53 +01003684 default: assert(0);
3685 }
3686
3687 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3688 config.MaxBytesToAllocate = 4ull * 1024 * 1024 * 1024; // 4 GB
3689 else
3690 config.MaxBytesToAllocate = 4ull * 1024 * 1024;
3691
3692 // 0 = varying sizes min...max, 1 = set of constant sizes
3693 size_t constantSizesCount = 1;
3694 if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
3695 for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
3696 {
3697 std::string desc4 = desc3;
3698 switch(constantSizesIndex)
3699 {
3700 case 0: desc4 += " Varying_sizes"; break;
3701 case 1: desc4 += " Constant_sizes"; break;
3702 default: assert(0);
3703 }
3704
3705 config.AllocationSizes.clear();
3706 // Buffers present
3707 if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
3708 {
3709 // Small
3710 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3711 {
3712 // Varying size
3713 if(constantSizesIndex == 0)
3714 config.AllocationSizes.push_back({4, 16, 1024});
3715 // Constant sizes
3716 else
3717 {
3718 config.AllocationSizes.push_back({1, 16, 16});
3719 config.AllocationSizes.push_back({1, 64, 64});
3720 config.AllocationSizes.push_back({1, 256, 256});
3721 config.AllocationSizes.push_back({1, 1024, 1024});
3722 }
3723 }
3724 // Large
3725 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3726 {
3727 // Varying size
3728 if(constantSizesIndex == 0)
3729 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
3730 // Constant sizes
3731 else
3732 {
3733 config.AllocationSizes.push_back({1, 0x10000, 0x10000});
3734 config.AllocationSizes.push_back({1, 0x80000, 0x80000});
3735 config.AllocationSizes.push_back({1, 0x200000, 0x200000});
3736 config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
3737 }
3738 }
3739 }
3740 // Images present
3741 if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
3742 {
3743 // Small
3744 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3745 {
3746 // Varying size
3747 if(constantSizesIndex == 0)
3748 config.AllocationSizes.push_back({4, 0, 0, 4, 32});
3749 // Constant sizes
3750 else
3751 {
3752 config.AllocationSizes.push_back({1, 0, 0, 4, 4});
3753 config.AllocationSizes.push_back({1, 0, 0, 8, 8});
3754 config.AllocationSizes.push_back({1, 0, 0, 16, 16});
3755 config.AllocationSizes.push_back({1, 0, 0, 32, 32});
3756 }
3757 }
3758 // Large
3759 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3760 {
3761 // Varying size
3762 if(constantSizesIndex == 0)
3763 config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
3764 // Constant sizes
3765 else
3766 {
3767 config.AllocationSizes.push_back({1, 0, 0, 256, 256});
3768 config.AllocationSizes.push_back({1, 0, 0, 512, 512});
3769 config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
3770 config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
3771 }
3772 }
3773 }
3774
3775 // 0 = 100%, additional_operations = 0, 1 = 50%, 2 = 5%, 3 = 95% additional_operations = a lot
3776 size_t beginBytesToAllocateCount = 1;
3777 if(ConfigType >= CONFIG_TYPE_SMALL) ++beginBytesToAllocateCount;
3778 if(ConfigType >= CONFIG_TYPE_AVERAGE) ++beginBytesToAllocateCount;
3779 if(ConfigType >= CONFIG_TYPE_LARGE) ++beginBytesToAllocateCount;
3780 for(size_t beginBytesToAllocateIndex = 0; beginBytesToAllocateIndex < beginBytesToAllocateCount; ++beginBytesToAllocateIndex)
3781 {
3782 std::string desc5 = desc4;
3783
3784 switch(beginBytesToAllocateIndex)
3785 {
3786 case 0:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003787 desc5 += ",Allocate_100%";
Adam Sawickib8333fb2018-03-13 16:15:53 +01003788 config.BeginBytesToAllocate = config.MaxBytesToAllocate;
3789 config.AdditionalOperationCount = 0;
3790 break;
3791 case 1:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003792 desc5 += ",Allocate_50%+Operations";
Adam Sawickib8333fb2018-03-13 16:15:53 +01003793 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 50 / 100;
3794 config.AdditionalOperationCount = 1024;
3795 break;
3796 case 2:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003797 desc5 += ",Allocate_5%+Operations";
Adam Sawickib8333fb2018-03-13 16:15:53 +01003798 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 5 / 100;
3799 config.AdditionalOperationCount = 1024;
3800 break;
3801 case 3:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003802 desc5 += ",Allocate_95%+Operations";
Adam Sawickib8333fb2018-03-13 16:15:53 +01003803 config.BeginBytesToAllocate = config.MaxBytesToAllocate * 95 / 100;
3804 config.AdditionalOperationCount = 1024;
3805 break;
3806 default:
3807 assert(0);
3808 }
3809
Adam Sawicki0667e332018-08-24 17:26:44 +02003810 for(size_t strategyIndex = 0; strategyIndex < strategyCount; ++strategyIndex)
Adam Sawickib8333fb2018-03-13 16:15:53 +01003811 {
Adam Sawicki0667e332018-08-24 17:26:44 +02003812 std::string desc6 = desc5;
3813 switch(strategyIndex)
3814 {
3815 case 0:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003816 desc6 += ",BestFit";
Adam Sawicki0667e332018-08-24 17:26:44 +02003817 config.AllocationStrategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
3818 break;
3819 case 1:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003820 desc6 += ",WorstFit";
Adam Sawicki0667e332018-08-24 17:26:44 +02003821 config.AllocationStrategy = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT;
3822 break;
3823 case 2:
Adam Sawicki740b08f2018-08-27 13:42:07 +02003824 desc6 += ",FirstFit";
Adam Sawicki0667e332018-08-24 17:26:44 +02003825 config.AllocationStrategy = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT;
3826 break;
3827 default:
3828 assert(0);
3829 }
Adam Sawickib8333fb2018-03-13 16:15:53 +01003830
Adam Sawicki33d2ce72018-08-27 13:59:13 +02003831 desc6 += ',';
3832 desc6 += FREE_ORDER_NAMES[(uint32_t)config.FreeOrder];
Adam Sawicki740b08f2018-08-27 13:42:07 +02003833
3834 const char* testDescription = desc6.c_str();
Adam Sawicki0667e332018-08-24 17:26:44 +02003835
3836 for(size_t repeat = 0; repeat < repeatCount; ++repeat)
3837 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02003838 printf("%s #%u\n", testDescription, (uint32_t)repeat);
Adam Sawicki0667e332018-08-24 17:26:44 +02003839
3840 Result result{};
3841 VkResult res = MainTest(result, config);
3842 assert(res == VK_SUCCESS);
Adam Sawicki740b08f2018-08-27 13:42:07 +02003843 if(file)
3844 {
3845 WriteMainTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
3846 }
Adam Sawicki0667e332018-08-24 17:26:44 +02003847 }
Adam Sawickib8333fb2018-03-13 16:15:53 +01003848 }
3849 }
3850 }
3851 }
3852 }
3853 }
3854}
3855
3856static void PerformPoolTests(FILE* file)
3857{
3858 const size_t AVG_RESOURCES_PER_POOL = 300;
3859
3860 uint32_t repeatCount = 1;
3861 if(ConfigType >= CONFIG_TYPE_MAXIMUM) repeatCount = 3;
3862
3863 PoolTestConfig config{};
3864 config.RandSeed = 2346343;
3865 config.FrameCount = 200;
3866 config.ItemsToMakeUnusedPercent = 2;
3867
3868 size_t threadCountCount = 1;
3869 switch(ConfigType)
3870 {
3871 case CONFIG_TYPE_MINIMUM: threadCountCount = 1; break;
3872 case CONFIG_TYPE_SMALL: threadCountCount = 2; break;
3873 case CONFIG_TYPE_AVERAGE: threadCountCount = 2; break;
3874 case CONFIG_TYPE_LARGE: threadCountCount = 3; break;
3875 case CONFIG_TYPE_MAXIMUM: threadCountCount = 3; break;
3876 default: assert(0);
3877 }
3878 for(size_t threadCountIndex = 0; threadCountIndex < threadCountCount; ++threadCountIndex)
3879 {
3880 std::string desc1;
3881
3882 switch(threadCountIndex)
3883 {
3884 case 0:
3885 desc1 += "1_thread";
3886 config.ThreadCount = 1;
3887 break;
3888 case 1:
3889 desc1 += "16_threads";
3890 config.ThreadCount = 16;
3891 break;
3892 case 2:
3893 desc1 += "2_threads";
3894 config.ThreadCount = 2;
3895 break;
3896 default:
3897 assert(0);
3898 }
3899
3900 // 0 = buffers, 1 = images, 2 = buffers and images
3901 size_t buffersVsImagesCount = 2;
3902 if(ConfigType >= CONFIG_TYPE_LARGE) ++buffersVsImagesCount;
3903 for(size_t buffersVsImagesIndex = 0; buffersVsImagesIndex < buffersVsImagesCount; ++buffersVsImagesIndex)
3904 {
3905 std::string desc2 = desc1;
3906 switch(buffersVsImagesIndex)
3907 {
3908 case 0: desc2 += " Buffers"; break;
3909 case 1: desc2 += " Images"; break;
3910 case 2: desc2 += " Buffers+Images"; break;
3911 default: assert(0);
3912 }
3913
3914 // 0 = small, 1 = large, 2 = small and large
3915 size_t smallVsLargeCount = 2;
3916 if(ConfigType >= CONFIG_TYPE_LARGE) ++smallVsLargeCount;
3917 for(size_t smallVsLargeIndex = 0; smallVsLargeIndex < smallVsLargeCount; ++smallVsLargeIndex)
3918 {
3919 std::string desc3 = desc2;
3920 switch(smallVsLargeIndex)
3921 {
3922 case 0: desc3 += " Small"; break;
3923 case 1: desc3 += " Large"; break;
3924 case 2: desc3 += " Small+Large"; break;
3925 default: assert(0);
3926 }
3927
3928 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3929 config.PoolSize = 6ull * 1024 * 1024 * 1024; // 6 GB
3930 else
3931 config.PoolSize = 4ull * 1024 * 1024;
3932
3933 // 0 = varying sizes min...max, 1 = set of constant sizes
3934 size_t constantSizesCount = 1;
3935 if(ConfigType >= CONFIG_TYPE_SMALL) ++constantSizesCount;
3936 for(size_t constantSizesIndex = 0; constantSizesIndex < constantSizesCount; ++constantSizesIndex)
3937 {
3938 std::string desc4 = desc3;
3939 switch(constantSizesIndex)
3940 {
3941 case 0: desc4 += " Varying_sizes"; break;
3942 case 1: desc4 += " Constant_sizes"; break;
3943 default: assert(0);
3944 }
3945
3946 config.AllocationSizes.clear();
3947 // Buffers present
3948 if(buffersVsImagesIndex == 0 || buffersVsImagesIndex == 2)
3949 {
3950 // Small
3951 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3952 {
3953 // Varying size
3954 if(constantSizesIndex == 0)
3955 config.AllocationSizes.push_back({4, 16, 1024});
3956 // Constant sizes
3957 else
3958 {
3959 config.AllocationSizes.push_back({1, 16, 16});
3960 config.AllocationSizes.push_back({1, 64, 64});
3961 config.AllocationSizes.push_back({1, 256, 256});
3962 config.AllocationSizes.push_back({1, 1024, 1024});
3963 }
3964 }
3965 // Large
3966 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
3967 {
3968 // Varying size
3969 if(constantSizesIndex == 0)
3970 config.AllocationSizes.push_back({4, 0x10000, 0xA00000}); // 64 KB ... 10 MB
3971 // Constant sizes
3972 else
3973 {
3974 config.AllocationSizes.push_back({1, 0x10000, 0x10000});
3975 config.AllocationSizes.push_back({1, 0x80000, 0x80000});
3976 config.AllocationSizes.push_back({1, 0x200000, 0x200000});
3977 config.AllocationSizes.push_back({1, 0xA00000, 0xA00000});
3978 }
3979 }
3980 }
3981 // Images present
3982 if(buffersVsImagesIndex == 1 || buffersVsImagesIndex == 2)
3983 {
3984 // Small
3985 if(smallVsLargeIndex == 0 || smallVsLargeIndex == 2)
3986 {
3987 // Varying size
3988 if(constantSizesIndex == 0)
3989 config.AllocationSizes.push_back({4, 0, 0, 4, 32});
3990 // Constant sizes
3991 else
3992 {
3993 config.AllocationSizes.push_back({1, 0, 0, 4, 4});
3994 config.AllocationSizes.push_back({1, 0, 0, 8, 8});
3995 config.AllocationSizes.push_back({1, 0, 0, 16, 16});
3996 config.AllocationSizes.push_back({1, 0, 0, 32, 32});
3997 }
3998 }
3999 // Large
4000 if(smallVsLargeIndex == 1 || smallVsLargeIndex == 2)
4001 {
4002 // Varying size
4003 if(constantSizesIndex == 0)
4004 config.AllocationSizes.push_back({4, 0, 0, 256, 2048});
4005 // Constant sizes
4006 else
4007 {
4008 config.AllocationSizes.push_back({1, 0, 0, 256, 256});
4009 config.AllocationSizes.push_back({1, 0, 0, 512, 512});
4010 config.AllocationSizes.push_back({1, 0, 0, 1024, 1024});
4011 config.AllocationSizes.push_back({1, 0, 0, 2048, 2048});
4012 }
4013 }
4014 }
4015
4016 const VkDeviceSize avgResourceSize = config.CalcAvgResourceSize();
4017 config.PoolSize = avgResourceSize * AVG_RESOURCES_PER_POOL;
4018
4019 // 0 = 66%, 1 = 133%, 2 = 100%, 3 = 33%, 4 = 166%
4020 size_t subscriptionModeCount;
4021 switch(ConfigType)
4022 {
4023 case CONFIG_TYPE_MINIMUM: subscriptionModeCount = 2; break;
4024 case CONFIG_TYPE_SMALL: subscriptionModeCount = 2; break;
4025 case CONFIG_TYPE_AVERAGE: subscriptionModeCount = 3; break;
4026 case CONFIG_TYPE_LARGE: subscriptionModeCount = 5; break;
4027 case CONFIG_TYPE_MAXIMUM: subscriptionModeCount = 5; break;
4028 default: assert(0);
4029 }
4030 for(size_t subscriptionModeIndex = 0; subscriptionModeIndex < subscriptionModeCount; ++subscriptionModeIndex)
4031 {
4032 std::string desc5 = desc4;
4033
4034 switch(subscriptionModeIndex)
4035 {
4036 case 0:
4037 desc5 += " Subscription_66%";
4038 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 66 / 100;
4039 break;
4040 case 1:
4041 desc5 += " Subscription_133%";
4042 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 133 / 100;
4043 break;
4044 case 2:
4045 desc5 += " Subscription_100%";
4046 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL;
4047 break;
4048 case 3:
4049 desc5 += " Subscription_33%";
4050 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 33 / 100;
4051 break;
4052 case 4:
4053 desc5 += " Subscription_166%";
4054 config.UsedItemCountMax = AVG_RESOURCES_PER_POOL * 166 / 100;
4055 break;
4056 default:
4057 assert(0);
4058 }
4059
4060 config.TotalItemCount = config.UsedItemCountMax * 5;
4061 config.UsedItemCountMin = config.UsedItemCountMax * 80 / 100;
4062
4063 const char* testDescription = desc5.c_str();
4064
4065 for(size_t repeat = 0; repeat < repeatCount; ++repeat)
4066 {
Adam Sawicki740b08f2018-08-27 13:42:07 +02004067 printf("%s #%u\n", testDescription, (uint32_t)repeat);
Adam Sawickib8333fb2018-03-13 16:15:53 +01004068
4069 PoolTestResult result{};
4070 g_MemoryAliasingWarningEnabled = false;
4071 TestPool_Benchmark(result, config);
4072 g_MemoryAliasingWarningEnabled = true;
4073 WritePoolTestResult(file, CODE_DESCRIPTION, testDescription, config, result);
4074 }
4075 }
4076 }
4077 }
4078 }
4079 }
4080}
4081
Adam Sawickia83793a2018-09-03 13:40:42 +02004082static void BasicTestBuddyAllocator()
4083{
4084 wprintf(L"Basic test buddy allocator\n");
4085
4086 RandomNumberGenerator rand{76543};
4087
4088 VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
4089 sampleBufCreateInfo.size = 1024; // Whatever.
4090 sampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
4091
4092 VmaAllocationCreateInfo sampleAllocCreateInfo = {};
4093 sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
4094
4095 VmaPoolCreateInfo poolCreateInfo = {};
4096 VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
4097 assert(res == VK_SUCCESS);
4098
4099 poolCreateInfo.blockSize = 1024 * 1024;
4100 poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
4101 poolCreateInfo.minBlockCount = poolCreateInfo.maxBlockCount = 1;
4102
4103 VmaPool pool = nullptr;
4104 res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);
4105 assert(res == VK_SUCCESS);
4106
4107 VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo;
4108
4109 VmaAllocationCreateInfo allocCreateInfo = {};
4110 allocCreateInfo.pool = pool;
4111
4112 std::vector<BufferInfo> bufInfo;
4113 BufferInfo newBufInfo;
4114 VmaAllocationInfo allocInfo;
4115
4116 bufCreateInfo.size = 1024 * 256;
4117 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
4118 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
4119 assert(res == VK_SUCCESS);
4120 bufInfo.push_back(newBufInfo);
4121
4122 bufCreateInfo.size = 1024 * 512;
4123 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
4124 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
4125 assert(res == VK_SUCCESS);
4126 bufInfo.push_back(newBufInfo);
4127
4128 bufCreateInfo.size = 1024 * 128;
4129 res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
4130 &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
4131 assert(res == VK_SUCCESS);
4132 bufInfo.push_back(newBufInfo);
4133
4134 SaveAllocatorStatsToFile(L"BuddyTest01.json");
4135
4136 // Destroy the buffers in random order.
4137 while(!bufInfo.empty())
4138 {
4139 const size_t indexToDestroy = rand.Generate() % bufInfo.size();
4140 const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
4141 vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
4142 bufInfo.erase(bufInfo.begin() + indexToDestroy);
4143 }
4144
4145 vmaDestroyPool(g_hAllocator, pool);
4146}
4147
Adam Sawickib8333fb2018-03-13 16:15:53 +01004148void Test()
4149{
4150 wprintf(L"TESTING:\n");
4151
Adam Sawickia83793a2018-09-03 13:40:42 +02004152 if(true)
Adam Sawicki70a683e2018-08-24 15:36:32 +02004153 {
4154 // # Temporarily insert custom tests here
Adam Sawicki0a3fb6c2018-08-27 14:40:27 +02004155 // ########################################
4156 // ########################################
Adam Sawickia83793a2018-09-03 13:40:42 +02004157 BasicTestBuddyAllocator();
Adam Sawicki70a683e2018-08-24 15:36:32 +02004158 return;
4159 }
4160
Adam Sawickib8333fb2018-03-13 16:15:53 +01004161 // # Simple tests
4162
4163 TestBasics();
Adam Sawicki212a4a62018-06-14 15:44:45 +02004164#if VMA_DEBUG_MARGIN
4165 TestDebugMargin();
4166#else
4167 TestPool_SameSize();
4168 TestHeapSizeLimit();
4169#endif
Adam Sawickie44c6262018-06-15 14:30:39 +02004170#if VMA_DEBUG_INITIALIZE_ALLOCATIONS
4171 TestAllocationsInitialization();
4172#endif
Adam Sawickib8333fb2018-03-13 16:15:53 +01004173 TestMapping();
4174 TestMappingMultithreaded();
Adam Sawicki0876c0d2018-06-20 15:18:11 +02004175 TestLinearAllocator();
Adam Sawicki8cfe05f2018-08-22 16:48:17 +02004176 ManuallyTestLinearAllocator();
Adam Sawicki70a683e2018-08-24 15:36:32 +02004177 TestLinearAllocatorMultiBlock();
Adam Sawicki33d2ce72018-08-27 13:59:13 +02004178
4179 {
4180 FILE* file;
4181 fopen_s(&file, "LinearAllocator.csv", "w");
4182 assert(file != NULL);
4183
4184 BenchmarkLinearAllocator(file);
4185
4186 fclose(file);
4187 }
4188
Adam Sawickib8333fb2018-03-13 16:15:53 +01004189 TestDefragmentationSimple();
4190 TestDefragmentationFull();
4191
4192 // # Detailed tests
4193 FILE* file;
4194 fopen_s(&file, "Results.csv", "w");
4195 assert(file != NULL);
4196
4197 WriteMainTestResultHeader(file);
4198 PerformMainTests(file);
4199 //PerformCustomMainTest(file);
4200
4201 WritePoolTestResultHeader(file);
4202 PerformPoolTests(file);
4203 //PerformCustomPoolTest(file);
4204
4205 fclose(file);
4206
4207 wprintf(L"Done.\n");
4208}
4209
Adam Sawickif1a793c2018-03-13 15:42:22 +01004210#endif // #ifdef _WIN32