Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/** \brief Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.

Provided for informative purpose, e.g. to gather statistics about number of
allocations or total amount of memory allocated in Vulkan.
*/
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout whole lifetime of created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout whole lifetime of created allocator.
    VkDevice device;
    /// Preferred size of a single memory block to be allocated from large heaps. Optional - set to 0 to use default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Preferred size of a single memory block to be allocated from small heaps. Optional - set to 0 to use default.
    VkDeviceSize preferredSmallHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

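/* Example usage - a minimal sketch of creating an allocator. It assumes
`physicalDevice` and `device` are valid Vulkan handles created elsewhere;
all other members are left zero-initialized to use library defaults.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
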
/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

/** PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
You can access it here, without fetching it again on your own.
*/
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/** PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
You can access it here, without fetching it again on your own.
*/
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/** \brief Given a memory type index, returns the property flags of this memory type.

This is just a convenience function. The same information can be obtained using
vmaGetMemoryProperties().
*/
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/** \brief Sets index of the current frame.

This function must be used if you make allocations with the
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag, to inform the allocator when a
new frame begins.
*/
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from current state of Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/** Builds and returns statistics as string in JSON format.
@param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

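/* Example usage - a sketch of dumping allocator statistics, assuming a valid
`allocator` handle. The returned string must be freed with vmaFreeStatsString().

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
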
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, no need to be mapped on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped on host. Could be used for transfer to device.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for writing on device and readback on host (download).
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own memory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such block.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another allocation with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating allocation using this flag, other allocations that were created with flag VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Leave VMA_MEMORY_USAGE_UNKNOWN if you specify requiredFlags. You can also use both.
    VmaMemoryUsage usage;
    /// Flags that must be set in a Memory Type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a Memory Type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
    void* pUserData;
    /// Pool that this allocation should be created in. Leave VK_NULL_HANDLE to allocate from general memory.
    VmaPool pool;
} VmaAllocationCreateInfo;

/// Helps to find memory type index, given memory type bits and allocation create info.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

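/* Example usage - a sketch of choosing a memory type for a host-visible,
host-coherent staging resource. The names here are illustrative only.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex = 0;
    // memoryTypeBits would typically come from vkGetBufferMemoryRequirements().
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/
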
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /// Set this flag to use a memory that will be persistently mapped.
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000001,
    /** Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool, so Buffer-Image Granularity can be ignored.
    */
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameter of created VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional - leave 0 to use default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional - set to 0 to use default (no limit).
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameter of existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

/** \brief Allocates Vulkan device memory and creates VmaPool object.

@param allocator Allocator object.
@param pCreateInfo Parameters of pool to create.
@param[out] pPool Handle to created pool.
*/
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

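/* Example usage - a sketch of creating a custom pool for a specific memory
type. `memTypeIndex` would come from vmaFindMemoryTypeIndex(); other members
keep library defaults.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block
    poolCreateInfo.minBlockCount = 1;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... set VmaAllocationCreateInfo::pool = pool when allocating ...
    vmaDestroyPool(allocator, pool);
*/
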
/// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/** \brief Retrieves statistics of existing VmaPool object.

@param allocator Allocator object.
@param pool Pool object.
@param[out] pPoolStats Statistics of specified pool.
*/
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount frames from now.

@param allocator Allocator object.
@param pool Pool object.
@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
*/
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    /// Handle to Vulkan memory object. Same memory object can be shared by multiple allocations. It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes. It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes. It never changes, unless allocation is lost.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped. It can change after call to vmaUnmapPersistentlyMappedMemory() or vmaDefragment().
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/** \brief General purpose memory allocation.

@param[out] pAllocation Handle to allocated memory.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().

You should free the memory using vmaFreeMemory().
*/
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/** Same as vmaAllocateMemory(), but the memory requirements are taken from the
given buffer, so the allocation is suitable for binding to it.
*/
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Sets pUserData in given allocation to new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/** \brief Creates new allocation that is in lost state from the beginning.

It can be useful if you need a dummy, non-null allocation. You still need to
destroy the created object using vmaFreeMemory().
*/
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/** \brief Maps memory represented by given allocation and returns pointer to it.

Maps the whole memory block. The memory must not be already mapped.
*/
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

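/* Example usage - a sketch of uploading data through a mapped allocation,
assuming `allocation` was created in a host-visible memory type and `srcData`
/ `srcSize` are defined by the caller.

    void* pData = nullptr;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
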
/** \brief Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.

This is an optional performance optimization; see vmaMapPersistentlyMappedMemory()
for mapping it back.
*/
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

/** \brief Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.

See vmaUnmapPersistentlyMappedMemory().
*/
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. Use VK_WHOLE_SIZE to mean no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place. Use UINT32_MAX to mean no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

/** \brief Compacts memory by moving allocations.

@param pAllocations Array of allocations that can be moved during this compaction.
@param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays.
@param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. Optional - pass null if you don't need this information.
@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
@return VK_SUCCESS if completed, VK_INCOMPLETE if succeeded but didn't make all possible optimizations because limits specified in pDefragmentationInfo have been reached, negative error code in case of error.
*/
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

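/* Example usage - a sketch of defragmenting a set of allocations, assuming
`allocations` is a caller-maintained array of `allocationCount` handles. After
the call, buffers/images bound to moved allocations must be recreated and
rebound by the caller.

    VmaDefragmentationStats defragStats = {};
    std::vector<VkBool32> changed(allocationCount, VK_FALSE);
    VkResult res = vmaDefragment(
        allocator,
        allocations, allocationCount,
        changed.data(),
        nullptr, // default limits
        &defragStats);
*/
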
/** \brief Creates a buffer, allocates and binds memory for it.

@param[out] pBuffer Buffer that was created.
@param[out] pAllocation Allocation that was created.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().

You should free the buffer and the allocation using vmaDestroyBuffer().
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

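/* Example usage - a sketch of the typical "one call" path: create a GPU-only
vertex buffer together with its memory. Sizes and usage flags are illustrative.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
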
/** \brief Destroys Vulkan buffer and frees allocated memory.

This is just a convenience function equivalent to:

    vkDestroyBuffer(device, buffer, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);
*/
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys Vulkan image and frees allocated memory.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used in the VMA_STATS_STRING_ENABLED section
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

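/* For example, a sketch of overriding configuration macros before including
the implementation. The macro names below are the real ones defined in this
section; the assert handler is just an illustration:

    #define VMA_ASSERT(expr) myEngineAssert(expr)
    #define VMA_DEBUG_MARGIN 16
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
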
/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own
implementation of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

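/* For example, a minimal sketch of a custom replacement satisfying that subset.
This one just delegates to std::atomic and only illustrates the required
interface; a plain wrapper over a non-atomic counter would NOT be correct:

    class MyAtomicU32
    {
    public:
        MyAtomicU32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicU32
*/
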
#ifndef VMA_BEST_FIT
    /*
    Main parameter for function assessing how good is a free suballocation for a new allocation request.

    - Set to 1 to use Best-Fit algorithm - prefer smaller blocks, as close to the size of requested allocations as possible.
    - Set to 0 to use Worst-Fit algorithm - prefer larger blocks, as large as possible.
    */
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Every allocation will have its own memory block. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes. Set to more than 1 for debugging purposes only. Must be power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    // Default size of a block allocated as single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

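/* For example, with pageSize (bufferImageGranularity) = 0x10000: a resource at
offset 0x0F000 with size 0x2000 ends at byte 0x10FFF, whose page starts at
0x10000; a second resource starting at offset 0x11000 also lies on the page
starting at 0x10000, so the function returns true and the two resources would
need to respect granularity restrictions. */
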
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste computation
    // just to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_PersistentMap = persistentMap;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };
};

3016 /*
3017 Represents a region of a VmaDeviceMemoryBlock that is either assigned (and returned
3018 to the user as an allocated memory block) or free.
3019 */
3020 struct VmaSuballocation
3021 {
3022  VkDeviceSize offset;
3023  VkDeviceSize size;
3024  VmaAllocation hAllocation;
3025  VmaSuballocationType type;
3026 };
3027 
3028 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3029 
3030 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3031 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3032 
3033 /*
3034 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3035 
3036 If canMakeOtherLost was false:
3037 - item points to a FREE suballocation.
3038 - itemsToMakeLostCount is 0.
3039 
3040 If canMakeOtherLost was true:
3041 - item points to first of sequence of suballocations, which are either FREE,
3042  or point to VmaAllocations that can become lost.
3043 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3044  the requested allocation to succeed.
3045 */
3046 struct VmaAllocationRequest
3047 {
3048  VkDeviceSize offset;
3049  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3050  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3051  VmaSuballocationList::iterator item;
3052  size_t itemsToMakeLostCount;
3053 
3054  VkDeviceSize CalcCost() const
3055  {
3056  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3057  }
3058 };
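// Worked example of the cost metric above (illustrative numbers, not from the
// library): a request that would make 2 allocations lost with a combined
// sumItemSize of 524288 bytes has
//   CalcCost() == 524288 + 2 * 1048576 == 2621440,
// so it loses to any candidate request that disturbs fewer or smaller
// allocations, while a request over purely FREE items (cost 0) always compares
// as cheapest.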
3059 
3060 /*
3061 Data structure used for bookkeeping of allocations and unused ranges of memory
3062 in a single VkDeviceMemory block.
3063 */
3064 class VmaBlockMetadata
3065 {
3066 public:
3067  VmaBlockMetadata(VmaAllocator hAllocator);
3068  ~VmaBlockMetadata();
3069  void Init(VkDeviceSize size);
3070 
3071  // Validates all data structures inside this object. If not valid, returns false.
3072  bool Validate() const;
3073  VkDeviceSize GetSize() const { return m_Size; }
3074  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3075  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3076  VkDeviceSize GetUnusedRangeSizeMax() const;
3077  // Returns true if this block is empty - contains only a single free suballocation.
3078  bool IsEmpty() const;
3079 
3080  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3081  void AddPoolStats(VmaPoolStats& inoutStats) const;
3082 
3083 #if VMA_STATS_STRING_ENABLED
3084  void PrintDetailedMap(class VmaJsonWriter& json) const;
3085 #endif
3086 
3087  // Creates a trivial request for the case when the block is empty.
3088  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3089 
3090  // Tries to find a place for suballocation with given parameters inside this block.
3091  // If succeeded, fills pAllocationRequest and returns true.
3092  // If failed, returns false.
3093  bool CreateAllocationRequest(
3094  uint32_t currentFrameIndex,
3095  uint32_t frameInUseCount,
3096  VkDeviceSize bufferImageGranularity,
3097  VkDeviceSize allocSize,
3098  VkDeviceSize allocAlignment,
3099  VmaSuballocationType allocType,
3100  bool canMakeOtherLost,
3101  VmaAllocationRequest* pAllocationRequest);
3102 
3103  bool MakeRequestedAllocationsLost(
3104  uint32_t currentFrameIndex,
3105  uint32_t frameInUseCount,
3106  VmaAllocationRequest* pAllocationRequest);
3107 
3108  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3109 
3110  // Makes actual allocation based on request. Request must already be checked and valid.
3111  void Alloc(
3112  const VmaAllocationRequest& request,
3113  VmaSuballocationType type,
3114  VkDeviceSize allocSize,
3115  VmaAllocation hAllocation);
3116 
3117  // Frees suballocation assigned to given memory region.
3118  void Free(const VmaAllocation allocation);
3119 
3120 private:
3121  VkDeviceSize m_Size;
3122  uint32_t m_FreeCount;
3123  VkDeviceSize m_SumFreeSize;
3124  VmaSuballocationList m_Suballocations;
3125  // Suballocations that are free and have size greater than certain threshold.
3126  // Sorted by size, ascending.
3127  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3128 
3129  bool ValidateFreeSuballocationList() const;
3130 
3131  // Checks if requested suballocation with given parameters can be placed at given suballocItem.
3132  // If yes, fills pOffset and returns true. If no, returns false.
3133  bool CheckAllocation(
3134  uint32_t currentFrameIndex,
3135  uint32_t frameInUseCount,
3136  VkDeviceSize bufferImageGranularity,
3137  VkDeviceSize allocSize,
3138  VkDeviceSize allocAlignment,
3139  VmaSuballocationType allocType,
3140  VmaSuballocationList::const_iterator suballocItem,
3141  bool canMakeOtherLost,
3142  VkDeviceSize* pOffset,
3143  size_t* itemsToMakeLostCount,
3144  VkDeviceSize* pSumFreeSize,
3145  VkDeviceSize* pSumItemSize) const;
3146  // Merges given free suballocation with the following one, which must also be free.
3147  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3148  // Releases given suballocation, making it free.
3149  // Merges it with adjacent free suballocations if applicable.
3150  // Returns iterator to new free suballocation at this place.
3151  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3152  // Inserts given free suballocation into the sorted list
3153  // m_FreeSuballocationsBySize, if its size qualifies for registration.
3154  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3155  // Removes given free suballocation from the sorted list
3156  // m_FreeSuballocationsBySize, if its size qualified it for registration.
3157  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3158 };
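// Illustrative picture of the bookkeeping above (example values, not part of
// the API): a 1024-byte block holding one 256-byte buffer at offset 0 is
// represented as
//   m_Suballocations = { {offset=0, size=256, type=BUFFER},
//                        {offset=256, size=768, type=FREE} }
//   m_FreeCount      = 1
//   m_SumFreeSize    = 768
// and the free item is also registered in m_FreeSuballocationsBySize, provided
// its size reaches VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.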
3159 
3160 /*
3161 Represents a single block of device memory (`VkDeviceMemory`) with all the
3162 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3163 
3164 Thread-safety: This class must be externally synchronized.
3165 */
3166 class VmaDeviceMemoryBlock
3167 {
3168 public:
3169  uint32_t m_MemoryTypeIndex;
3170  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3171  VkDeviceMemory m_hMemory;
3172  bool m_PersistentMap;
3173  void* m_pMappedData;
3174  VmaBlockMetadata m_Metadata;
3175 
3176  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3177 
3178  ~VmaDeviceMemoryBlock()
3179  {
3180  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3181  }
3182 
3183  // Always call after construction.
3184  void Init(
3185  uint32_t newMemoryTypeIndex,
3186  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3187  VkDeviceMemory newMemory,
3188  VkDeviceSize newSize,
3189  bool persistentMap,
3190  void* pMappedData);
3191  // Always call before destruction.
3192  void Destroy(VmaAllocator allocator);
3193 
3194  // Validates all data structures inside this object. If not valid, returns false.
3195  bool Validate() const;
3196 };
3197 
3198 struct VmaPointerLess
3199 {
3200  bool operator()(const void* lhs, const void* rhs) const
3201  {
3202  return lhs < rhs;
3203  }
3204 };
3205 
3206 class VmaDefragmentator;
3207 
3208 /*
3209 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3210 Vulkan memory type.
3211 
3212 Synchronized internally with a mutex.
3213 */
3214 struct VmaBlockVector
3215 {
3216  VmaBlockVector(
3217  VmaAllocator hAllocator,
3218  uint32_t memoryTypeIndex,
3219  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3220  VkDeviceSize preferredBlockSize,
3221  size_t minBlockCount,
3222  size_t maxBlockCount,
3223  VkDeviceSize bufferImageGranularity,
3224  uint32_t frameInUseCount,
3225  bool isCustomPool);
3226  ~VmaBlockVector();
3227 
3228  VkResult CreateMinBlocks();
3229 
3230  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3231  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3232  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3233  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3234  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3235 
3236  void GetPoolStats(VmaPoolStats* pStats);
3237 
3238  bool IsEmpty() const { return m_Blocks.empty(); }
3239 
3240  VkResult Allocate(
3241  VmaPool hCurrentPool,
3242  uint32_t currentFrameIndex,
3243  const VkMemoryRequirements& vkMemReq,
3244  const VmaAllocationCreateInfo& createInfo,
3245  VmaSuballocationType suballocType,
3246  VmaAllocation* pAllocation);
3247 
3248  void Free(
3249  VmaAllocation hAllocation);
3250 
3251  // Adds statistics of this BlockVector to pStats.
3252  void AddStats(VmaStats* pStats);
3253 
3254 #if VMA_STATS_STRING_ENABLED
3255  void PrintDetailedMap(class VmaJsonWriter& json);
3256 #endif
3257 
3258  void UnmapPersistentlyMappedMemory();
3259  VkResult MapPersistentlyMappedMemory();
3260 
3261  void MakePoolAllocationsLost(
3262  uint32_t currentFrameIndex,
3263  size_t* pLostAllocationCount);
3264 
3265  VmaDefragmentator* EnsureDefragmentator(
3266  VmaAllocator hAllocator,
3267  uint32_t currentFrameIndex);
3268 
3269  VkResult Defragment(
3270  VmaDefragmentationStats* pDefragmentationStats,
3271  VkDeviceSize& maxBytesToMove,
3272  uint32_t& maxAllocationsToMove);
3273 
3274  void DestroyDefragmentator();
3275 
3276 private:
3277  friend class VmaDefragmentator;
3278 
3279  const VmaAllocator m_hAllocator;
3280  const uint32_t m_MemoryTypeIndex;
3281  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3282  const VkDeviceSize m_PreferredBlockSize;
3283  const size_t m_MinBlockCount;
3284  const size_t m_MaxBlockCount;
3285  const VkDeviceSize m_BufferImageGranularity;
3286  const uint32_t m_FrameInUseCount;
3287  const bool m_IsCustomPool;
3288  VMA_MUTEX m_Mutex;
3289  // Incrementally sorted by sumFreeSize, ascending.
3290  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3291  /* There can be at most one block that is completely empty - a
3292  hysteresis to avoid the pessimistic case of alternating creation and
3293  destruction of a VkDeviceMemory. */
3294  bool m_HasEmptyBlock;
3295  VmaDefragmentator* m_pDefragmentator;
3296 
3297  // Finds and removes given block from vector.
3298  void Remove(VmaDeviceMemoryBlock* pBlock);
3299 
3300  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3301  // after this call.
3302  void IncrementallySortBlocks();
3303 
3304  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3305 };
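/* A minimal sketch of what a single incremental sorting step could look like,
assuming VMA_SWAP behaves like std::swap (the actual implementation appears
later in this file and may differ):

    void VmaBlockVector::IncrementallySortBlocks()
    {
        // Bubble up at most one out-of-order pair per call, so the sorting
        // cost is amortized across many allocations instead of paid at once.
        for(size_t i = 1; i < m_Blocks.size(); ++i)
        {
            if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() >
                m_Blocks[i]->m_Metadata.GetSumFreeSize())
            {
                VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
                return;
            }
        }
    }
*/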
3306 
3307 struct VmaPool_T
3308 {
3309 public:
3310  VmaBlockVector m_BlockVector;
3311 
3312  // Takes ownership.
3313  VmaPool_T(
3314  VmaAllocator hAllocator,
3315  const VmaPoolCreateInfo& createInfo);
3316  ~VmaPool_T();
3317 
3318  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3319 
3320 #if VMA_STATS_STRING_ENABLED
3321  //void PrintDetailedMap(class VmaStringBuilder& sb);
3322 #endif
3323 };
3324 
3325 class VmaDefragmentator
3326 {
3327  const VmaAllocator m_hAllocator;
3328  VmaBlockVector* const m_pBlockVector;
3329  uint32_t m_CurrentFrameIndex;
3330  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3331  VkDeviceSize m_BytesMoved;
3332  uint32_t m_AllocationsMoved;
3333 
3334  struct AllocationInfo
3335  {
3336  VmaAllocation m_hAllocation;
3337  VkBool32* m_pChanged;
3338 
3339  AllocationInfo() :
3340  m_hAllocation(VK_NULL_HANDLE),
3341  m_pChanged(VMA_NULL)
3342  {
3343  }
3344  };
3345 
3346  struct AllocationInfoSizeGreater
3347  {
3348  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3349  {
3350  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3351  }
3352  };
3353 
3354  // Used between AddAllocation and Defragment.
3355  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3356 
3357  struct BlockInfo
3358  {
3359  VmaDeviceMemoryBlock* m_pBlock;
3360  bool m_HasNonMovableAllocations;
3361  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3362 
3363  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3364  m_pBlock(VMA_NULL),
3365  m_HasNonMovableAllocations(true),
3366  m_Allocations(pAllocationCallbacks),
3367  m_pMappedDataForDefragmentation(VMA_NULL)
3368  {
3369  }
3370 
3371  void CalcHasNonMovableAllocations()
3372  {
3373  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3374  const size_t defragmentAllocCount = m_Allocations.size();
3375  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3376  }
3377 
3378  void SortAllocationsBySizeDescecnding()
3379  {
3380  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3381  }
3382 
3383  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3384  void Unmap(VmaAllocator hAllocator);
3385 
3386  private:
3387  // Not null if mapped for defragmentation only, not persistently mapped.
3388  void* m_pMappedDataForDefragmentation;
3389  };
3390 
3391  struct BlockPointerLess
3392  {
3393  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3394  {
3395  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3396  }
3397  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3398  {
3399  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3400  }
3401  };
3402 
3403  // 1. Blocks with some non-movable allocations go first.
3404  // 2. Blocks with smaller sumFreeSize go first.
3405  struct BlockInfoCompareMoveDestination
3406  {
3407  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3408  {
3409  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3410  {
3411  return true;
3412  }
3413  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3414  {
3415  return false;
3416  }
3417  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3418  {
3419  return true;
3420  }
3421  return false;
3422  }
3423  };
3424 
3425  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3426  BlockInfoVector m_Blocks;
3427 
3428  VkResult DefragmentRound(
3429  VkDeviceSize maxBytesToMove,
3430  uint32_t maxAllocationsToMove);
3431 
3432  static bool MoveMakesSense(
3433  size_t dstBlockIndex, VkDeviceSize dstOffset,
3434  size_t srcBlockIndex, VkDeviceSize srcOffset);
3435 
3436 public:
3437  VmaDefragmentator(
3438  VmaAllocator hAllocator,
3439  VmaBlockVector* pBlockVector,
3440  uint32_t currentFrameIndex);
3441 
3442  ~VmaDefragmentator();
3443 
3444  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3445  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3446 
3447  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3448 
3449  VkResult Defragment(
3450  VkDeviceSize maxBytesToMove,
3451  uint32_t maxAllocationsToMove);
3452 };
3453 
3454 // Main allocator object.
3455 struct VmaAllocator_T
3456 {
3457  bool m_UseMutex;
3458  bool m_UseKhrDedicatedAllocation;
3459  VkDevice m_hDevice;
3460  bool m_AllocationCallbacksSpecified;
3461  VkAllocationCallbacks m_AllocationCallbacks;
3462  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3463  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3464  // Counter to allow nested calls to these functions.
3465  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3466 
3467  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3468  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3469  VMA_MUTEX m_HeapSizeLimitMutex;
3470 
3471  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3472  VkPhysicalDeviceMemoryProperties m_MemProps;
3473 
3474  // Default pools.
3475  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3476 
3477  // Each vector is sorted by memory (handle value).
3478  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3479  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3480  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3481 
3482  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3483  ~VmaAllocator_T();
3484 
3485  const VkAllocationCallbacks* GetAllocationCallbacks() const
3486  {
3487  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3488  }
3489  const VmaVulkanFunctions& GetVulkanFunctions() const
3490  {
3491  return m_VulkanFunctions;
3492  }
3493 
3494  VkDeviceSize GetBufferImageGranularity() const
3495  {
3496  return VMA_MAX(
3497  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3498  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3499  }
3500 
3501  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3502  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3503 
3504  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3505  {
3506  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3507  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3508  }
3509 
3510  void GetBufferMemoryRequirements(
3511  VkBuffer hBuffer,
3512  VkMemoryRequirements& memReq,
3513  bool& requiresDedicatedAllocation,
3514  bool& prefersDedicatedAllocation) const;
3515  void GetImageMemoryRequirements(
3516  VkImage hImage,
3517  VkMemoryRequirements& memReq,
3518  bool& requiresDedicatedAllocation,
3519  bool& prefersDedicatedAllocation) const;
3520 
3521  // Main allocation function.
3522  VkResult AllocateMemory(
3523  const VkMemoryRequirements& vkMemReq,
3524  bool requiresDedicatedAllocation,
3525  bool prefersDedicatedAllocation,
3526  VkBuffer dedicatedBuffer,
3527  VkImage dedicatedImage,
3528  const VmaAllocationCreateInfo& createInfo,
3529  VmaSuballocationType suballocType,
3530  VmaAllocation* pAllocation);
3531 
3532  // Main deallocation function.
3533  void FreeMemory(const VmaAllocation allocation);
3534 
3535  void CalculateStats(VmaStats* pStats);
3536 
3537 #if VMA_STATS_STRING_ENABLED
3538  void PrintDetailedMap(class VmaJsonWriter& json);
3539 #endif
3540 
3541  void UnmapPersistentlyMappedMemory();
3542  VkResult MapPersistentlyMappedMemory();
3543 
3544  VkResult Defragment(
3545  VmaAllocation* pAllocations,
3546  size_t allocationCount,
3547  VkBool32* pAllocationsChanged,
3548  const VmaDefragmentationInfo* pDefragmentationInfo,
3549  VmaDefragmentationStats* pDefragmentationStats);
3550 
3551  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3552 
3553  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3554  void DestroyPool(VmaPool pool);
3555  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3556 
3557  void SetCurrentFrameIndex(uint32_t frameIndex);
3558 
3559  void MakePoolAllocationsLost(
3560  VmaPool hPool,
3561  size_t* pLostAllocationCount);
3562 
3563  void CreateLostAllocation(VmaAllocation* pAllocation);
3564 
3565  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3566  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3567 
3568 private:
3569  VkDeviceSize m_PreferredLargeHeapBlockSize;
3570  VkDeviceSize m_PreferredSmallHeapBlockSize;
3571 
3572  VkPhysicalDevice m_PhysicalDevice;
3573  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3574 
3575  VMA_MUTEX m_PoolsMutex;
3576  // Protected by m_PoolsMutex. Sorted by pointer value.
3577  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3578 
3579  VmaVulkanFunctions m_VulkanFunctions;
3580 
3581  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3582 
3583  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3584 
3585  VkResult AllocateMemoryOfType(
3586  const VkMemoryRequirements& vkMemReq,
3587  bool dedicatedAllocation,
3588  VkBuffer dedicatedBuffer,
3589  VkImage dedicatedImage,
3590  const VmaAllocationCreateInfo& createInfo,
3591  uint32_t memTypeIndex,
3592  VmaSuballocationType suballocType,
3593  VmaAllocation* pAllocation);
3594 
3595  // Allocates and registers new VkDeviceMemory specifically for single allocation.
3596  VkResult AllocateDedicatedMemory(
3597  VkDeviceSize size,
3598  VmaSuballocationType suballocType,
3599  uint32_t memTypeIndex,
3600  bool map,
3601  void* pUserData,
3602  VkBuffer dedicatedBuffer,
3603  VkImage dedicatedImage,
3604  VmaAllocation* pAllocation);
3605 
3606  // Frees given allocation, which must have been created as a dedicated allocation.
3607  void FreeDedicatedMemory(VmaAllocation allocation);
3608 };
3609 
3610 ////////////////////////////////////////////////////////////////////////////////
3611 // Memory allocation #2 after VmaAllocator_T definition
3612 
3613 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3614 {
3615  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3616 }
3617 
3618 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3619 {
3620  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3621 }
3622 
3623 template<typename T>
3624 static T* VmaAllocate(VmaAllocator hAllocator)
3625 {
3626  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3627 }
3628 
3629 template<typename T>
3630 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3631 {
3632  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3633 }
3634 
3635 template<typename T>
3636 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3637 {
3638  if(ptr != VMA_NULL)
3639  {
3640  ptr->~T();
3641  VmaFree(hAllocator, ptr);
3642  }
3643 }
3644 
3645 template<typename T>
3646 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3647 {
3648  if(ptr != VMA_NULL)
3649  {
3650  for(size_t i = count; i--; )
3651  ptr[i].~T();
3652  VmaFree(hAllocator, ptr);
3653  }
3654 }
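/* Example pairing of these helpers (illustrative only; MyType is a
hypothetical type - construction must be done manually with placement new,
since VmaAllocate only obtains raw memory):

    MyType* p = VmaAllocate<MyType>(hAllocator); // raw, suitably aligned memory
    new(p) MyType();                             // construct in place
    // ... use *p ...
    vma_delete(hAllocator, p);                   // runs ~MyType() and frees
*/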
3655 
3656 ////////////////////////////////////////////////////////////////////////////////
3657 // VmaStringBuilder
3658 
3659 #if VMA_STATS_STRING_ENABLED
3660 
3661 class VmaStringBuilder
3662 {
3663 public:
3664  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3665  size_t GetLength() const { return m_Data.size(); }
3666  const char* GetData() const { return m_Data.data(); }
3667 
3668  void Add(char ch) { m_Data.push_back(ch); }
3669  void Add(const char* pStr);
3670  void AddNewLine() { Add('\n'); }
3671  void AddNumber(uint32_t num);
3672  void AddNumber(uint64_t num);
3673  void AddPointer(const void* ptr);
3674 
3675 private:
3676  VmaVector< char, VmaStlAllocator<char> > m_Data;
3677 };
3678 
3679 void VmaStringBuilder::Add(const char* pStr)
3680 {
3681  const size_t strLen = strlen(pStr);
3682  if(strLen > 0)
3683  {
3684  const size_t oldCount = m_Data.size();
3685  m_Data.resize(oldCount + strLen);
3686  memcpy(m_Data.data() + oldCount, pStr, strLen);
3687  }
3688 }
3689 
3690 void VmaStringBuilder::AddNumber(uint32_t num)
3691 {
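 // 11 = 10 decimal digits of UINT32_MAX plus the terminating null; the
 // uint64_t overload below sizes its buffer to 21 for the same reason.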
3692  char buf[11];
3693  VmaUint32ToStr(buf, sizeof(buf), num);
3694  Add(buf);
3695 }
3696 
3697 void VmaStringBuilder::AddNumber(uint64_t num)
3698 {
3699  char buf[21];
3700  VmaUint64ToStr(buf, sizeof(buf), num);
3701  Add(buf);
3702 }
3703 
3704 void VmaStringBuilder::AddPointer(const void* ptr)
3705 {
3706  char buf[21];
3707  VmaPtrToStr(buf, sizeof(buf), ptr);
3708  Add(buf);
3709 }
3710 
3711 #endif // #if VMA_STATS_STRING_ENABLED
3712 
3713 ////////////////////////////////////////////////////////////////////////////////
3714 // VmaJsonWriter
3715 
3716 #if VMA_STATS_STRING_ENABLED
3717 
3718 class VmaJsonWriter
3719 {
3720 public:
3721  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3722  ~VmaJsonWriter();
3723 
3724  void BeginObject(bool singleLine = false);
3725  void EndObject();
3726 
3727  void BeginArray(bool singleLine = false);
3728  void EndArray();
3729 
3730  void WriteString(const char* pStr);
3731  void BeginString(const char* pStr = VMA_NULL);
3732  void ContinueString(const char* pStr);
3733  void ContinueString(uint32_t n);
3734  void ContinueString(uint64_t n);
3735  void EndString(const char* pStr = VMA_NULL);
3736 
3737  void WriteNumber(uint32_t n);
3738  void WriteNumber(uint64_t n);
3739  void WriteBool(bool b);
3740  void WriteNull();
3741 
3742 private:
3743  static const char* const INDENT;
3744 
3745  enum COLLECTION_TYPE
3746  {
3747  COLLECTION_TYPE_OBJECT,
3748  COLLECTION_TYPE_ARRAY,
3749  };
3750  struct StackItem
3751  {
3752  COLLECTION_TYPE type;
3753  uint32_t valueCount;
3754  bool singleLineMode;
3755  };
3756 
3757  VmaStringBuilder& m_SB;
3758  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3759  bool m_InsideString;
3760 
3761  void BeginValue(bool isString);
3762  void WriteIndent(bool oneLess = false);
3763 };
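/* Illustrative usage of the writer (a sketch, assuming `allocator` is a valid
VmaAllocator; exact whitespace of the output depends on WriteIndent):

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Name");    // key - inside an object, every value at
        json.WriteString("Example"); //   an even position must be a string
        json.WriteString("Count");
        json.WriteNumber(3u);
        json.EndObject();            // destructor asserts the stack is empty
    }
    // sb.GetData()/GetLength() now describe: { "Name": "Example", "Count": 3 }
*/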
3764 
3765 const char* const VmaJsonWriter::INDENT = " ";
3766 
3767 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3768  m_SB(sb),
3769  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3770  m_InsideString(false)
3771 {
3772 }
3773 
3774 VmaJsonWriter::~VmaJsonWriter()
3775 {
3776  VMA_ASSERT(!m_InsideString);
3777  VMA_ASSERT(m_Stack.empty());
3778 }
3779 
3780 void VmaJsonWriter::BeginObject(bool singleLine)
3781 {
3782  VMA_ASSERT(!m_InsideString);
3783 
3784  BeginValue(false);
3785  m_SB.Add('{');
3786 
3787  StackItem item;
3788  item.type = COLLECTION_TYPE_OBJECT;
3789  item.valueCount = 0;
3790  item.singleLineMode = singleLine;
3791  m_Stack.push_back(item);
3792 }
3793 
3794 void VmaJsonWriter::EndObject()
3795 {
3796  VMA_ASSERT(!m_InsideString);
3797 
3798  WriteIndent(true);
3799  m_SB.Add('}');
3800 
3801  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3802  m_Stack.pop_back();
3803 }
3804 
3805 void VmaJsonWriter::BeginArray(bool singleLine)
3806 {
3807  VMA_ASSERT(!m_InsideString);
3808 
3809  BeginValue(false);
3810  m_SB.Add('[');
3811 
3812  StackItem item;
3813  item.type = COLLECTION_TYPE_ARRAY;
3814  item.valueCount = 0;
3815  item.singleLineMode = singleLine;
3816  m_Stack.push_back(item);
3817 }
3818 
3819 void VmaJsonWriter::EndArray()
3820 {
3821  VMA_ASSERT(!m_InsideString);
3822 
3823  WriteIndent(true);
3824  m_SB.Add(']');
3825 
3826  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3827  m_Stack.pop_back();
3828 }
3829 
3830 void VmaJsonWriter::WriteString(const char* pStr)
3831 {
3832  BeginString(pStr);
3833  EndString();
3834 }
3835 
3836 void VmaJsonWriter::BeginString(const char* pStr)
3837 {
3838  VMA_ASSERT(!m_InsideString);
3839 
3840  BeginValue(true);
3841  m_SB.Add('"');
3842  m_InsideString = true;
3843  if(pStr != VMA_NULL && pStr[0] != '\0')
3844  {
3845  ContinueString(pStr);
3846  }
3847 }
3848 
3849 void VmaJsonWriter::ContinueString(const char* pStr)
3850 {
3851  VMA_ASSERT(m_InsideString);
3852 
3853  const size_t strLen = strlen(pStr);
3854  for(size_t i = 0; i < strLen; ++i)
3855  {
3856  char ch = pStr[i];
3857  if(ch == '\\')
3858  {
3859  m_SB.Add("\\\\");
3860  }
3861  else if(ch == '"')
3862  {
3863  m_SB.Add("\\\"");
3864  }
3865  else if(ch >= 32)
3866  {
3867  m_SB.Add(ch);
3868  }
3869  else switch(ch)
3870  {
3871  case '\n':
3872  m_SB.Add("\\n");
3873  break;
3874  case '\r':
3875  m_SB.Add("\\r");
3876  break;
3877  case '\t':
3878  m_SB.Add("\\t");
3879  break;
3880  default:
3881  VMA_ASSERT(0 && "Character not currently supported.");
3882  break;
3883  }
3884  }
3885 }
3886 
3887 void VmaJsonWriter::ContinueString(uint32_t n)
3888 {
3889  VMA_ASSERT(m_InsideString);
3890  m_SB.AddNumber(n);
3891 }
3892 
3893 void VmaJsonWriter::ContinueString(uint64_t n)
3894 {
3895  VMA_ASSERT(m_InsideString);
3896  m_SB.AddNumber(n);
3897 }
3898 
3899 void VmaJsonWriter::EndString(const char* pStr)
3900 {
3901  VMA_ASSERT(m_InsideString);
3902  if(pStr != VMA_NULL && pStr[0] != '\0')
3903  {
3904  ContinueString(pStr);
3905  }
3906  m_SB.Add('"');
3907  m_InsideString = false;
3908 }
3909 
3910 void VmaJsonWriter::WriteNumber(uint32_t n)
3911 {
3912  VMA_ASSERT(!m_InsideString);
3913  BeginValue(false);
3914  m_SB.AddNumber(n);
3915 }
3916 
3917 void VmaJsonWriter::WriteNumber(uint64_t n)
3918 {
3919  VMA_ASSERT(!m_InsideString);
3920  BeginValue(false);
3921  m_SB.AddNumber(n);
3922 }
3923 
3924 void VmaJsonWriter::WriteBool(bool b)
3925 {
3926  VMA_ASSERT(!m_InsideString);
3927  BeginValue(false);
3928  m_SB.Add(b ? "true" : "false");
3929 }
3930 
3931 void VmaJsonWriter::WriteNull()
3932 {
3933  VMA_ASSERT(!m_InsideString);
3934  BeginValue(false);
3935  m_SB.Add("null");
3936 }
3937 
3938 void VmaJsonWriter::BeginValue(bool isString)
3939 {
3940  if(!m_Stack.empty())
3941  {
3942  StackItem& currItem = m_Stack.back();
3943  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3944  currItem.valueCount % 2 == 0)
3945  {
3946  VMA_ASSERT(isString);
3947  }
3948 
3949  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3950  currItem.valueCount % 2 != 0)
3951  {
3952  m_SB.Add(": ");
3953  }
3954  else if(currItem.valueCount > 0)
3955  {
3956  m_SB.Add(", ");
3957  WriteIndent();
3958  }
3959  else
3960  {
3961  WriteIndent();
3962  }
3963  ++currItem.valueCount;
3964  }
3965 }
3966 
3967 void VmaJsonWriter::WriteIndent(bool oneLess)
3968 {
3969  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3970  {
3971  m_SB.AddNewLine();
3972 
3973  size_t count = m_Stack.size();
3974  if(count > 0 && oneLess)
3975  {
3976  --count;
3977  }
3978  for(size_t i = 0; i < count; ++i)
3979  {
3980  m_SB.Add(INDENT);
3981  }
3982  }
3983 }
3984 
3985 #endif // #if VMA_STATS_STRING_ENABLED
3986 
3987 ////////////////////////////////////////////////////////////////////////////////
3988 
3989 VkDeviceSize VmaAllocation_T::GetOffset() const
3990 {
3991  switch(m_Type)
3992  {
3993  case ALLOCATION_TYPE_BLOCK:
3994  return m_BlockAllocation.m_Offset;
3995  case ALLOCATION_TYPE_DEDICATED:
3996  return 0;
3997  default:
3998  VMA_ASSERT(0);
3999  return 0;
4000  }
4001 }
4002 
4003 VkDeviceMemory VmaAllocation_T::GetMemory() const
4004 {
4005  switch(m_Type)
4006  {
4007  case ALLOCATION_TYPE_BLOCK:
4008  return m_BlockAllocation.m_Block->m_hMemory;
4009  case ALLOCATION_TYPE_DEDICATED:
4010  return m_DedicatedAllocation.m_hMemory;
4011  default:
4012  VMA_ASSERT(0);
4013  return VK_NULL_HANDLE;
4014  }
4015 }
4016 
4017 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4018 {
4019  switch(m_Type)
4020  {
4021  case ALLOCATION_TYPE_BLOCK:
4022  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4023  case ALLOCATION_TYPE_DEDICATED:
4024  return m_DedicatedAllocation.m_MemoryTypeIndex;
4025  default:
4026  VMA_ASSERT(0);
4027  return UINT32_MAX;
4028  }
4029 }
4030 
4031 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
4032 {
4033  switch(m_Type)
4034  {
4035  case ALLOCATION_TYPE_BLOCK:
4036  return m_BlockAllocation.m_Block->m_BlockVectorType;
4037  case ALLOCATION_TYPE_DEDICATED:
4038  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
4039  default:
4040  VMA_ASSERT(0);
4041  return VMA_BLOCK_VECTOR_TYPE_COUNT;
4042  }
4043 }
4044 
4045 void* VmaAllocation_T::GetMappedData() const
4046 {
4047  switch(m_Type)
4048  {
4049  case ALLOCATION_TYPE_BLOCK:
4050  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
4051  {
4052  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
4053  }
4054  else
4055  {
4056  return VMA_NULL;
4057  }
4058  break;
4059  case ALLOCATION_TYPE_DEDICATED:
4060  return m_DedicatedAllocation.m_pMappedData;
4061  default:
4062  VMA_ASSERT(0);
4063  return VMA_NULL;
4064  }
4065 }
4066 
4067 bool VmaAllocation_T::CanBecomeLost() const
4068 {
4069  switch(m_Type)
4070  {
4071  case ALLOCATION_TYPE_BLOCK:
4072  return m_BlockAllocation.m_CanBecomeLost;
4073  case ALLOCATION_TYPE_DEDICATED:
4074  return false;
4075  default:
4076  VMA_ASSERT(0);
4077  return false;
4078  }
4079 }
4080 
4081 VmaPool VmaAllocation_T::GetPool() const
4082 {
4083  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4084  return m_BlockAllocation.m_hPool;
4085 }
4086 
4087 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4088 {
4089  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4090  if(m_DedicatedAllocation.m_PersistentMap)
4091  {
4092  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4093  hAllocator->m_hDevice,
4094  m_DedicatedAllocation.m_hMemory,
4095  0,
4096  VK_WHOLE_SIZE,
4097  0,
4098  &m_DedicatedAllocation.m_pMappedData);
4099  }
4100  return VK_SUCCESS;
4101 }
4102 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4103 {
4104  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4105  if(m_DedicatedAllocation.m_pMappedData)
4106  {
4107  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
4108  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
4109  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4110  }
4111 }
4112 
4113 
4114 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4115 {
4116  VMA_ASSERT(CanBecomeLost());
4117 
4118  /*
4119  Warning: This is a carefully designed algorithm.
4120  Do not modify unless you really know what you're doing :)
4121  */
4122  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4123  for(;;)
4124  {
4125  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4126  {
4127  VMA_ASSERT(0);
4128  return false;
4129  }
4130  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4131  {
4132  return false;
4133  }
4134  else // Last use time earlier than current time.
4135  {
4136  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4137  {
4138  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4139  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4140  return true;
4141  }
4142  }
4143  }
4144 }
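// Worked example of the test above: with currentFrameIndex == 100 and
// frameInUseCount == 2, an allocation last used in frame 97 satisfies
// 97 + 2 < 100, so the compare-exchange retires it to VMA_FRAME_INDEX_LOST and
// MakeLost() returns true; one last used in frame 98 gives 98 + 2 >= 100 and
// stays usable. The loop iterates again only if another thread changed
// m_LastUseFrameIndex in the meantime (or the weak compare-exchange failed
// spuriously).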
4145 
4146 #if VMA_STATS_STRING_ENABLED
4147 
4148 // These correspond to values of enum VmaSuballocationType.
4149 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4150  "FREE",
4151  "UNKNOWN",
4152  "BUFFER",
4153  "IMAGE_UNKNOWN",
4154  "IMAGE_LINEAR",
4155  "IMAGE_OPTIMAL",
4156 };
4157 
4158 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4159 {
4160  json.BeginObject();
4161 
4162  json.WriteString("Blocks");
4163  json.WriteNumber(stat.blockCount);
4164 
4165  json.WriteString("Allocations");
4166  json.WriteNumber(stat.allocationCount);
4167 
4168  json.WriteString("UnusedRanges");
4169  json.WriteNumber(stat.unusedRangeCount);
4170 
4171  json.WriteString("UsedBytes");
4172  json.WriteNumber(stat.usedBytes);
4173 
4174  json.WriteString("UnusedBytes");
4175  json.WriteNumber(stat.unusedBytes);
4176 
4177  if(stat.allocationCount > 1)
4178  {
4179  json.WriteString("AllocationSize");
4180  json.BeginObject(true);
4181  json.WriteString("Min");
4182  json.WriteNumber(stat.allocationSizeMin);
4183  json.WriteString("Avg");
4184  json.WriteNumber(stat.allocationSizeAvg);
4185  json.WriteString("Max");
4186  json.WriteNumber(stat.allocationSizeMax);
4187  json.EndObject();
4188  }
4189 
4190  if(stat.unusedRangeCount > 1)
4191  {
4192  json.WriteString("UnusedRangeSize");
4193  json.BeginObject(true);
4194  json.WriteString("Min");
4195  json.WriteNumber(stat.unusedRangeSizeMin);
4196  json.WriteString("Avg");
4197  json.WriteNumber(stat.unusedRangeSizeAvg);
4198  json.WriteString("Max");
4199  json.WriteNumber(stat.unusedRangeSizeMax);
4200  json.EndObject();
4201  }
4202 
4203  json.EndObject();
4204 }
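/* For intuition, a hypothetical block with 2 allocations of 256 and 512 bytes
and a single 256-byte unused range would print roughly (whitespace elided):

    { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
      "UsedBytes": 768, "UnusedBytes": 256,
      "AllocationSize": { "Min": 256, "Avg": 384, "Max": 512 } }

"UnusedRangeSize" is omitted here because it only appears when
unusedRangeCount > 1, just as "AllocationSize" requires allocationCount > 1. */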
4205 
4206 #endif // #if VMA_STATS_STRING_ENABLED
4207 
4208 struct VmaSuballocationItemSizeLess
4209 {
4210  bool operator()(
4211  const VmaSuballocationList::iterator lhs,
4212  const VmaSuballocationList::iterator rhs) const
4213  {
4214  return lhs->size < rhs->size;
4215  }
4216  bool operator()(
4217  const VmaSuballocationList::iterator lhs,
4218  VkDeviceSize rhsSize) const
4219  {
4220  return lhs->size < rhsSize;
4221  }
4222 };
4223 
4224 ////////////////////////////////////////////////////////////////////////////////
4225 // class VmaBlockMetadata
4226 
4227 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4228  m_Size(0),
4229  m_FreeCount(0),
4230  m_SumFreeSize(0),
4231  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4232  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4233 {
4234 }
4235 
4236 VmaBlockMetadata::~VmaBlockMetadata()
4237 {
4238 }
4239 
4240 void VmaBlockMetadata::Init(VkDeviceSize size)
4241 {
4242  m_Size = size;
4243  m_FreeCount = 1;
4244  m_SumFreeSize = size;
4245 
4246  VmaSuballocation suballoc = {};
4247  suballoc.offset = 0;
4248  suballoc.size = size;
4249  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4250  suballoc.hAllocation = VK_NULL_HANDLE;
4251 
4252  m_Suballocations.push_back(suballoc);
4253  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4254  --suballocItem;
4255  m_FreeSuballocationsBySize.push_back(suballocItem);
4256 }
4257 
4258 bool VmaBlockMetadata::Validate() const
4259 {
4260  if(m_Suballocations.empty())
4261  {
4262  return false;
4263  }
4264 
4265  // Expected offset of new suballocation as calculated from previous ones.
4266  VkDeviceSize calculatedOffset = 0;
4267  // Expected number of free suballocations as calculated from traversing their list.
4268  uint32_t calculatedFreeCount = 0;
4269  // Expected sum size of free suballocations as calculated from traversing their list.
4270  VkDeviceSize calculatedSumFreeSize = 0;
4271  // Expected number of free suballocations that should be registered in
4272  // m_FreeSuballocationsBySize calculated from traversing their list.
4273  size_t freeSuballocationsToRegister = 0;
4274  // True if previously visited suballocation was free.
4275  bool prevFree = false;
4276 
4277  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4278  suballocItem != m_Suballocations.cend();
4279  ++suballocItem)
4280  {
4281  const VmaSuballocation& subAlloc = *suballocItem;
4282 
4283  // Actual offset of this suballocation doesn't match expected one.
4284  if(subAlloc.offset != calculatedOffset)
4285  {
4286  return false;
4287  }
4288 
4289  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4290  // Two adjacent free suballocations are invalid. They should be merged.
4291  if(prevFree && currFree)
4292  {
4293  return false;
4294  }
4295  prevFree = currFree;
4296 
4297  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4298  {
4299  return false;
4300  }
4301 
4302  if(currFree)
4303  {
4304  calculatedSumFreeSize += subAlloc.size;
4305  ++calculatedFreeCount;
4306  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4307  {
4308  ++freeSuballocationsToRegister;
4309  }
4310  }
4311 
4312  calculatedOffset += subAlloc.size;
4313  }
4314 
4315  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4316  // match expected one.
4317  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4318  {
4319  return false;
4320  }
4321 
4322  VkDeviceSize lastSize = 0;
4323  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4324  {
4325  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4326 
4327  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4328  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4329  {
4330  return false;
4331  }
4332  // They must be sorted by size ascending.
4333  if(suballocItem->size < lastSize)
4334  {
4335  return false;
4336  }
4337 
4338  lastSize = suballocItem->size;
4339  }
4340 
4341  // Check if totals match calculated values.
4342  return
4343  ValidateFreeSuballocationList() &&
4344  (calculatedOffset == m_Size) &&
4345  (calculatedSumFreeSize == m_SumFreeSize) &&
4346  (calculatedFreeCount == m_FreeCount);
4347 }
4348 
4349 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4350 {
4351  if(!m_FreeSuballocationsBySize.empty())
4352  {
4353  return m_FreeSuballocationsBySize.back()->size;
4354  }
4355  else
4356  {
4357  return 0;
4358  }
4359 }
4360 
4361 bool VmaBlockMetadata::IsEmpty() const
4362 {
4363  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4364 }
4365 
4366 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4367 {
4368  outInfo.blockCount = 1;
4369 
4370  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4371  outInfo.allocationCount = rangeCount - m_FreeCount;
4372  outInfo.unusedRangeCount = m_FreeCount;
4373 
4374  outInfo.unusedBytes = m_SumFreeSize;
4375  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4376 
4377  outInfo.allocationSizeMin = UINT64_MAX;
4378  outInfo.allocationSizeMax = 0;
4379  outInfo.unusedRangeSizeMin = UINT64_MAX;
4380  outInfo.unusedRangeSizeMax = 0;
4381 
4382  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4383  suballocItem != m_Suballocations.cend();
4384  ++suballocItem)
4385  {
4386  const VmaSuballocation& suballoc = *suballocItem;
4387  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4388  {
4389  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4390  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4391  }
4392  else
4393  {
4394  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4395  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4396  }
4397  }
4398 }
4399 
4400 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4401 {
4402  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4403 
4404  inoutStats.size += m_Size;
4405  inoutStats.unusedSize += m_SumFreeSize;
4406  inoutStats.allocationCount += rangeCount - m_FreeCount;
4407  inoutStats.unusedRangeCount += m_FreeCount;
4408  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4409 }
4410 
4411 #if VMA_STATS_STRING_ENABLED
4412 
4413 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4414 {
4415  json.BeginObject();
4416 
4417  json.WriteString("TotalBytes");
4418  json.WriteNumber(m_Size);
4419 
4420  json.WriteString("UnusedBytes");
4421  json.WriteNumber(m_SumFreeSize);
4422 
4423  json.WriteString("Allocations");
4424  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4425 
4426  json.WriteString("UnusedRanges");
4427  json.WriteNumber(m_FreeCount);
4428 
4429  json.WriteString("Suballocations");
4430  json.BeginArray();
4431  size_t i = 0;
4432  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4433  suballocItem != m_Suballocations.cend();
4434  ++suballocItem, ++i)
4435  {
4436  json.BeginObject(true);
4437 
4438  json.WriteString("Type");
4439  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4440 
4441  json.WriteString("Size");
4442  json.WriteNumber(suballocItem->size);
4443 
4444  json.WriteString("Offset");
4445  json.WriteNumber(suballocItem->offset);
4446 
4447  json.EndObject();
4448  }
4449  json.EndArray();
4450 
4451  json.EndObject();
4452 }
4453 
4454 #endif // #if VMA_STATS_STRING_ENABLED
4455 
4456 /*
4457 How many suitable free suballocations to analyze before choosing best one.
4458 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4459  be chosen.
4460 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4461  suballocations will be analyzed and the best one will be chosen.
4462 - Any other value is also acceptable.
4463 */
4464 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
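// With VMA_BEST_FIT enabled, the lookup in CreateAllocationRequest below acts
// like std::lower_bound over m_FreeSuballocationsBySize: it binary-searches
// for the first free suballocation whose size is not less than allocSize and
// scans forward from there, so the smallest adequate free range is tried
// first. For example (illustrative sizes), with registered free sizes
// {64, 256, 1024} and allocSize 200, the search starts at the 256-byte range.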
4465 
4466 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4467 {
4468  VMA_ASSERT(IsEmpty());
4469  pAllocationRequest->offset = 0;
4470  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4471  pAllocationRequest->sumItemSize = 0;
4472  pAllocationRequest->item = m_Suballocations.begin();
4473  pAllocationRequest->itemsToMakeLostCount = 0;
4474 }
4475 
4476 bool VmaBlockMetadata::CreateAllocationRequest(
4477  uint32_t currentFrameIndex,
4478  uint32_t frameInUseCount,
4479  VkDeviceSize bufferImageGranularity,
4480  VkDeviceSize allocSize,
4481  VkDeviceSize allocAlignment,
4482  VmaSuballocationType allocType,
4483  bool canMakeOtherLost,
4484  VmaAllocationRequest* pAllocationRequest)
4485 {
4486  VMA_ASSERT(allocSize > 0);
4487  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4488  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4489  VMA_HEAVY_ASSERT(Validate());
4490 
4491  // There is not enough total free space in this block to fulfill the request: Early return.
4492  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4493  {
4494  return false;
4495  }
4496 
4497  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4498  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4499  if(freeSuballocCount > 0)
4500  {
4501  if(VMA_BEST_FIT)
4502  {
4503  // Find first free suballocation with size not less than allocSize.
4504  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4505  m_FreeSuballocationsBySize.data(),
4506  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4507  allocSize,
4508  VmaSuballocationItemSizeLess());
4509  size_t index = it - m_FreeSuballocationsBySize.data();
4510  for(; index < freeSuballocCount; ++index)
4511  {
4512  if(CheckAllocation(
4513  currentFrameIndex,
4514  frameInUseCount,
4515  bufferImageGranularity,
4516  allocSize,
4517  allocAlignment,
4518  allocType,
4519  m_FreeSuballocationsBySize[index],
4520  false, // canMakeOtherLost
4521  &pAllocationRequest->offset,
4522  &pAllocationRequest->itemsToMakeLostCount,
4523  &pAllocationRequest->sumFreeSize,
4524  &pAllocationRequest->sumItemSize))
4525  {
4526  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4527  return true;
4528  }
4529  }
4530  }
4531  else
4532  {
4533  // Search starting from biggest suballocations.
4534  for(size_t index = freeSuballocCount; index--; )
4535  {
4536  if(CheckAllocation(
4537  currentFrameIndex,
4538  frameInUseCount,
4539  bufferImageGranularity,
4540  allocSize,
4541  allocAlignment,
4542  allocType,
4543  m_FreeSuballocationsBySize[index],
4544  false, // canMakeOtherLost
4545  &pAllocationRequest->offset,
4546  &pAllocationRequest->itemsToMakeLostCount,
4547  &pAllocationRequest->sumFreeSize,
4548  &pAllocationRequest->sumItemSize))
4549  {
4550  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4551  return true;
4552  }
4553  }
4554  }
4555  }
4556 
4557  if(canMakeOtherLost)
4558  {
4559  // Brute-force algorithm. TODO: Come up with something better.
4560 
4561  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4562  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4563 
4564  VmaAllocationRequest tmpAllocRequest = {};
4565  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4566  suballocIt != m_Suballocations.end();
4567  ++suballocIt)
4568  {
4569  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4570  suballocIt->hAllocation->CanBecomeLost())
4571  {
4572  if(CheckAllocation(
4573  currentFrameIndex,
4574  frameInUseCount,
4575  bufferImageGranularity,
4576  allocSize,
4577  allocAlignment,
4578  allocType,
4579  suballocIt,
4580  canMakeOtherLost,
4581  &tmpAllocRequest.offset,
4582  &tmpAllocRequest.itemsToMakeLostCount,
4583  &tmpAllocRequest.sumFreeSize,
4584  &tmpAllocRequest.sumItemSize))
4585  {
4586  tmpAllocRequest.item = suballocIt;
4587 
4588  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4589  {
4590  *pAllocationRequest = tmpAllocRequest;
4591  }
4592  }
4593  }
4594  }
4595 
4596  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4597  {
4598  return true;
4599  }
4600  }
4601 
4602  return false;
4603 }
4604 
4605 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4606  uint32_t currentFrameIndex,
4607  uint32_t frameInUseCount,
4608  VmaAllocationRequest* pAllocationRequest)
4609 {
4610  while(pAllocationRequest->itemsToMakeLostCount > 0)
4611  {
4612  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4613  {
4614  ++pAllocationRequest->item;
4615  }
4616  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4617  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4618  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4619  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4620  {
4621  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4622  --pAllocationRequest->itemsToMakeLostCount;
4623  }
4624  else
4625  {
4626  return false;
4627  }
4628  }
4629 
4630  VMA_HEAVY_ASSERT(Validate());
4631  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4632  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4633 
4634  return true;
4635 }
4636 
4637 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4638 {
4639  uint32_t lostAllocationCount = 0;
4640  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4641  it != m_Suballocations.end();
4642  ++it)
4643  {
4644  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4645  it->hAllocation->CanBecomeLost() &&
4646  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4647  {
4648  it = FreeSuballocation(it);
4649  ++lostAllocationCount;
4650  }
4651  }
4652  return lostAllocationCount;
4653 }
4654 
4655 void VmaBlockMetadata::Alloc(
4656  const VmaAllocationRequest& request,
4657  VmaSuballocationType type,
4658  VkDeviceSize allocSize,
4659  VmaAllocation hAllocation)
4660 {
4661  VMA_ASSERT(request.item != m_Suballocations.end());
4662  VmaSuballocation& suballoc = *request.item;
4663  // Given suballocation is a free block.
4664  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4665  // Given offset is inside this suballocation.
4666  VMA_ASSERT(request.offset >= suballoc.offset);
4667  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4668  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4669  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4670 
4671  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4672  // it to become used.
4673  UnregisterFreeSuballocation(request.item);
4674 
4675  suballoc.offset = request.offset;
4676  suballoc.size = allocSize;
4677  suballoc.type = type;
4678  suballoc.hAllocation = hAllocation;
4679 
4680  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4681  if(paddingEnd)
4682  {
4683  VmaSuballocation paddingSuballoc = {};
4684  paddingSuballoc.offset = request.offset + allocSize;
4685  paddingSuballoc.size = paddingEnd;
4686  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4687  VmaSuballocationList::iterator next = request.item;
4688  ++next;
4689  const VmaSuballocationList::iterator paddingEndItem =
4690  m_Suballocations.insert(next, paddingSuballoc);
4691  RegisterFreeSuballocation(paddingEndItem);
4692  }
4693 
4694  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4695  if(paddingBegin)
4696  {
4697  VmaSuballocation paddingSuballoc = {};
4698  paddingSuballoc.offset = request.offset - paddingBegin;
4699  paddingSuballoc.size = paddingBegin;
4700  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4701  const VmaSuballocationList::iterator paddingBeginItem =
4702  m_Suballocations.insert(request.item, paddingSuballoc);
4703  RegisterFreeSuballocation(paddingBeginItem);
4704  }
4705 
4706  // Update totals.
4707  m_FreeCount = m_FreeCount - 1;
4708  if(paddingBegin > 0)
4709  {
4710  ++m_FreeCount;
4711  }
4712  if(paddingEnd > 0)
4713  {
4714  ++m_FreeCount;
4715  }
4716  m_SumFreeSize -= allocSize;
4717 }
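// Worked example of the split above (illustrative numbers): a free
// suballocation {offset=0, size=1024} receiving a request at offset 16 (due to
// alignment) with allocSize 256 becomes
//   [FREE 0..16) [USED 16..272) [FREE 272..1024)
// i.e. paddingBegin = 16 and paddingEnd = 752, so m_FreeCount goes 1 -> 2 and
// m_SumFreeSize drops by exactly allocSize (256).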
4718 
4719 void VmaBlockMetadata::Free(const VmaAllocation allocation)
4720 {
4721  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4722  suballocItem != m_Suballocations.end();
4723  ++suballocItem)
4724  {
4725  VmaSuballocation& suballoc = *suballocItem;
4726  if(suballoc.hAllocation == allocation)
4727  {
4728  FreeSuballocation(suballocItem);
4729  VMA_HEAVY_ASSERT(Validate());
4730  return;
4731  }
4732  }
4733  VMA_ASSERT(0 && "Not found!");
4734 }
4735 
4736 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
4737 {
4738  VkDeviceSize lastSize = 0;
4739  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
4740  {
4741  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
4742 
4743  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
4744  {
4745  VMA_ASSERT(0);
4746  return false;
4747  }
4748  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4749  {
4750  VMA_ASSERT(0);
4751  return false;
4752  }
4753  if(it->size < lastSize)
4754  {
4755  VMA_ASSERT(0);
4756  return false;
4757  }
4758 
4759  lastSize = it->size;
4760  }
4761  return true;
4762 }
4763 
4764 bool VmaBlockMetadata::CheckAllocation(
4765  uint32_t currentFrameIndex,
4766  uint32_t frameInUseCount,
4767  VkDeviceSize bufferImageGranularity,
4768  VkDeviceSize allocSize,
4769  VkDeviceSize allocAlignment,
4770  VmaSuballocationType allocType,
4771  VmaSuballocationList::const_iterator suballocItem,
4772  bool canMakeOtherLost,
4773  VkDeviceSize* pOffset,
4774  size_t* itemsToMakeLostCount,
4775  VkDeviceSize* pSumFreeSize,
4776  VkDeviceSize* pSumItemSize) const
4777 {
4778  VMA_ASSERT(allocSize > 0);
4779  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4780  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4781  VMA_ASSERT(pOffset != VMA_NULL);
4782 
4783  *itemsToMakeLostCount = 0;
4784  *pSumFreeSize = 0;
4785  *pSumItemSize = 0;
4786 
4787  if(canMakeOtherLost)
4788  {
4789  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4790  {
4791  *pSumFreeSize = suballocItem->size;
4792  }
4793  else
4794  {
4795  if(suballocItem->hAllocation->CanBecomeLost() &&
4796  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4797  {
4798  ++*itemsToMakeLostCount;
4799  *pSumItemSize = suballocItem->size;
4800  }
4801  else
4802  {
4803  return false;
4804  }
4805  }
4806 
4807  // Remaining size is too small for this request: Early return.
4808  if(m_Size - suballocItem->offset < allocSize)
4809  {
4810  return false;
4811  }
4812 
4813  // Start from offset equal to beginning of this suballocation.
4814  *pOffset = suballocItem->offset;
4815 
4816  // Apply VMA_DEBUG_MARGIN at the beginning.
4817  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4818  {
4819  *pOffset += VMA_DEBUG_MARGIN;
4820  }
4821 
4822  // Apply alignment.
4823  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4824  *pOffset = VmaAlignUp(*pOffset, alignment);
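  // E.g. an offset of 13 with alignment 8 is rounded up to 16 - VmaAlignUp
  // always moves the offset to the next multiple of the required alignment.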
4825 
4826  // Check previous suballocations for BufferImageGranularity conflicts.
4827  // Make bigger alignment if necessary.
4828  if(bufferImageGranularity > 1)
4829  {
4830  bool bufferImageGranularityConflict = false;
4831  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4832  while(prevSuballocItem != m_Suballocations.cbegin())
4833  {
4834  --prevSuballocItem;
4835  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4836  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4837  {
4838  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4839  {
4840  bufferImageGranularityConflict = true;
4841  break;
4842  }
4843  }
4844  else
4845  // Already on previous page.
4846  break;
4847  }
4848  if(bufferImageGranularityConflict)
4849  {
4850  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4851  }
4852  }
4853 
4854  // Now that we have final *pOffset, check if we are past suballocItem.
4855  // If yes, return false - this function should be called for another suballocItem as starting point.
4856  if(*pOffset >= suballocItem->offset + suballocItem->size)
4857  {
4858  return false;
4859  }
4860 
4861  // Calculate padding at the beginning based on current offset.
4862  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4863 
4864  // Calculate required margin at the end if this is not last suballocation.
4865  VmaSuballocationList::const_iterator next = suballocItem;
4866  ++next;
4867  const VkDeviceSize requiredEndMargin =
4868  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4869 
4870  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4871  // Another early return check.
4872  if(suballocItem->offset + totalSize > m_Size)
4873  {
4874  return false;
4875  }
4876 
4877  // Advance lastSuballocItem until desired size is reached.
4878  // Update itemsToMakeLostCount.
4879  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4880  if(totalSize > suballocItem->size)
4881  {
4882  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4883  while(remainingSize > 0)
4884  {
4885  ++lastSuballocItem;
4886  if(lastSuballocItem == m_Suballocations.cend())
4887  {
4888  return false;
4889  }
4890  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4891  {
4892  *pSumFreeSize += lastSuballocItem->size;
4893  }
4894  else
4895  {
4896  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4897  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4898  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4899  {
4900  ++*itemsToMakeLostCount;
4901  *pSumItemSize += lastSuballocItem->size;
4902  }
4903  else
4904  {
4905  return false;
4906  }
4907  }
4908  remainingSize = (lastSuballocItem->size < remainingSize) ?
4909  remainingSize - lastSuballocItem->size : 0;
4910  }
4911  }
4912 
4913  // Check next suballocations for BufferImageGranularity conflicts.
4914  // If conflict exists, we must mark more allocations lost or fail.
4915  if(bufferImageGranularity > 1)
4916  {
4917  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4918  ++nextSuballocItem;
4919  while(nextSuballocItem != m_Suballocations.cend())
4920  {
4921  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4922  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4923  {
4924  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4925  {
4926  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4927  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4928  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4929  {
4930  ++*itemsToMakeLostCount;
4931  }
4932  else
4933  {
4934  return false;
4935  }
4936  }
4937  }
4938  else
4939  {
4940  // Already on next page.
4941  break;
4942  }
4943  ++nextSuballocItem;
4944  }
4945  }
4946  }
4947  else
4948  {
4949  const VmaSuballocation& suballoc = *suballocItem;
4950  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4951 
4952  *pSumFreeSize = suballoc.size;
4953 
4954  // Size of this suballocation is too small for this request: Early return.
4955  if(suballoc.size < allocSize)
4956  {
4957  return false;
4958  }
4959 
4960  // Start from offset equal to beginning of this suballocation.
4961  *pOffset = suballoc.offset;
4962 
4963  // Apply VMA_DEBUG_MARGIN at the beginning.
4964  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4965  {
4966  *pOffset += VMA_DEBUG_MARGIN;
4967  }
4968 
4969  // Apply alignment.
4970  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4971  *pOffset = VmaAlignUp(*pOffset, alignment);
4972 
4973  // Check previous suballocations for BufferImageGranularity conflicts.
4974  // Make bigger alignment if necessary.
4975  if(bufferImageGranularity > 1)
4976  {
4977  bool bufferImageGranularityConflict = false;
4978  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4979  while(prevSuballocItem != m_Suballocations.cbegin())
4980  {
4981  --prevSuballocItem;
4982  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4983  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4984  {
4985  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4986  {
4987  bufferImageGranularityConflict = true;
4988  break;
4989  }
4990  }
4991  else
4992  // Already on previous page.
4993  break;
4994  }
4995  if(bufferImageGranularityConflict)
4996  {
4997  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4998  }
4999  }
5000 
5001  // Calculate padding at the beginning based on current offset.
5002  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5003 
5004  // Calculate required margin at the end if this is not last suballocation.
5005  VmaSuballocationList::const_iterator next = suballocItem;
5006  ++next;
5007  const VkDeviceSize requiredEndMargin =
5008  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5009 
5010  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5011  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5012  {
5013  return false;
5014  }
5015 
5016  // Check next suballocations for BufferImageGranularity conflicts.
5017  // If conflict exists, allocation cannot be made here.
5018  if(bufferImageGranularity > 1)
5019  {
5020  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5021  ++nextSuballocItem;
5022  while(nextSuballocItem != m_Suballocations.cend())
5023  {
5024  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5025  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5026  {
5027  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5028  {
5029  return false;
5030  }
5031  }
5032  else
5033  {
5034  // Already on next page.
5035  break;
5036  }
5037  ++nextSuballocItem;
5038  }
5039  }
5040  }
5041 
5042  // All tests passed: Success. pOffset is already filled.
5043  return true;
5044 }
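// For reference, a minimal sketch of the two helpers the granularity logic
// above relies on (their real definitions appear earlier in this file; this
// sketch assumes pageSize is a power of two, as Vulkan guarantees for
// bufferImageGranularity):
#if 0 // illustrative sketch, not part of the library
static inline VkDeviceSize AlignUp(VkDeviceSize val, VkDeviceSize align)
{
    return (val + align - 1) / align * align;
}
// True if the last byte of resource A and the first byte of resource B fall
// on the same bufferImageGranularity "page" - only then can a linear/optimal
// neighbor pair actually conflict.
static inline bool BlocksOnSamePage(
    VkDeviceSize resourceAOffset, VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset, VkDeviceSize pageSize)
{
    const VkDeviceSize resourceAEndPage = (resourceAOffset + resourceASize - 1) & ~(pageSize - 1);
    const VkDeviceSize resourceBStartPage = resourceBOffset & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
#endif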
5045 
5046 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5047 {
5048  VMA_ASSERT(item != m_Suballocations.end());
5049  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5050 
5051  VmaSuballocationList::iterator nextItem = item;
5052  ++nextItem;
5053  VMA_ASSERT(nextItem != m_Suballocations.end());
5054  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5055 
5056  item->size += nextItem->size;
5057  --m_FreeCount;
5058  m_Suballocations.erase(nextItem);
5059 }
5060 
5061 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5062 {
5063  // Change this suballocation to be marked as free.
5064  VmaSuballocation& suballoc = *suballocItem;
5065  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5066  suballoc.hAllocation = VK_NULL_HANDLE;
5067 
5068  // Update totals.
5069  ++m_FreeCount;
5070  m_SumFreeSize += suballoc.size;
5071 
5072  // Merge with previous and/or next suballocation if it's also free.
5073  bool mergeWithNext = false;
5074  bool mergeWithPrev = false;
5075 
5076  VmaSuballocationList::iterator nextItem = suballocItem;
5077  ++nextItem;
5078  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5079  {
5080  mergeWithNext = true;
5081  }
5082 
5083  VmaSuballocationList::iterator prevItem = suballocItem;
5084  if(suballocItem != m_Suballocations.begin())
5085  {
5086  --prevItem;
5087  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5088  {
5089  mergeWithPrev = true;
5090  }
5091  }
5092 
5093  if(mergeWithNext)
5094  {
5095  UnregisterFreeSuballocation(nextItem);
5096  MergeFreeWithNext(suballocItem);
5097  }
5098 
5099  if(mergeWithPrev)
5100  {
5101  UnregisterFreeSuballocation(prevItem);
5102  MergeFreeWithNext(prevItem);
5103  RegisterFreeSuballocation(prevItem);
5104  return prevItem;
5105  }
5106  else
5107  {
5108  RegisterFreeSuballocation(suballocItem);
5109  return suballocItem;
5110  }
5111 }
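// Illustration of the coalescing performed above (offsets grow to the right):
//
//   before Free(B):       [A free][B used][C free]
//   B marked free:        [A free][B free][C free]
//   merge B with next C:  [A free][B+C free]
//   merge A with B+C:     [A+B+C free]   <- iterator to this range is returned
//
// Because freed ranges are merged eagerly, the suballocation list never
// contains two adjacent free items.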
5112 
5113 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5114 {
5115  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5116  VMA_ASSERT(item->size > 0);
5117 
5118  // You may want to enable this validation at the beginning or at the end of
5119  // this function, depending on what you want to check.
5120  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5121 
5122  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5123  {
5124  if(m_FreeSuballocationsBySize.empty())
5125  {
5126  m_FreeSuballocationsBySize.push_back(item);
5127  }
5128  else
5129  {
5130  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5131  }
5132  }
5133 
5134  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5135 }
5136 
5137 
5138 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5139 {
5140  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5141  VMA_ASSERT(item->size > 0);
5142 
5143  // You may want to enable this validation at the beginning or at the end of
5144  // this function, depending on what you want to check.
5145  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5146 
5147  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5148  {
5149  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5150  m_FreeSuballocationsBySize.data(),
5151  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5152  item,
5153  VmaSuballocationItemSizeLess());
5154  for(size_t index = it - m_FreeSuballocationsBySize.data();
5155  index < m_FreeSuballocationsBySize.size();
5156  ++index)
5157  {
5158  if(m_FreeSuballocationsBySize[index] == item)
5159  {
5160  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5161  return;
5162  }
5163  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5164  }
5165  VMA_ASSERT(0 && "Not found.");
5166  }
5167 
5168  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5169 }
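// VmaBinaryFindFirstNotLess used above is the library's equivalent of
// std::lower_bound. A minimal sketch of the assumed semantics (the real
// template is defined earlier in this file):
#if 0 // illustrative sketch, not part of the library
template<typename IterT, typename KeyT, typename CmpT>
static IterT BinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, CmpT cmp)
{
    size_t down = 0, up = (size_t)(end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg + mid), key))
            down = mid + 1; // element < key: result lies to the right
        else
            up = mid;       // element >= key: result is here or to the left
    }
    return beg + down; // first element not less than key, or end
}
#endif
// UnregisterFreeSuballocation then scans forward over equal-sized entries,
// because multiple free ranges can share the same size.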
5170 
5172 // class VmaDeviceMemoryBlock
5173 
5174 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5175  m_MemoryTypeIndex(UINT32_MAX),
5176  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
5177  m_hMemory(VK_NULL_HANDLE),
5178  m_PersistentMap(false),
5179  m_pMappedData(VMA_NULL),
5180  m_Metadata(hAllocator)
5181 {
5182 }
5183 
5184 void VmaDeviceMemoryBlock::Init(
5185  uint32_t newMemoryTypeIndex,
5186  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
5187  VkDeviceMemory newMemory,
5188  VkDeviceSize newSize,
5189  bool persistentMap,
5190  void* pMappedData)
5191 {
5192  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5193 
5194  m_MemoryTypeIndex = newMemoryTypeIndex;
5195  m_BlockVectorType = newBlockVectorType;
5196  m_hMemory = newMemory;
5197  m_PersistentMap = persistentMap;
5198  m_pMappedData = pMappedData;
5199 
5200  m_Metadata.Init(newSize);
5201 }
5202 
5203 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5204 {
5205  // This is the most important assert in the entire library.
5206  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5207  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5208 
5209  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5210  if(m_pMappedData != VMA_NULL)
5211  {
5212  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
5213  m_pMappedData = VMA_NULL;
5214  }
5215 
5216  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5217  m_hMemory = VK_NULL_HANDLE;
5218 }
5219 
5220 bool VmaDeviceMemoryBlock::Validate() const
5221 {
5222  if((m_hMemory == VK_NULL_HANDLE) ||
5223  (m_Metadata.GetSize() == 0))
5224  {
5225  return false;
5226  }
5227 
5228  return m_Metadata.Validate();
5229 }
5230 
5231 static void InitStatInfo(VmaStatInfo& outInfo)
5232 {
5233  memset(&outInfo, 0, sizeof(outInfo));
5234  outInfo.allocationSizeMin = UINT64_MAX;
5235  outInfo.unusedRangeSizeMin = UINT64_MAX;
5236 }
5237 
5238 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5239 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5240 {
5241  inoutInfo.blockCount += srcInfo.blockCount;
5242  inoutInfo.allocationCount += srcInfo.allocationCount;
5243  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5244  inoutInfo.usedBytes += srcInfo.usedBytes;
5245  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5246  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5247  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5248  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5249  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5250 }
5251 
5252 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5253 {
5254  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5255  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5256  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5257  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5258 }
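// VmaRoundDiv used above divides with rounding to nearest instead of
// truncating, so the reported averages are not biased downward. A minimal
// sketch of the assumed semantics:
#if 0 // illustrative sketch, not part of the library
template<typename T>
static inline T RoundDiv(T x, T y)
{
    // RoundDiv(10, 4) == 3, RoundDiv(9, 4) == 2.
    return (x + (y / (T)2)) / y;
}
#endif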
5259 
5260 VmaPool_T::VmaPool_T(
5261  VmaAllocator hAllocator,
5262  const VmaPoolCreateInfo& createInfo) :
5263  m_BlockVector(
5264  hAllocator,
5265  createInfo.memoryTypeIndex,
5266  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5267  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5268  createInfo.blockSize,
5269  createInfo.minBlockCount,
5270  createInfo.maxBlockCount,
5271  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5272  createInfo.frameInUseCount,
5273  true) // isCustomPool
5274 {
5275 }
5276 
5277 VmaPool_T::~VmaPool_T()
5278 {
5279 }
5280 
5281 #if VMA_STATS_STRING_ENABLED
5282 
5283 #endif // #if VMA_STATS_STRING_ENABLED
5284 
5285 VmaBlockVector::VmaBlockVector(
5286  VmaAllocator hAllocator,
5287  uint32_t memoryTypeIndex,
5288  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5289  VkDeviceSize preferredBlockSize,
5290  size_t minBlockCount,
5291  size_t maxBlockCount,
5292  VkDeviceSize bufferImageGranularity,
5293  uint32_t frameInUseCount,
5294  bool isCustomPool) :
5295  m_hAllocator(hAllocator),
5296  m_MemoryTypeIndex(memoryTypeIndex),
5297  m_BlockVectorType(blockVectorType),
5298  m_PreferredBlockSize(preferredBlockSize),
5299  m_MinBlockCount(minBlockCount),
5300  m_MaxBlockCount(maxBlockCount),
5301  m_BufferImageGranularity(bufferImageGranularity),
5302  m_FrameInUseCount(frameInUseCount),
5303  m_IsCustomPool(isCustomPool),
5304  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5305  m_HasEmptyBlock(false),
5306  m_pDefragmentator(VMA_NULL)
5307 {
5308 }
5309 
5310 VmaBlockVector::~VmaBlockVector()
5311 {
5312  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5313 
5314  for(size_t i = m_Blocks.size(); i--; )
5315  {
5316  m_Blocks[i]->Destroy(m_hAllocator);
5317  vma_delete(m_hAllocator, m_Blocks[i]);
5318  }
5319 }
5320 
5321 VkResult VmaBlockVector::CreateMinBlocks()
5322 {
5323  for(size_t i = 0; i < m_MinBlockCount; ++i)
5324  {
5325  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5326  if(res != VK_SUCCESS)
5327  {
5328  return res;
5329  }
5330  }
5331  return VK_SUCCESS;
5332 }
5333 
5334 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5335 {
5336  pStats->size = 0;
5337  pStats->unusedSize = 0;
5338  pStats->allocationCount = 0;
5339  pStats->unusedRangeCount = 0;
5340  pStats->unusedRangeSizeMax = 0;
5341 
5342  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5343 
5344  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5345  {
5346  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5347  VMA_ASSERT(pBlock);
5348  VMA_HEAVY_ASSERT(pBlock->Validate());
5349  pBlock->m_Metadata.AddPoolStats(*pStats);
5350  }
5351 }
5352 
5353 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5354 
5355 VkResult VmaBlockVector::Allocate(
5356  VmaPool hCurrentPool,
5357  uint32_t currentFrameIndex,
5358  const VkMemoryRequirements& vkMemReq,
5359  const VmaAllocationCreateInfo& createInfo,
5360  VmaSuballocationType suballocType,
5361  VmaAllocation* pAllocation)
5362 {
5363  // Validate flags.
5364  if(createInfo.pool != VK_NULL_HANDLE &&
5365  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5366  {
5367  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5368  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5369  }
5370 
5371  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5372 
5373  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5374  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5375  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5376  {
5377  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5378  VMA_ASSERT(pCurrBlock);
5379  VmaAllocationRequest currRequest = {};
5380  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5381  currentFrameIndex,
5382  m_FrameInUseCount,
5383  m_BufferImageGranularity,
5384  vkMemReq.size,
5385  vkMemReq.alignment,
5386  suballocType,
5387  false, // canMakeOtherLost
5388  &currRequest))
5389  {
5390  // Allocate from pCurrBlock.
5391  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5392 
5393  // We no longer have an empty block.
5394  if(pCurrBlock->m_Metadata.IsEmpty())
5395  {
5396  m_HasEmptyBlock = false;
5397  }
5398 
5399  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5400  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5401  (*pAllocation)->InitBlockAllocation(
5402  hCurrentPool,
5403  pCurrBlock,
5404  currRequest.offset,
5405  vkMemReq.alignment,
5406  vkMemReq.size,
5407  suballocType,
5408  createInfo.pUserData,
5409  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5410  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5411  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
5412  return VK_SUCCESS;
5413  }
5414  }
5415 
5416  const bool canCreateNewBlock =
5417  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5418  (m_Blocks.size() < m_MaxBlockCount);
5419 
5420  // 2. Try to create new block.
5421  if(canCreateNewBlock)
5422  {
5423  // 2.1. Start with full preferredBlockSize.
5424  VkDeviceSize blockSize = m_PreferredBlockSize;
5425  size_t newBlockIndex = 0;
5426  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5427  // Allocating blocks of other sizes is allowed only in default pools.
5428  // In custom pools block size is fixed.
5429  if(res < 0 && m_IsCustomPool == false)
5430  {
5431  // 2.2. Try half the size.
5432  blockSize /= 2;
5433  if(blockSize >= vkMemReq.size)
5434  {
5435  res = CreateBlock(blockSize, &newBlockIndex);
5436  if(res < 0)
5437  {
5438  // 2.3. Try quarter the size.
5439  blockSize /= 2;
5440  if(blockSize >= vkMemReq.size)
5441  {
5442  res = CreateBlock(blockSize, &newBlockIndex);
5443  }
5444  }
5445  }
5446  }
5447  if(res == VK_SUCCESS)
5448  {
5449  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5450  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5451 
5452  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
5453  VmaAllocationRequest allocRequest;
5454  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5455  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5456  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5457  (*pAllocation)->InitBlockAllocation(
5458  hCurrentPool,
5459  pBlock,
5460  allocRequest.offset,
5461  vkMemReq.alignment,
5462  vkMemReq.size,
5463  suballocType,
5464  createInfo.pUserData,
5465  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5466  VMA_HEAVY_ASSERT(pBlock->Validate());
5467  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
5468 
5469  return VK_SUCCESS;
5470  }
5471  }
5472 
5473  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5474 
5475  // 3. Try to allocate from existing blocks with making other allocations lost.
5476  if(canMakeOtherLost)
5477  {
5478  uint32_t tryIndex = 0;
5479  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5480  {
5481  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5482  VmaAllocationRequest bestRequest = {};
5483  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5484 
5485  // 1. Search existing allocations.
5486  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5487  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5488  {
5489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5490  VMA_ASSERT(pCurrBlock);
5491  VmaAllocationRequest currRequest = {};
5492  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5493  currentFrameIndex,
5494  m_FrameInUseCount,
5495  m_BufferImageGranularity,
5496  vkMemReq.size,
5497  vkMemReq.alignment,
5498  suballocType,
5499  canMakeOtherLost,
5500  &currRequest))
5501  {
5502  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5503  if(pBestRequestBlock == VMA_NULL ||
5504  currRequestCost < bestRequestCost)
5505  {
5506  pBestRequestBlock = pCurrBlock;
5507  bestRequest = currRequest;
5508  bestRequestCost = currRequestCost;
5509 
5510  if(bestRequestCost == 0)
5511  {
5512  break;
5513  }
5514  }
5515  }
5516  }
5517 
5518  if(pBestRequestBlock != VMA_NULL)
5519  {
5520  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5521  currentFrameIndex,
5522  m_FrameInUseCount,
5523  &bestRequest))
5524  {
5525  // We no longer have an empty block.
5526  if(pBestRequestBlock->m_Metadata.IsEmpty())
5527  {
5528  m_HasEmptyBlock = false;
5529  }
5530  // Allocate from pBestRequestBlock.
5531  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5532  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5533  (*pAllocation)->InitBlockAllocation(
5534  hCurrentPool,
5535  pBestRequestBlock,
5536  bestRequest.offset,
5537  vkMemReq.alignment,
5538  vkMemReq.size,
5539  suballocType,
5540  createInfo.pUserData,
5541  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5542  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5543  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
5544  return VK_SUCCESS;
5545  }
5546  // else: Some allocations must have been touched while we are here. Next try.
5547  }
5548  else
5549  {
5550  // Could not find place in any of the blocks - break outer loop.
5551  break;
5552  }
5553  }
5554  /* Maximum number of tries exceeded - a very unlikely event that can happen when
5555  many other threads are simultaneously touching allocations, making it impossible
5556  to mark them lost at the same time as we try to allocate. */
5557  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5558  {
5559  return VK_ERROR_TOO_MANY_OBJECTS;
5560  }
5561  }
5562 
5563  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5564 }
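// A minimal usage sketch of the "lost allocations" strategy implemented
// above. Illustrative only: myPool and bufCreateInfo are hypothetical, and
// the lost-check via VmaAllocationInfo::deviceMemory follows this version's
// documented behavior.
#if 0 // illustrative sketch, not part of the library
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = myPool; // hypothetical pool created with frameInUseCount set
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
// Internally this reaches VmaBlockVector::Allocate above: step 1 tries
// existing blocks, step 2 creates a new block, step 3 makes old allocations lost.
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

// Each frame, after vmaSetCurrentFrameIndex(), re-check before use:
vmaGetAllocationInfo(allocator, alloc, &allocInfo);
if(allocInfo.deviceMemory == VK_NULL_HANDLE)
{
    // The allocation was made lost - destroy and recreate the buffer.
}
#endif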
5565 
5566 void VmaBlockVector::Free(
5567  VmaAllocation hAllocation)
5568 {
5569  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5570 
5571  // Scope for lock.
5572  {
5573  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5574 
5575  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5576 
5577  pBlock->m_Metadata.Free(hAllocation);
5578  VMA_HEAVY_ASSERT(pBlock->Validate());
5579 
5580  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5581 
5582  // pBlock became empty after this deallocation.
5583  if(pBlock->m_Metadata.IsEmpty())
5584  {
5585  // We already have an empty block - we don't want to keep two, so delete this one.
5586  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5587  {
5588  pBlockToDelete = pBlock;
5589  Remove(pBlock);
5590  }
5591  // We now have our first empty block.
5592  else
5593  {
5594  m_HasEmptyBlock = true;
5595  }
5596  }
5597  // pBlock didn't become empty, but we have another empty block - find and free that one.
5598  // (This is optional, heuristics.)
5599  else if(m_HasEmptyBlock)
5600  {
5601  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
5602  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
5603  {
5604  pBlockToDelete = pLastBlock;
5605  m_Blocks.pop_back();
5606  m_HasEmptyBlock = false;
5607  }
5608  }
5609 
5610  IncrementallySortBlocks();
5611  }
5612 
5613  // Destruction of an empty block. Deferred until this point, outside of the mutex
5614  // lock, for performance reasons.
5615  if(pBlockToDelete != VMA_NULL)
5616  {
5617  VMA_DEBUG_LOG(" Deleted empty block");
5618  pBlockToDelete->Destroy(m_hAllocator);
5619  vma_delete(m_hAllocator, pBlockToDelete);
5620  }
5621 }
5622 
5623 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5624 {
5625  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5626  {
5627  if(m_Blocks[blockIndex] == pBlock)
5628  {
5629  VmaVectorRemove(m_Blocks, blockIndex);
5630  return;
5631  }
5632  }
5633  VMA_ASSERT(0);
5634 }
5635 
5636 void VmaBlockVector::IncrementallySortBlocks()
5637 {
5638  // Bubble sort only until first swap.
5639  for(size_t i = 1; i < m_Blocks.size(); ++i)
5640  {
5641  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
5642  {
5643  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5644  return;
5645  }
5646  }
5647 }
5648 
5649 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5650 {
5651  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5652  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5653  allocInfo.allocationSize = blockSize;
5654  VkDeviceMemory mem = VK_NULL_HANDLE;
5655  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5656  if(res < 0)
5657  {
5658  return res;
5659  }
5660 
5661  // New VkDeviceMemory successfully created.
5662 
5663  // Map memory if needed.
5664  void* pMappedData = VMA_NULL;
5665  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5666  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5667  {
5668  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5669  m_hAllocator->m_hDevice,
5670  mem,
5671  0,
5672  VK_WHOLE_SIZE,
5673  0,
5674  &pMappedData);
5675  if(res < 0)
5676  {
5677  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5678  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5679  return res;
5680  }
5681  }
5682 
5683  // Create a new block object for it.
5684  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5685  pBlock->Init(
5686  m_MemoryTypeIndex,
5687  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5688  mem,
5689  allocInfo.allocationSize,
5690  persistentMap,
5691  pMappedData);
5692 
5693  m_Blocks.push_back(pBlock);
5694  if(pNewBlockIndex != VMA_NULL)
5695  {
5696  *pNewBlockIndex = m_Blocks.size() - 1;
5697  }
5698 
5699  return VK_SUCCESS;
5700 }
5701 
5702 #if VMA_STATS_STRING_ENABLED
5703 
5704 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5705 {
5706  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5707 
5708  json.BeginObject();
5709 
5710  if(m_IsCustomPool)
5711  {
5712  json.WriteString("MemoryTypeIndex");
5713  json.WriteNumber(m_MemoryTypeIndex);
5714 
5715  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5716  {
5717  json.WriteString("Mapped");
5718  json.WriteBool(true);
5719  }
5720 
5721  json.WriteString("BlockSize");
5722  json.WriteNumber(m_PreferredBlockSize);
5723 
5724  json.WriteString("BlockCount");
5725  json.BeginObject(true);
5726  if(m_MinBlockCount > 0)
5727  {
5728  json.WriteString("Min");
5729  json.WriteNumber(m_MinBlockCount);
5730  }
5731  if(m_MaxBlockCount < SIZE_MAX)
5732  {
5733  json.WriteString("Max");
5734  json.WriteNumber(m_MaxBlockCount);
5735  }
5736  json.WriteString("Cur");
5737  json.WriteNumber(m_Blocks.size());
5738  json.EndObject();
5739 
5740  if(m_FrameInUseCount > 0)
5741  {
5742  json.WriteString("FrameInUseCount");
5743  json.WriteNumber(m_FrameInUseCount);
5744  }
5745  }
5746  else
5747  {
5748  json.WriteString("PreferredBlockSize");
5749  json.WriteNumber(m_PreferredBlockSize);
5750  }
5751 
5752  json.WriteString("Blocks");
5753  json.BeginArray();
5754  for(size_t i = 0; i < m_Blocks.size(); ++i)
5755  {
5756  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
5757  }
5758  json.EndArray();
5759 
5760  json.EndObject();
5761 }
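// For a custom pool the writer above emits JSON of roughly this shape
// (values illustrative):
//
//   {
//     "MemoryTypeIndex": 2,
//     "Mapped": true,
//     "BlockSize": 67108864,
//     "BlockCount": { "Min": 1, "Max": 8, "Cur": 2 },
//     "FrameInUseCount": 1,
//     "Blocks": [ /* one detailed map per block */ ]
//   }
//
// For a default pool only "PreferredBlockSize" and "Blocks" are written.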
5762 
5763 #endif // #if VMA_STATS_STRING_ENABLED
5764 
5765 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5766 {
5767  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5768 
5769  for(size_t i = m_Blocks.size(); i--; )
5770  {
5771  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5772  if(pBlock->m_pMappedData != VMA_NULL)
5773  {
5774  VMA_ASSERT(pBlock->m_PersistentMap);
5775  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5776  pBlock->m_pMappedData = VMA_NULL;
5777  }
5778  }
5779 }
5780 
5781 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5782 {
5783  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5784 
5785  VkResult finalResult = VK_SUCCESS;
5786  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5787  {
5788  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5789  if(pBlock->m_PersistentMap)
5790  {
5791  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5792  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5793  m_hAllocator->m_hDevice,
5794  pBlock->m_hMemory,
5795  0,
5796  VK_WHOLE_SIZE,
5797  0,
5798  &pBlock->m_pMappedData);
5799  if(localResult != VK_SUCCESS)
5800  {
5801  finalResult = localResult;
5802  }
5803  }
5804  }
5805  return finalResult;
5806 }
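// These two routines back the public vmaUnmapPersistentlyMappedMemory /
// vmaMapPersistentlyMappedMemory pair declared earlier in this header. A
// minimal usage sketch for the intended scenario of releasing mapped
// pointers while an application is inactive:
#if 0 // illustrative sketch, not part of the library
void OnAppDeactivated(VmaAllocator allocator)
{
    vmaUnmapPersistentlyMappedMemory(allocator); // unmaps all persistently mapped blocks
}
void OnAppActivated(VmaAllocator allocator)
{
    VkResult res = vmaMapPersistentlyMappedMemory(allocator);
    // Pointers previously obtained from VmaAllocationInfo::pMappedData are now
    // stale and must be re-queried with vmaGetAllocationInfo().
}
#endif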
5807 
5808 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5809  VmaAllocator hAllocator,
5810  uint32_t currentFrameIndex)
5811 {
5812  if(m_pDefragmentator == VMA_NULL)
5813  {
5814  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5815  hAllocator,
5816  this,
5817  currentFrameIndex);
5818  }
5819 
5820  return m_pDefragmentator;
5821 }
5822 
5823 VkResult VmaBlockVector::Defragment(
5824  VmaDefragmentationStats* pDefragmentationStats,
5825  VkDeviceSize& maxBytesToMove,
5826  uint32_t& maxAllocationsToMove)
5827 {
5828  if(m_pDefragmentator == VMA_NULL)
5829  {
5830  return VK_SUCCESS;
5831  }
5832 
5833  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5834 
5835  // Defragment.
5836  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5837 
5838  // Accumulate statistics.
5839  if(pDefragmentationStats != VMA_NULL)
5840  {
5841  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5842  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5843  pDefragmentationStats->bytesMoved += bytesMoved;
5844  pDefragmentationStats->allocationsMoved += allocationsMoved;
5845  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5846  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5847  maxBytesToMove -= bytesMoved;
5848  maxAllocationsToMove -= allocationsMoved;
5849  }
5850 
5851  // Free empty blocks.
5852  m_HasEmptyBlock = false;
5853  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5854  {
5855  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5856  if(pBlock->m_Metadata.IsEmpty())
5857  {
5858  if(m_Blocks.size() > m_MinBlockCount)
5859  {
5860  if(pDefragmentationStats != VMA_NULL)
5861  {
5862  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5863  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
5864  }
5865 
5866  VmaVectorRemove(m_Blocks, blockIndex);
5867  pBlock->Destroy(m_hAllocator);
5868  vma_delete(m_hAllocator, pBlock);
5869  }
5870  else
5871  {
5872  m_HasEmptyBlock = true;
5873  }
5874  }
5875  }
5876 
5877  return result;
5878 }
5879 
5880 void VmaBlockVector::DestroyDefragmentator()
5881 {
5882  if(m_pDefragmentator != VMA_NULL)
5883  {
5884  vma_delete(m_hAllocator, m_pDefragmentator);
5885  m_pDefragmentator = VMA_NULL;
5886  }
5887 }
5888 
5889 void VmaBlockVector::MakePoolAllocationsLost(
5890  uint32_t currentFrameIndex,
5891  size_t* pLostAllocationCount)
5892 {
5893  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5894 
5895  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5896  {
5897  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5898  VMA_ASSERT(pBlock);
5899  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5900  }
5901 }
5902 
5903 void VmaBlockVector::AddStats(VmaStats* pStats)
5904 {
5905  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5906  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5907 
5908  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5909 
5910  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5911  {
5912  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5913  VMA_ASSERT(pBlock);
5914  VMA_HEAVY_ASSERT(pBlock->Validate());
5915  VmaStatInfo allocationStatInfo;
5916  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
5917  VmaAddStatInfo(pStats->total, allocationStatInfo);
5918  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5919  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5920  }
5921 }
5922 
5924 // VmaDefragmentator members definition
5925 
5926 VmaDefragmentator::VmaDefragmentator(
5927  VmaAllocator hAllocator,
5928  VmaBlockVector* pBlockVector,
5929  uint32_t currentFrameIndex) :
5930  m_hAllocator(hAllocator),
5931  m_pBlockVector(pBlockVector),
5932  m_CurrentFrameIndex(currentFrameIndex),
5933  m_BytesMoved(0),
5934  m_AllocationsMoved(0),
5935  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5936  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5937 {
5938 }
5939 
5940 VmaDefragmentator::~VmaDefragmentator()
5941 {
5942  for(size_t i = m_Blocks.size(); i--; )
5943  {
5944  vma_delete(m_hAllocator, m_Blocks[i]);
5945  }
5946 }
5947 
5948 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5949 {
5950  AllocationInfo allocInfo;
5951  allocInfo.m_hAllocation = hAlloc;
5952  allocInfo.m_pChanged = pChanged;
5953  m_Allocations.push_back(allocInfo);
5954 }
5955 
5956 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5957 {
5958  // It has already been mapped for defragmentation.
5959  if(m_pMappedDataForDefragmentation)
5960  {
5961  *ppMappedData = m_pMappedDataForDefragmentation;
5962  return VK_SUCCESS;
5963  }
5964 
5965  // It is persistently mapped.
5966  if(m_pBlock->m_PersistentMap)
5967  {
5968  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5969  *ppMappedData = m_pBlock->m_pMappedData;
5970  return VK_SUCCESS;
5971  }
5972 
5973  // Map on first usage.
5974  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5975  hAllocator->m_hDevice,
5976  m_pBlock->m_hMemory,
5977  0,
5978  VK_WHOLE_SIZE,
5979  0,
5980  &m_pMappedDataForDefragmentation);
5981  *ppMappedData = m_pMappedDataForDefragmentation;
5982  return res;
5983 }
5984 
5985 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5986 {
5987  if(m_pMappedDataForDefragmentation != VMA_NULL)
5988  {
5989  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5990  }
5991 }
5992 
5993 VkResult VmaDefragmentator::DefragmentRound(
5994  VkDeviceSize maxBytesToMove,
5995  uint32_t maxAllocationsToMove)
5996 {
5997  if(m_Blocks.empty())
5998  {
5999  return VK_SUCCESS;
6000  }
6001 
6002  size_t srcBlockIndex = m_Blocks.size() - 1;
6003  size_t srcAllocIndex = SIZE_MAX;
6004  for(;;)
6005  {
6006  // 1. Find next allocation to move.
6007  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6008  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6009  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6010  {
6011  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6012  {
6013  // Finished: no more allocations to process.
6014  if(srcBlockIndex == 0)
6015  {
6016  return VK_SUCCESS;
6017  }
6018  else
6019  {
6020  --srcBlockIndex;
6021  srcAllocIndex = SIZE_MAX;
6022  }
6023  }
6024  else
6025  {
6026  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6027  }
6028  }
6029 
6030  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6031  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6032 
6033  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6034  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6035  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6036  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6037 
6038  // 2. Try to find new place for this allocation in preceding or current block.
6039  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6040  {
6041  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6042  VmaAllocationRequest dstAllocRequest;
6043  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6044  m_CurrentFrameIndex,
6045  m_pBlockVector->GetFrameInUseCount(),
6046  m_pBlockVector->GetBufferImageGranularity(),
6047  size,
6048  alignment,
6049  suballocType,
6050  false, // canMakeOtherLost
6051  &dstAllocRequest) &&
6052  MoveMakesSense(
6053  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6054  {
6055  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6056 
6057  // Reached limit on number of allocations or bytes to move.
6058  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6059  (m_BytesMoved + size > maxBytesToMove))
6060  {
6061  return VK_INCOMPLETE;
6062  }
6063 
6064  void* pDstMappedData = VMA_NULL;
6065  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6066  if(res != VK_SUCCESS)
6067  {
6068  return res;
6069  }
6070 
6071  void* pSrcMappedData = VMA_NULL;
6072  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6073  if(res != VK_SUCCESS)
6074  {
6075  return res;
6076  }
6077 
6078  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6079  memcpy(
6080  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6081  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6082  static_cast<size_t>(size));
6083 
6084  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6085  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6086 
6087  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6088 
6089  if(allocInfo.m_pChanged != VMA_NULL)
6090  {
6091  *allocInfo.m_pChanged = VK_TRUE;
6092  }
6093 
6094  ++m_AllocationsMoved;
6095  m_BytesMoved += size;
6096 
6097  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6098 
6099  break;
6100  }
6101  }
6102 
6103  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6104 
6105  if(srcAllocIndex > 0)
6106  {
6107  --srcAllocIndex;
6108  }
6109  else
6110  {
6111  if(srcBlockIndex > 0)
6112  {
6113  --srcBlockIndex;
6114  srcAllocIndex = SIZE_MAX;
6115  }
6116  else
6117  {
6118  return VK_SUCCESS;
6119  }
6120  }
6121  }
6122 }
6123 
6124 VkResult VmaDefragmentator::Defragment(
6125  VkDeviceSize maxBytesToMove,
6126  uint32_t maxAllocationsToMove)
6127 {
6128  if(m_Allocations.empty())
6129  {
6130  return VK_SUCCESS;
6131  }
6132 
6133  // Create block info for each block.
6134  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6135  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6136  {
6137  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6138  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6139  m_Blocks.push_back(pBlockInfo);
6140  }
6141 
6142  // Sort them by m_pBlock pointer value.
6143  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6144 
6145  // Move allocation infos from m_Allocations into the m_Allocations of their matching BlockInfo in m_Blocks.
6146  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6147  {
6148  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6149  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
6150  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6151  {
6152  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6153  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6154  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6155  {
6156  (*it)->m_Allocations.push_back(allocInfo);
6157  }
6158  else
6159  {
6160  VMA_ASSERT(0);
6161  }
6162  }
6163  }
6164  m_Allocations.clear();
6165 
6166  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6167  {
6168  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6169  pBlockInfo->CalcHasNonMovableAllocations();
6170  pBlockInfo->SortAllocationsBySizeDescecnding();
6171  }
6172 
6173  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6174  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6175 
6176  // Execute defragmentation rounds (the main part).
6177  VkResult result = VK_SUCCESS;
6178  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6179  {
6180  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6181  }
6182 
6183  // Unmap blocks that were mapped for defragmentation.
6184  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6185  {
6186  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6187  }
6188 
6189  return result;
6190 }
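// A minimal sketch of driving this defragmentator through the public
// vmaDefragment() entry point declared earlier in this header; allocs and
// ALLOC_COUNT are hypothetical, and the limits are arbitrary per-call budgets.
#if 0 // illustrative sketch, not part of the library
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
defragInfo.maxAllocationsToMove = 128;

VkBool32 changed[ALLOC_COUNT] = {};
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(allocator, allocs, ALLOC_COUNT, changed, &defragInfo, &stats);

// Where changed[i] == VK_TRUE the allocation was moved: it now has a different
// VkDeviceMemory/offset, so buffers or images bound to it must be destroyed,
// recreated and re-bound by the caller.
#endif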
6191 
6192 bool VmaDefragmentator::MoveMakesSense(
6193  size_t dstBlockIndex, VkDeviceSize dstOffset,
6194  size_t srcBlockIndex, VkDeviceSize srcOffset)
6195 {
6196  if(dstBlockIndex < srcBlockIndex)
6197  {
6198  return true;
6199  }
6200  if(dstBlockIndex > srcBlockIndex)
6201  {
6202  return false;
6203  }
6204  if(dstOffset < srcOffset)
6205  {
6206  return true;
6207  }
6208  return false;
6209 }
6210 
6212 // VmaAllocator_T
6213 
6214 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6215  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6216  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6217  m_PhysicalDevice(pCreateInfo->physicalDevice),
6218  m_hDevice(pCreateInfo->device),
6219  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6220  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6221  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6222  m_UnmapPersistentlyMappedMemoryCounter(0),
6223  m_PreferredLargeHeapBlockSize(0),
6224  m_PreferredSmallHeapBlockSize(0),
6225  m_CurrentFrameIndex(0),
6226  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6227 {
6228  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6229 
6230  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6231  memset(&m_MemProps, 0, sizeof(m_MemProps));
6232  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6233 
6234  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6235  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6236 
6237  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6238  {
6239  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6240  }
6241 
6242  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6243  {
6244  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6245  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6246  }
6247 
6248  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6249 
6250  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6251  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6252 
6253  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6254  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6255  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6256  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6257 
6258  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6259  {
6260  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6261  {
6262  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6263  if(limit != VK_WHOLE_SIZE)
6264  {
6265  m_HeapSizeLimit[heapIndex] = limit;
6266  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6267  {
6268  m_MemProps.memoryHeaps[heapIndex].size = limit;
6269  }
6270  }
6271  }
6272  }
6273 
6274  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6275  {
6276  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6277 
6278  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6279  {
6280  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6281  this,
6282  memTypeIndex,
6283  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6284  preferredBlockSize,
6285  0,
6286  SIZE_MAX,
6287  GetBufferImageGranularity(),
6288  pCreateInfo->frameInUseCount,
6289  false); // isCustomPool
6290  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6291  // because minBlockCount is 0.
6292  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6293  }
6294  }
6295 }
6296 
6297 VmaAllocator_T::~VmaAllocator_T()
6298 {
6299  VMA_ASSERT(m_Pools.empty());
6300 
6301  for(size_t i = GetMemoryTypeCount(); i--; )
6302  {
6303  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6304  {
6305  vma_delete(this, m_pDedicatedAllocations[i][j]);
6306  vma_delete(this, m_pBlockVectors[i][j]);
6307  }
6308  }
6309 }
6310 
6311 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6312 {
6313 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6314  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6315  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6316  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6317  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6318  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6319  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6320  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6321  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6322  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6323  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6324  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6325  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6326  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6327  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6328  // Ignoring vkGetBufferMemoryRequirements2KHR.
6329  // Ignoring vkGetImageMemoryRequirements2KHR.
6330 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6331 
6332 #define VMA_COPY_IF_NOT_NULL(funcName) \
6333  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6334 
6335  if(pVulkanFunctions != VMA_NULL)
6336  {
6337  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6338  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6339  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6340  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6341  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6342  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6343  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6344  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6345  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6346  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6347  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6348  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6349  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6350  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6351  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6352  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6353  }
6354 
6355 #undef VMA_COPY_IF_NOT_NULL
6356 
6357  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6358  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6359  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6360  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6361  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6362  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6363  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6364  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6365  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6366  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6367  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6368  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6369  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6370  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6371  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6372  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6373  if(m_UseKhrDedicatedAllocation)
6374  {
6375  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6376  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6377  }
6378 }
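// When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0 (e.g. with dynamically
// loaded entry points), every member must be supplied through
// VmaAllocatorCreateInfo::pVulkanFunctions or the asserts above will fire.
// A minimal sketch, assuming statically linked Vulkan symbols are available:
#if 0 // illustrative sketch, not part of the library
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
// ... and so on for every remaining member; the two *2KHR entries are needed
// only together with VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // hypothetical handles
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
#endif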
6379 
6380 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6381 {
6382  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6383  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6384  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6385  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6386 }
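// Worked example, assuming the default thresholds defined earlier in this
// file: a heap no larger than VMA_SMALL_HEAP_MAX_SIZE gets
// m_PreferredSmallHeapBlockSize, while a bigger (typically device-local) heap
// gets m_PreferredLargeHeapBlockSize. The heap size used here already
// reflects any pHeapSizeLimit clamping applied in the constructor above.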
6387 
6388 VkResult VmaAllocator_T::AllocateMemoryOfType(
6389  const VkMemoryRequirements& vkMemReq,
6390  bool dedicatedAllocation,
6391  VkBuffer dedicatedBuffer,
6392  VkImage dedicatedImage,
6393  const VmaAllocationCreateInfo& createInfo,
6394  uint32_t memTypeIndex,
6395  VmaSuballocationType suballocType,
6396  VmaAllocation* pAllocation)
6397 {
6398  VMA_ASSERT(pAllocation != VMA_NULL);
6399  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6400 
6401  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6402  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6403  VMA_ASSERT(blockVector);
6404 
6405  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6406 
6407  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6408  bool preferDedicatedMemory =
6409  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6410  dedicatedAllocation ||
6411  // Heuristics: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
6412  vkMemReq.size > preferredBlockSize / 2;
6413 
6414  if(preferDedicatedMemory &&
6415  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6416  finalCreateInfo.pool == VK_NULL_HANDLE)
6417  {
6418  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6419  }
6420 
6421  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
6422  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
6423  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6424  {
6425  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
6426  }
6427 
6428  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6429  {
6430  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6431  {
6432  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6433  }
6434  else
6435  {
6436  return AllocateDedicatedMemory(
6437  vkMemReq.size,
6438  suballocType,
6439  memTypeIndex,
6440  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6441  finalCreateInfo.pUserData,
6442  dedicatedBuffer,
6443  dedicatedImage,
6444  pAllocation);
6445  }
6446  }
6447  else
6448  {
6449  VkResult res = blockVector->Allocate(
6450  VK_NULL_HANDLE, // hCurrentPool
6451  m_CurrentFrameIndex.load(),
6452  vkMemReq,
6453  finalCreateInfo,
6454  suballocType,
6455  pAllocation);
6456  if(res == VK_SUCCESS)
6457  {
6458  return res;
6459  }
6460 
6461  // Allocation from existing blocks failed - try dedicated memory as a last resort.
6462  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6463  {
6464  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6465  }
6466  else
6467  {
6468  res = AllocateDedicatedMemory(
6469  vkMemReq.size,
6470  suballocType,
6471  memTypeIndex,
6472  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6473  finalCreateInfo.pUserData,
6474  dedicatedBuffer,
6475  dedicatedImage,
6476  pAllocation);
6477  if(res == VK_SUCCESS)
6478  {
6479  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
6480  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6481  return VK_SUCCESS;
6482  }
6483  else
6484  {
6485  // Everything failed: Return error code.
6486  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6487  return res;
6488  }
6489  }
6490  }
6491 }
6492 
6493 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6494  VkDeviceSize size,
6495  VmaSuballocationType suballocType,
6496  uint32_t memTypeIndex,
6497  bool map,
6498  void* pUserData,
6499  VkBuffer dedicatedBuffer,
6500  VkImage dedicatedImage,
6501  VmaAllocation* pAllocation)
6502 {
6503  VMA_ASSERT(pAllocation);
6504 
6505  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6506  allocInfo.memoryTypeIndex = memTypeIndex;
6507  allocInfo.allocationSize = size;
6508 
6509  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6510  if(m_UseKhrDedicatedAllocation)
6511  {
6512  if(dedicatedBuffer != VK_NULL_HANDLE)
6513  {
6514  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
6515  dedicatedAllocInfo.buffer = dedicatedBuffer;
6516  allocInfo.pNext = &dedicatedAllocInfo;
6517  }
6518  else if(dedicatedImage != VK_NULL_HANDLE)
6519  {
6520  dedicatedAllocInfo.image = dedicatedImage;
6521  allocInfo.pNext = &dedicatedAllocInfo;
6522  }
6523  }
6524 
6525  // Allocate VkDeviceMemory.
6526  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6527  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6528  if(res < 0)
6529  {
6530  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6531  return res;
6532  }
6533 
6534  void* pMappedData = VMA_NULL;
6535  if(map)
6536  {
6537  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6538  {
6539  res = (*m_VulkanFunctions.vkMapMemory)(
6540  m_hDevice,
6541  hMemory,
6542  0,
6543  VK_WHOLE_SIZE,
6544  0,
6545  &pMappedData);
6546  if(res < 0)
6547  {
6548  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6549  FreeVulkanMemory(memTypeIndex, size, hMemory);
6550  return res;
6551  }
6552  }
6553  }
6554 
6555  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6556  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6557 
6558  // Register it in m_pDedicatedAllocations.
6559  {
6560  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6561  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6562  VMA_ASSERT(pDedicatedAllocations);
6563  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
6564  }
6565 
6566  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
6567 
6568  return VK_SUCCESS;
6569 }
6570 
6571 void VmaAllocator_T::GetBufferMemoryRequirements(
6572  VkBuffer hBuffer,
6573  VkMemoryRequirements& memReq,
6574  bool& requiresDedicatedAllocation,
6575  bool& prefersDedicatedAllocation) const
6576 {
6577  if(m_UseKhrDedicatedAllocation)
6578  {
6579  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
6580  memReqInfo.buffer = hBuffer;
6581 
6582  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6583 
6584  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6585  memReq2.pNext = &memDedicatedReq;
6586 
6587  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6588 
6589  memReq = memReq2.memoryRequirements;
6590  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6591  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6592  }
6593  else
6594  {
6595  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
6596  requiresDedicatedAllocation = false;
6597  prefersDedicatedAllocation = false;
6598  }
6599 }
6600 
6601 void VmaAllocator_T::GetImageMemoryRequirements(
6602  VkImage hImage,
6603  VkMemoryRequirements& memReq,
6604  bool& requiresDedicatedAllocation,
6605  bool& prefersDedicatedAllocation) const
6606 {
6607  if(m_UseKhrDedicatedAllocation)
6608  {
6609  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
6610  memReqInfo.image = hImage;
6611 
6612  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6613 
6614  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6615  memReq2.pNext = &memDedicatedReq;
6616 
6617  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6618 
6619  memReq = memReq2.memoryRequirements;
6620  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6621  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6622  }
6623  else
6624  {
6625  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
6626  requiresDedicatedAllocation = false;
6627  prefersDedicatedAllocation = false;
6628  }
6629 }
6630 
6631 VkResult VmaAllocator_T::AllocateMemory(
6632  const VkMemoryRequirements& vkMemReq,
6633  bool requiresDedicatedAllocation,
6634  bool prefersDedicatedAllocation,
6635  VkBuffer dedicatedBuffer,
6636  VkImage dedicatedImage,
6637  const VmaAllocationCreateInfo& createInfo,
6638  VmaSuballocationType suballocType,
6639  VmaAllocation* pAllocation)
6640 {
6641  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
6642  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6643  {
6644  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6645  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6646  }
6647  if(requiresDedicatedAllocation)
6648  {
6649  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6650  {
6651  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
6652  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6653  }
6654  if(createInfo.pool != VK_NULL_HANDLE)
6655  {
6656  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
6657  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6658  }
6659  }
6660  if((createInfo.pool != VK_NULL_HANDLE) &&
6661  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
6662  {
6663  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
6664  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6665  }
6666 
6667  if(createInfo.pool != VK_NULL_HANDLE)
6668  {
6669  return createInfo.pool->m_BlockVector.Allocate(
6670  createInfo.pool,
6671  m_CurrentFrameIndex.load(),
6672  vkMemReq,
6673  createInfo,
6674  suballocType,
6675  pAllocation);
6676  }
6677  else
6678  {
6679  // Bit mask of Vulkan memory types acceptable for this allocation.
6680  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6681  uint32_t memTypeIndex = UINT32_MAX;
6682  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6683  if(res == VK_SUCCESS)
6684  {
6685  res = AllocateMemoryOfType(
6686  vkMemReq,
6687  requiresDedicatedAllocation || prefersDedicatedAllocation,
6688  dedicatedBuffer,
6689  dedicatedImage,
6690  createInfo,
6691  memTypeIndex,
6692  suballocType,
6693  pAllocation);
6694  // Succeeded on first try.
6695  if(res == VK_SUCCESS)
6696  {
6697  return res;
6698  }
6699  // Allocation from this memory type failed. Try other compatible memory types.
6700  else
6701  {
6702  for(;;)
6703  {
6704  // Remove old memTypeIndex from list of possibilities.
6705  memoryTypeBits &= ~(1u << memTypeIndex);
6706  // Find alternative memTypeIndex.
6707  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6708  if(res == VK_SUCCESS)
6709  {
6710  res = AllocateMemoryOfType(
6711  vkMemReq,
6712  requiresDedicatedAllocation || prefersDedicatedAllocation,
6713  dedicatedBuffer,
6714  dedicatedImage,
6715  createInfo,
6716  memTypeIndex,
6717  suballocType,
6718  pAllocation);
6719  // Allocation from this alternative memory type succeeded.
6720  if(res == VK_SUCCESS)
6721  {
6722  return res;
6723  }
6724  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6725  }
6726  // No other matching memory type index could be found.
6727  else
6728  {
6729  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6730  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6731  }
6732  }
6733  }
6734  }
6735  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6736  else
6737  return res;
6738  }
6739 }
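
// Illustrative note (example, not from the original library source): the
// fallback loop above removes each failed memory type from the candidate mask
// before retrying. With hypothetical values:
//
//   uint32_t memoryTypeBits = 0x06; // types 1 and 2 acceptable (bits 1 and 2 set)
//   uint32_t memTypeIndex   = 1;    // type 1 was tried first and failed
//   memoryTypeBits &= ~(1u << memTypeIndex); // mask becomes 0x04: only type 2 remains
//
// vmaFindMemoryTypeIndex() is then called again with the reduced mask until an
// allocation succeeds or no acceptable memory type remains.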
6740 
6741 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6742 {
6743  VMA_ASSERT(allocation);
6744 
6745  if(allocation->CanBecomeLost() == false ||
6746  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6747  {
6748  switch(allocation->GetType())
6749  {
6750  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6751  {
6752  VmaBlockVector* pBlockVector = VMA_NULL;
6753  VmaPool hPool = allocation->GetPool();
6754  if(hPool != VK_NULL_HANDLE)
6755  {
6756  pBlockVector = &hPool->m_BlockVector;
6757  }
6758  else
6759  {
6760  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6761  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6762  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6763  }
6764  pBlockVector->Free(allocation);
6765  }
6766  break;
6767  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
6768  FreeDedicatedMemory(allocation);
6769  break;
6770  default:
6771  VMA_ASSERT(0);
6772  }
6773  }
6774 
6775  vma_delete(this, allocation);
6776 }
6777 
6778 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6779 {
6780  // Initialize.
6781  InitStatInfo(pStats->total);
6782  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6783  InitStatInfo(pStats->memoryType[i]);
6784  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6785  InitStatInfo(pStats->memoryHeap[i]);
6786 
6787  // Process default pools.
6788  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6789  {
6790  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6791  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6792  {
6793  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6794  VMA_ASSERT(pBlockVector);
6795  pBlockVector->AddStats(pStats);
6796  }
6797  }
6798 
6799  // Process custom pools.
6800  {
6801  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6802  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6803  {
6804  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6805  }
6806  }
6807 
6808  // Process dedicated allocations.
6809  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6810  {
6811  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6812  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6813  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6814  {
6815  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
6816  VMA_ASSERT(pDedicatedAllocVector);
6817  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6818  {
6819  VmaStatInfo allocationStatInfo;
6820  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
6821  VmaAddStatInfo(pStats->total, allocationStatInfo);
6822  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6823  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6824  }
6825  }
6826  }
6827 
6828  // Postprocess.
6829  VmaPostprocessCalcStatInfo(pStats->total);
6830  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6831  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6832  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6833  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6834 }
6835 
6836 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
6837 
6838 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6839 {
6840  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6841  {
6842  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6843  {
6844  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6845  {
6846  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6847  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6848  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6849  {
6850  // Process DedicatedAllocations.
6851  {
6852  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6853  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6854  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
6855  {
6856  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
6857  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
6858  }
6859  }
6860 
6861  // Process normal Allocations.
6862  {
6863  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6864  pBlockVector->UnmapPersistentlyMappedMemory();
6865  }
6866  }
6867  }
6868 
6869  // Process custom pools.
6870  {
6871  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6872  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6873  {
6874  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6875  }
6876  }
6877  }
6878  }
6879 }
6880 
6881 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6882 {
6883  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6884  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6885  {
6886  VkResult finalResult = VK_SUCCESS;
6887  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6888  {
6889  // Process custom pools.
6890  {
6891  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6892  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6893  {
6894  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6895  }
6896  }
6897 
6898  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6899  {
6900  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6901  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6902  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6903  {
6904  // Process DedicatedAllocations.
6905  {
6906  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6907  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6908  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
6909  {
6910  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
6911  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
6912  }
6913  }
6914 
6915  // Process normal Allocations.
6916  {
6917  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6918  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6919  if(localResult != VK_SUCCESS)
6920  {
6921  finalResult = localResult;
6922  }
6923  }
6924  }
6925  }
6926  }
6927  return finalResult;
6928  }
6929  else
6930  return VK_SUCCESS;
6931 }
6932 
6933 VkResult VmaAllocator_T::Defragment(
6934  VmaAllocation* pAllocations,
6935  size_t allocationCount,
6936  VkBool32* pAllocationsChanged,
6937  const VmaDefragmentationInfo* pDefragmentationInfo,
6938  VmaDefragmentationStats* pDefragmentationStats)
6939 {
6940  if(pAllocationsChanged != VMA_NULL)
6941  {
6942  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
6943  }
6944  if(pDefragmentationStats != VMA_NULL)
6945  {
6946  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6947  }
6948 
6949  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6950  {
6951  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6952  return VK_ERROR_MEMORY_MAP_FAILED;
6953  }
6954 
6955  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6956 
6957  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6958 
6959  const size_t poolCount = m_Pools.size();
6960 
6961  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6962  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6963  {
6964  VmaAllocation hAlloc = pAllocations[allocIndex];
6965  VMA_ASSERT(hAlloc);
6966  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6967  // DedicatedAlloc cannot be defragmented.
6968  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6969  // Only HOST_VISIBLE memory types can be defragmented.
6970  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6971  // Lost allocation cannot be defragmented.
6972  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6973  {
6974  VmaBlockVector* pAllocBlockVector = nullptr;
6975 
6976  const VmaPool hAllocPool = hAlloc->GetPool();
6977  // This allocation belongs to a custom pool.
6978  if(hAllocPool != VK_NULL_HANDLE)
6979  {
6980  pAllocBlockVector = &hAllocPool->GetBlockVector();
6981  }
6982  // This allocation belongs to the general pool.
6983  else
6984  {
6985  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6986  }
6987 
6988  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6989 
6990  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6991  &pAllocationsChanged[allocIndex] : VMA_NULL;
6992  pDefragmentator->AddAllocation(hAlloc, pChanged);
6993  }
6994  }
6995 
6996  VkResult result = VK_SUCCESS;
6997 
6998  // ======== Main processing.
6999 
7000  VkDeviceSize maxBytesToMove = SIZE_MAX;
7001  uint32_t maxAllocationsToMove = UINT32_MAX;
7002  if(pDefragmentationInfo != VMA_NULL)
7003  {
7004  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7005  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7006  }
7007 
7008  // Process standard memory.
7009  for(uint32_t memTypeIndex = 0;
7010  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7011  ++memTypeIndex)
7012  {
7013  // Only HOST_VISIBLE memory types can be defragmented.
7014  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7015  {
7016  for(uint32_t blockVectorType = 0;
7017  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
7018  ++blockVectorType)
7019  {
7020  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
7021  pDefragmentationStats,
7022  maxBytesToMove,
7023  maxAllocationsToMove);
7024  }
7025  }
7026  }
7027 
7028  // Process custom pools.
7029  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7030  {
7031  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7032  pDefragmentationStats,
7033  maxBytesToMove,
7034  maxAllocationsToMove);
7035  }
7036 
7037  // ======== Destroy defragmentators.
7038 
7039  // Process custom pools.
7040  for(size_t poolIndex = poolCount; poolIndex--; )
7041  {
7042  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7043  }
7044 
7045  // Process standard memory.
7046  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7047  {
7048  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7049  {
7050  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
7051  {
7052  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
7053  }
7054  }
7055  }
7056 
7057  return result;
7058 }
7059 
7060 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7061 {
7062  if(hAllocation->CanBecomeLost())
7063  {
7064  /*
7065  Warning: This is a carefully designed algorithm.
7066  Do not modify unless you really know what you're doing :)
7067  */
7068  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7069  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7070  for(;;)
7071  {
7072  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7073  {
7074  pAllocationInfo->memoryType = UINT32_MAX;
7075  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7076  pAllocationInfo->offset = 0;
7077  pAllocationInfo->size = hAllocation->GetSize();
7078  pAllocationInfo->pMappedData = VMA_NULL;
7079  pAllocationInfo->pUserData = hAllocation->GetUserData();
7080  return;
7081  }
7082  else if(localLastUseFrameIndex == localCurrFrameIndex)
7083  {
7084  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7085  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7086  pAllocationInfo->offset = hAllocation->GetOffset();
7087  pAllocationInfo->size = hAllocation->GetSize();
7088  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7089  pAllocationInfo->pUserData = hAllocation->GetUserData();
7090  return;
7091  }
7092  else // Last use time earlier than current time.
7093  {
7094  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7095  {
7096  localLastUseFrameIndex = localCurrFrameIndex;
7097  }
7098  }
7099  }
7100  }
7101  // For allocations that cannot become lost we could use the same code as above, but for performance reasons we skip the hAllocation->LastUseFrameIndex atomic here.
7102  else
7103  {
7104  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7105  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7106  pAllocationInfo->offset = hAllocation->GetOffset();
7107  pAllocationInfo->size = hAllocation->GetSize();
7108  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7109  pAllocationInfo->pUserData = hAllocation->GetUserData();
7110  }
7111 }
7112 
7113 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7114 {
7115  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7116 
7117  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7118 
7119  if(newCreateInfo.maxBlockCount == 0)
7120  {
7121  newCreateInfo.maxBlockCount = SIZE_MAX;
7122  }
7123  if(newCreateInfo.blockSize == 0)
7124  {
7125  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7126  }
7127 
7128  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7129 
7130  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7131  if(res != VK_SUCCESS)
7132  {
7133  vma_delete(this, *pPool);
7134  *pPool = VMA_NULL;
7135  return res;
7136  }
7137 
7138  // Add to m_Pools.
7139  {
7140  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7141  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7142  }
7143 
7144  return VK_SUCCESS;
7145 }
7146 
7147 void VmaAllocator_T::DestroyPool(VmaPool pool)
7148 {
7149  // Remove from m_Pools.
7150  {
7151  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7152  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7153  VMA_ASSERT(success && "Pool not found in Allocator.");
7154  }
7155 
7156  vma_delete(this, pool);
7157 }
7158 
7159 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7160 {
7161  pool->m_BlockVector.GetPoolStats(pPoolStats);
7162 }
7163 
7164 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7165 {
7166  m_CurrentFrameIndex.store(frameIndex);
7167 }
7168 
7169 void VmaAllocator_T::MakePoolAllocationsLost(
7170  VmaPool hPool,
7171  size_t* pLostAllocationCount)
7172 {
7173  hPool->m_BlockVector.MakePoolAllocationsLost(
7174  m_CurrentFrameIndex.load(),
7175  pLostAllocationCount);
7176 }
7177 
7178 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7179 {
7180  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
7181  (*pAllocation)->InitLost();
7182 }
7183 
7184 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7185 {
7186  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7187 
7188  VkResult res;
7189  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7190  {
7191  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7192  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7193  {
7194  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7195  if(res == VK_SUCCESS)
7196  {
7197  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7198  }
7199  }
7200  else
7201  {
7202  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7203  }
7204  }
7205  else
7206  {
7207  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7208  }
7209 
7210  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7211  {
7212  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7213  }
7214 
7215  return res;
7216 }
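
// Example (illustrative sketch, not from the original library source): the
// per-heap budget enforced by AllocateVulkanMemory() above is configured via
// VmaAllocatorCreateInfo::pHeapSizeLimit. The function name and the 256 MiB
// cap below are hypothetical. VK_WHOLE_SIZE means "no limit" for a heap.
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice,
    VkDevice device,
    VmaAllocator* pAllocator)
{
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit on this heap.
    }
    heapSizeLimit[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.pHeapSizeLimit = heapSizeLimit; // Values are copied during creation.

    return vmaCreateAllocator(&createInfo, pAllocator);
}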
7217 
7218 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7219 {
7220  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7221  {
7222  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7223  }
7224 
7225  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7226 
7227  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7228  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7229  {
7230  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7231  m_HeapSizeLimit[heapIndex] += size;
7232  }
7233 }
7234 
7235 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7236 {
7237  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7238 
7239  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7240  {
7241  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7242  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
7243  VMA_ASSERT(pDedicatedAllocations);
7244  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7245  VMA_ASSERT(success);
7246  }
7247 
7248  VkDeviceMemory hMemory = allocation->GetMemory();
7249 
7250  if(allocation->GetMappedData() != VMA_NULL)
7251  {
7252  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7253  }
7254 
7255  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7256 
7257  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7258 }
7259 
7260 #if VMA_STATS_STRING_ENABLED
7261 
7262 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7263 {
7264  bool dedicatedAllocationsStarted = false;
7265  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7266  {
7267  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7268  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7269  {
7270  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
7271  VMA_ASSERT(pDedicatedAllocVector);
7272  if(pDedicatedAllocVector->empty() == false)
7273  {
7274  if(dedicatedAllocationsStarted == false)
7275  {
7276  dedicatedAllocationsStarted = true;
7277  json.WriteString("DedicatedAllocations");
7278  json.BeginObject();
7279  }
7280 
7281  json.BeginString("Type ");
7282  json.ContinueString(memTypeIndex);
7283  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7284  {
7285  json.ContinueString(" Mapped");
7286  }
7287  json.EndString();
7288 
7289  json.BeginArray();
7290 
7291  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7292  {
7293  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7294  json.BeginObject(true);
7295 
7296  json.WriteString("Size");
7297  json.WriteNumber(hAlloc->GetSize());
7298 
7299  json.WriteString("Type");
7300  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7301 
7302  json.EndObject();
7303  }
7304 
7305  json.EndArray();
7306  }
7307  }
7308  }
7309  if(dedicatedAllocationsStarted)
7310  {
7311  json.EndObject();
7312  }
7313 
7314  {
7315  bool allocationsStarted = false;
7316  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7317  {
7318  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7319  {
7320  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7321  {
7322  if(allocationsStarted == false)
7323  {
7324  allocationsStarted = true;
7325  json.WriteString("DefaultPools");
7326  json.BeginObject();
7327  }
7328 
7329  json.BeginString("Type ");
7330  json.ContinueString(memTypeIndex);
7331  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7332  {
7333  json.ContinueString(" Mapped");
7334  }
7335  json.EndString();
7336 
7337  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7338  }
7339  }
7340  }
7341  if(allocationsStarted)
7342  {
7343  json.EndObject();
7344  }
7345  }
7346 
7347  {
7348  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7349  const size_t poolCount = m_Pools.size();
7350  if(poolCount > 0)
7351  {
7352  json.WriteString("Pools");
7353  json.BeginArray();
7354  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7355  {
7356  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7357  }
7358  json.EndArray();
7359  }
7360  }
7361 }
7362 
7363 #endif // #if VMA_STATS_STRING_ENABLED
7364 
7365 static VkResult AllocateMemoryForImage(
7366  VmaAllocator allocator,
7367  VkImage image,
7368  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7369  VmaSuballocationType suballocType,
7370  VmaAllocation* pAllocation)
7371 {
7372  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7373 
7374  VkMemoryRequirements vkMemReq = {};
7375  bool requiresDedicatedAllocation = false;
7376  bool prefersDedicatedAllocation = false;
7377  allocator->GetImageMemoryRequirements(image, vkMemReq,
7378  requiresDedicatedAllocation, prefersDedicatedAllocation);
7379 
7380  return allocator->AllocateMemory(
7381  vkMemReq,
7382  requiresDedicatedAllocation,
7383  prefersDedicatedAllocation,
7384  VK_NULL_HANDLE, // dedicatedBuffer
7385  image, // dedicatedImage
7386  *pAllocationCreateInfo,
7387  suballocType,
7388  pAllocation);
7389 }
7390 
7391 ////////////////////////////////////////////////////////////////////////////////
7392 // Public interface
7393 
7394 VkResult vmaCreateAllocator(
7395  const VmaAllocatorCreateInfo* pCreateInfo,
7396  VmaAllocator* pAllocator)
7397 {
7398  VMA_ASSERT(pCreateInfo && pAllocator);
7399  VMA_DEBUG_LOG("vmaCreateAllocator");
7400  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7401  return VK_SUCCESS;
7402 }
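
// Example (illustrative sketch, not from the original library source): minimal
// allocator lifetime. physicalDevice and device are assumed to be valid handles
// created by the application beforehand.
static void ExampleAllocatorLifetime(VkPhysicalDevice physicalDevice, VkDevice device)
{
    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    if(vmaCreateAllocator(&createInfo, &allocator) == VK_SUCCESS)
    {
        // ... create buffers, images and allocations here ...
        vmaDestroyAllocator(allocator);
    }
}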
7403 
7404 void vmaDestroyAllocator(
7405  VmaAllocator allocator)
7406 {
7407  if(allocator != VK_NULL_HANDLE)
7408  {
7409  VMA_DEBUG_LOG("vmaDestroyAllocator");
7410  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7411  vma_delete(&allocationCallbacks, allocator);
7412  }
7413 }
7414 
7415 void vmaGetPhysicalDeviceProperties(
7416  VmaAllocator allocator,
7417  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7418 {
7419  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7420  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7421 }
7422 
7423 void vmaGetMemoryProperties(
7424  VmaAllocator allocator,
7425  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7426 {
7427  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7428  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7429 }
7430 
7431 void vmaGetMemoryTypeProperties(
7432  VmaAllocator allocator,
7433  uint32_t memoryTypeIndex,
7434  VkMemoryPropertyFlags* pFlags)
7435 {
7436  VMA_ASSERT(allocator && pFlags);
7437  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7438  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7439 }
7440 
7441 void vmaSetCurrentFrameIndex(
7442  VmaAllocator allocator,
7443  uint32_t frameIndex)
7444 {
7445  VMA_ASSERT(allocator);
7446  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7447 
7448  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7449 
7450  allocator->SetCurrentFrameIndex(frameIndex);
7451 }
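
// Example (illustrative sketch, not from the original library source): the
// frame index drives the lost-allocation machinery (see GetAllocationInfo()
// above), so advance it once per frame before using the allocator. The counter
// handling below is hypothetical; it must only never equal VMA_FRAME_INDEX_LOST.
static void ExampleBeginFrame(VmaAllocator allocator, uint32_t* pFrameCounter)
{
    ++*pFrameCounter;
    if(*pFrameCounter == VMA_FRAME_INDEX_LOST) // Skip the reserved "lost" value.
    {
        *pFrameCounter = 0;
    }
    vmaSetCurrentFrameIndex(allocator, *pFrameCounter);
}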
7452 
7453 void vmaCalculateStats(
7454  VmaAllocator allocator,
7455  VmaStats* pStats)
7456 {
7457  VMA_ASSERT(allocator && pStats);
7458  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7459  allocator->CalculateStats(pStats);
7460 }
7461 
7462 #if VMA_STATS_STRING_ENABLED
7463 
7464 void vmaBuildStatsString(
7465  VmaAllocator allocator,
7466  char** ppStatsString,
7467  VkBool32 detailedMap)
7468 {
7469  VMA_ASSERT(allocator && ppStatsString);
7470  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7471 
7472  VmaStringBuilder sb(allocator);
7473  {
7474  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7475  json.BeginObject();
7476 
7477  VmaStats stats;
7478  allocator->CalculateStats(&stats);
7479 
7480  json.WriteString("Total");
7481  VmaPrintStatInfo(json, stats.total);
7482 
7483  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7484  {
7485  json.BeginString("Heap ");
7486  json.ContinueString(heapIndex);
7487  json.EndString();
7488  json.BeginObject();
7489 
7490  json.WriteString("Size");
7491  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7492 
7493  json.WriteString("Flags");
7494  json.BeginArray(true);
7495  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7496  {
7497  json.WriteString("DEVICE_LOCAL");
7498  }
7499  json.EndArray();
7500 
7501  if(stats.memoryHeap[heapIndex].blockCount > 0)
7502  {
7503  json.WriteString("Stats");
7504  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7505  }
7506 
7507  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7508  {
7509  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7510  {
7511  json.BeginString("Type ");
7512  json.ContinueString(typeIndex);
7513  json.EndString();
7514 
7515  json.BeginObject();
7516 
7517  json.WriteString("Flags");
7518  json.BeginArray(true);
7519  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7520  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7521  {
7522  json.WriteString("DEVICE_LOCAL");
7523  }
7524  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7525  {
7526  json.WriteString("HOST_VISIBLE");
7527  }
7528  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7529  {
7530  json.WriteString("HOST_COHERENT");
7531  }
7532  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7533  {
7534  json.WriteString("HOST_CACHED");
7535  }
7536  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7537  {
7538  json.WriteString("LAZILY_ALLOCATED");
7539  }
7540  json.EndArray();
7541 
7542  if(stats.memoryType[typeIndex].blockCount > 0)
7543  {
7544  json.WriteString("Stats");
7545  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7546  }
7547 
7548  json.EndObject();
7549  }
7550  }
7551 
7552  json.EndObject();
7553  }
7554  if(detailedMap == VK_TRUE)
7555  {
7556  allocator->PrintDetailedMap(json);
7557  }
7558 
7559  json.EndObject();
7560  }
7561 
7562  const size_t len = sb.GetLength();
7563  char* const pChars = vma_new_array(allocator, char, len + 1);
7564  if(len > 0)
7565  {
7566  memcpy(pChars, sb.GetData(), len);
7567  }
7568  pChars[len] = '\0';
7569  *ppStatsString = pChars;
7570 }
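
// Example (illustrative sketch, not from the original library source): dumping
// the JSON statistics built above. Assumes C stdio is available; the string
// must always be released with vmaFreeStatsString().
static void ExampleDumpStats(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
}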
7571 
7572 void vmaFreeStatsString(
7573  VmaAllocator allocator,
7574  char* pStatsString)
7575 {
7576  if(pStatsString != VMA_NULL)
7577  {
7578  VMA_ASSERT(allocator);
7579  size_t len = strlen(pStatsString);
7580  vma_delete_array(allocator, pStatsString, len + 1);
7581  }
7582 }
7583 
7584 #endif // #if VMA_STATS_STRING_ENABLED
7585 
7588 VkResult vmaFindMemoryTypeIndex(
7589  VmaAllocator allocator,
7590  uint32_t memoryTypeBits,
7591  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7592  uint32_t* pMemoryTypeIndex)
7593 {
7594  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7597 
7598  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7599  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7600  if(preferredFlags == 0)
7601  {
7602  preferredFlags = requiredFlags;
7603  }
7604  // preferredFlags, if not 0, must be a superset of requiredFlags.
7605  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7606 
7607  // Convert usage to requiredFlags and preferredFlags.
7608  switch(pAllocationCreateInfo->usage)
7609  {
7610  case VMA_MEMORY_USAGE_UNKNOWN:
7611  break;
7612  case VMA_MEMORY_USAGE_GPU_ONLY:
7613  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7614  break;
7615  case VMA_MEMORY_USAGE_CPU_ONLY:
7616  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7617  break;
7618  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7619  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7620  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7621  break;
7622  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7623  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7624  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7625  break;
7626  default:
7627  break;
7628  }
7629 
7630  *pMemoryTypeIndex = UINT32_MAX;
7631  uint32_t minCost = UINT32_MAX;
7632  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7633  memTypeIndex < allocator->GetMemoryTypeCount();
7634  ++memTypeIndex, memTypeBit <<= 1)
7635  {
7636  // This memory type is acceptable according to memoryTypeBits bitmask.
7637  if((memTypeBit & memoryTypeBits) != 0)
7638  {
7639  const VkMemoryPropertyFlags currFlags =
7640  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7641  // This memory type contains requiredFlags.
7642  if((requiredFlags & ~currFlags) == 0)
7643  {
7644  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7645  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7646  // Remember memory type with lowest cost.
7647  if(currCost < minCost)
7648  {
7649  *pMemoryTypeIndex = memTypeIndex;
7650  if(currCost == 0)
7651  {
7652  return VK_SUCCESS;
7653  }
7654  minCost = currCost;
7655  }
7656  }
7657  }
7658  }
7659  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7660 }
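
// Example (illustrative sketch, not from the original library source): using
// the cost-based search above to pick a memory type for a staging buffer. The
// memoryTypeBits value would normally come from VkMemoryRequirements.
static VkResult ExampleFindStagingMemoryType(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    // CPU_ONLY requires HOST_VISIBLE | HOST_COHERENT, per the switch above.
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemTypeIndex);
}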
7661 
7662 VkResult vmaCreatePool(
7663  VmaAllocator allocator,
7664  const VmaPoolCreateInfo* pCreateInfo,
7665  VmaPool* pPool)
7666 {
7667  VMA_ASSERT(allocator && pCreateInfo && pPool);
7668 
7669  VMA_DEBUG_LOG("vmaCreatePool");
7670 
7671  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7672 
7673  return allocator->CreatePool(pCreateInfo, pPool);
7674 }
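
// Example (illustrative sketch, not from the original library source): creating
// a custom pool. Block size and counts are hypothetical; memoryTypeIndex would
// typically come from vmaFindMemoryTypeIndex().
static VkResult ExampleCreateCustomPool(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block.
    poolCreateInfo.minBlockCount = 1; // Keep at least one block allocated (see CreateMinBlocks above).
    poolCreateInfo.maxBlockCount = 8; // Never allocate more than 8 blocks.
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}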
7675 
7676 void vmaDestroyPool(
7677  VmaAllocator allocator,
7678  VmaPool pool)
7679 {
7680  VMA_ASSERT(allocator && pool);
7681 
7682  VMA_DEBUG_LOG("vmaDestroyPool");
7683 
7684  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7685 
7686  allocator->DestroyPool(pool);
7687 }
7688 
7689 void vmaGetPoolStats(
7690  VmaAllocator allocator,
7691  VmaPool pool,
7692  VmaPoolStats* pPoolStats)
7693 {
7694  VMA_ASSERT(allocator && pool && pPoolStats);
7695 
7696  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7697 
7698  allocator->GetPoolStats(pool, pPoolStats);
7699 }
7700 
7701 void vmaMakePoolAllocationsLost(
7702  VmaAllocator allocator,
7703  VmaPool pool,
7704  size_t* pLostAllocationCount)
7705 {
7706  VMA_ASSERT(allocator && pool);
7707 
7708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7709 
7710  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7711 }
7712 
7713 VkResult vmaAllocateMemory(
7714  VmaAllocator allocator,
7715  const VkMemoryRequirements* pVkMemoryRequirements,
7716  const VmaAllocationCreateInfo* pCreateInfo,
7717  VmaAllocation* pAllocation,
7718  VmaAllocationInfo* pAllocationInfo)
7719 {
7720  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7721 
7722  VMA_DEBUG_LOG("vmaAllocateMemory");
7723 
7724  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7725 
7726  VkResult result = allocator->AllocateMemory(
7727  *pVkMemoryRequirements,
7728  false, // requiresDedicatedAllocation
7729  false, // prefersDedicatedAllocation
7730  VK_NULL_HANDLE, // dedicatedBuffer
7731  VK_NULL_HANDLE, // dedicatedImage
7732  *pCreateInfo,
7733  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7734  pAllocation);
7735 
7736  if(pAllocationInfo && result == VK_SUCCESS)
7737  {
7738  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7739  }
7740 
7741  return result;
7742 }
7743 
7744 VkResult vmaAllocateMemoryForBuffer(
7745  VmaAllocator allocator,
7746  VkBuffer buffer,
7747  const VmaAllocationCreateInfo* pCreateInfo,
7748  VmaAllocation* pAllocation,
7749  VmaAllocationInfo* pAllocationInfo)
7750 {
7751  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7752 
7753  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7754 
7755  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7756 
7757  VkMemoryRequirements vkMemReq = {};
7758  bool requiresDedicatedAllocation = false;
7759  bool prefersDedicatedAllocation = false;
7760  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
7761  requiresDedicatedAllocation,
7762  prefersDedicatedAllocation);
7763 
7764  VkResult result = allocator->AllocateMemory(
7765  vkMemReq,
7766  requiresDedicatedAllocation,
7767  prefersDedicatedAllocation,
7768  buffer, // dedicatedBuffer
7769  VK_NULL_HANDLE, // dedicatedImage
7770  *pCreateInfo,
7771  VMA_SUBALLOCATION_TYPE_BUFFER,
7772  pAllocation);
7773 
7774  if(pAllocationInfo && result == VK_SUCCESS)
7775  {
7776  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7777  }
7778 
7779  return result;
7780 }
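
// Example (illustrative sketch, not from the original library source):
// vmaAllocateMemoryForBuffer() only allocates; binding is still the caller's
// job. Assumes `buffer` was just created and has no memory bound yet.
static VkResult ExampleAllocAndBindBuffer(
    VmaAllocator allocator,
    VkDevice device,
    VkBuffer buffer,
    VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, pAllocation, &allocInfo);
    if(res == VK_SUCCESS)
    {
        res = vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);
    }
    return res;
}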
7781 
7782 VkResult vmaAllocateMemoryForImage(
7783  VmaAllocator allocator,
7784  VkImage image,
7785  const VmaAllocationCreateInfo* pCreateInfo,
7786  VmaAllocation* pAllocation,
7787  VmaAllocationInfo* pAllocationInfo)
7788 {
7789  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7790 
7791  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7792 
7793  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7794 
7795  VkResult result = AllocateMemoryForImage(
7796  allocator,
7797  image,
7798  pCreateInfo,
7799  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7800  pAllocation);
7801 
7802  if(pAllocationInfo && result == VK_SUCCESS)
7803  {
7804  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7805  }
7806 
7807  return result;
7808 }
7809 
7810 void vmaFreeMemory(
7811  VmaAllocator allocator,
7812  VmaAllocation allocation)
7813 {
7814  VMA_ASSERT(allocator && allocation);
7815 
7816  VMA_DEBUG_LOG("vmaFreeMemory");
7817 
7818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7819 
7820  allocator->FreeMemory(allocation);
7821 }
7822 
7823 void vmaGetAllocationInfo(
7824  VmaAllocator allocator,
7825  VmaAllocation allocation,
7826  VmaAllocationInfo* pAllocationInfo)
7827 {
7828  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7829 
7830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7831 
7832  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7833 }
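
// Example (illustrative sketch, not from the original library source): per
// GetAllocationInfo() above, a lost allocation reports deviceMemory ==
// VK_NULL_HANDLE, at which point the caller must recreate the resource.
static bool ExampleIsAllocationLost(VmaAllocator allocator, VmaAllocation allocation)
{
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, allocation, &info);
    return info.deviceMemory == VK_NULL_HANDLE;
}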
7834 
7835 void vmaSetAllocationUserData(
7836  VmaAllocator allocator,
7837  VmaAllocation allocation,
7838  void* pUserData)
7839 {
7840  VMA_ASSERT(allocator && allocation);
7841 
7842  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7843 
7844  allocation->SetUserData(pUserData);
7845 }
7846 
7847 void vmaCreateLostAllocation(
7848  VmaAllocator allocator,
7849  VmaAllocation* pAllocation)
7850 {
7851  VMA_ASSERT(allocator && pAllocation);
7852 
7853  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7854 
7855  allocator->CreateLostAllocation(pAllocation);
7856 }
7857 
7858 VkResult vmaMapMemory(
7859  VmaAllocator allocator,
7860  VmaAllocation allocation,
7861  void** ppData)
7862 {
7863  VMA_ASSERT(allocator && allocation && ppData);
7864 
7865  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7866 
7867  return (*allocator->GetVulkanFunctions().vkMapMemory)(
7868  allocator->m_hDevice,
7869  allocation->GetMemory(),
7870  allocation->GetOffset(),
7871  allocation->GetSize(),
7872  0,
7873  ppData);
7874 }
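
// Example (illustrative sketch, not from the original library source):
// uploading data through vmaMapMemory()/vmaUnmapMemory(). Assumes the
// allocation lives in HOST_VISIBLE memory, e.g. VMA_MEMORY_USAGE_CPU_ONLY,
// and that `size` does not exceed the allocation's size.
static VkResult ExampleUpload(
    VmaAllocator allocator,
    VmaAllocation allocation,
    const void* pSrcData,
    size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, pSrcData, size);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}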
7875 
7876 void vmaUnmapMemory(
7877  VmaAllocator allocator,
7878  VmaAllocation allocation)
7879 {
7880  VMA_ASSERT(allocator && allocation);
7881 
7882  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7883 
7884  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
7885 }
7886 
7887 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7888 {
7889  VMA_ASSERT(allocator);
7890 
7891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7892 
7893  allocator->UnmapPersistentlyMappedMemory();
7894 }
7895 
7896 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7897 {
7898  VMA_ASSERT(allocator);
7899 
7900  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7901 
7902  return allocator->MapPersistentlyMappedMemory();
7903 }
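
// Example (illustrative sketch, not from the original library source): the two
// calls above form a bracket around code that must run while persistently
// mapped memory is unmapped. The counter makes nested brackets safe; only the
// outermost pair actually unmaps and remaps.
static VkResult ExamplePersistentMapBracket(VmaAllocator allocator)
{
    vmaUnmapPersistentlyMappedMemory(allocator);
    // ... work that requires DEVICE_LOCAL | HOST_VISIBLE memory to be unmapped
    // (relevant on AMD hardware, per the vendorID check above) ...
    return vmaMapPersistentlyMappedMemory(allocator); // Remap; check for VK_SUCCESS.
}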
7904 
7905 VkResult vmaDefragment(
7906  VmaAllocator allocator,
7907  VmaAllocation* pAllocations,
7908  size_t allocationCount,
7909  VkBool32* pAllocationsChanged,
7910  const VmaDefragmentationInfo *pDefragmentationInfo,
7911  VmaDefragmentationStats* pDefragmentationStats)
7912 {
7913  VMA_ASSERT(allocator && pAllocations);
7914 
7915  VMA_DEBUG_LOG("vmaDefragment");
7916 
7917  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7918 
7919  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7920 }
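
// Example (illustrative sketch, not from the original library source): a
// bounded defragmentation pass. Limits are hypothetical. Entries set to
// VK_TRUE in pAllocationsChanged moved to a new VkDeviceMemory/offset, so any
// buffers or images bound to them must be recreated and rebound by the caller.
static VkResult ExampleDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged) // One entry per allocation; may also be null if not needed.
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 16ull * 1024 * 1024; // Move at most 16 MiB per call.
    defragInfo.maxAllocationsToMove = 64;

    VmaDefragmentationStats stats = {};
    return vmaDefragment(allocator, pAllocations, allocationCount, pAllocationsChanged, &defragInfo, &stats);
}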
7921 
7922 VkResult vmaCreateBuffer(
7923  VmaAllocator allocator,
7924  const VkBufferCreateInfo* pBufferCreateInfo,
7925  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7926  VkBuffer* pBuffer,
7927  VmaAllocation* pAllocation,
7928  VmaAllocationInfo* pAllocationInfo)
7929 {
7930  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7931 
7932  VMA_DEBUG_LOG("vmaCreateBuffer");
7933 
7934  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7935 
7936  *pBuffer = VK_NULL_HANDLE;
7937  *pAllocation = VK_NULL_HANDLE;
7938 
7939  // 1. Create VkBuffer.
7940  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7941  allocator->m_hDevice,
7942  pBufferCreateInfo,
7943  allocator->GetAllocationCallbacks(),
7944  pBuffer);
7945  if(res >= 0)
7946  {
7947  // 2. vkGetBufferMemoryRequirements.
7948  VkMemoryRequirements vkMemReq = {};
7949  bool requiresDedicatedAllocation = false;
7950  bool prefersDedicatedAllocation = false;
7951  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
7952  requiresDedicatedAllocation, prefersDedicatedAllocation);
7953 
7954  // 3. Allocate memory using allocator.
7955  res = allocator->AllocateMemory(
7956  vkMemReq,
7957  requiresDedicatedAllocation,
7958  prefersDedicatedAllocation,
7959  *pBuffer, // dedicatedBuffer
7960  VK_NULL_HANDLE, // dedicatedImage
7961  *pAllocationCreateInfo,
7962  VMA_SUBALLOCATION_TYPE_BUFFER,
7963  pAllocation);
7964  if(res >= 0)
7965  {
7966  // 4. Bind buffer with memory.
7967  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7968  allocator->m_hDevice,
7969  *pBuffer,
7970  (*pAllocation)->GetMemory(),
7971  (*pAllocation)->GetOffset());
7972  if(res >= 0)
7973  {
7974  // All steps succeeded.
7975  if(pAllocationInfo != VMA_NULL)
7976  {
7977  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7978  }
7979  return VK_SUCCESS;
7980  }
7981  allocator->FreeMemory(*pAllocation);
7982  *pAllocation = VK_NULL_HANDLE;
7983  return res;
7984  }
7985  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7986  *pBuffer = VK_NULL_HANDLE;
7987  return res;
7988  }
7989  return res;
7990 }
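
// Example (illustrative sketch, not from the original library source): the
// common one-call path combining steps 1-4 above. Buffer usage and the GPU_ONLY
// choice are hypothetical. On success the buffer is already bound; destroy both
// objects together with vmaDestroyBuffer().
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator,
    VkDeviceSize size,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuffer, pAllocation, VMA_NULL);
}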
7991 
7992 void vmaDestroyBuffer(
7993  VmaAllocator allocator,
7994  VkBuffer buffer,
7995  VmaAllocation allocation)
7996 {
7997  if(buffer != VK_NULL_HANDLE)
7998  {
7999  VMA_ASSERT(allocator);
8000 
8001  VMA_DEBUG_LOG("vmaDestroyBuffer");
8002 
8003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8004 
8005  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8006 
8007  allocator->FreeMemory(allocation);
8008  }
8009 }
8010 
8011 VkResult vmaCreateImage(
8012  VmaAllocator allocator,
8013  const VkImageCreateInfo* pImageCreateInfo,
8014  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8015  VkImage* pImage,
8016  VmaAllocation* pAllocation,
8017  VmaAllocationInfo* pAllocationInfo)
8018 {
8019  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8020 
8021  VMA_DEBUG_LOG("vmaCreateImage");
8022 
8023  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8024 
8025  *pImage = VK_NULL_HANDLE;
8026  *pAllocation = VK_NULL_HANDLE;
8027 
8028  // 1. Create VkImage.
8029  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8030  allocator->m_hDevice,
8031  pImageCreateInfo,
8032  allocator->GetAllocationCallbacks(),
8033  pImage);
8034  if(res >= 0)
8035  {
8036  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8037  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8038  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8039 
8040  // 2. Allocate memory using allocator.
8041  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8042  if(res >= 0)
8043  {
8044  // 3. Bind image with memory.
8045  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8046  allocator->m_hDevice,
8047  *pImage,
8048  (*pAllocation)->GetMemory(),
8049  (*pAllocation)->GetOffset());
8050  if(res >= 0)
8051  {
8052  // All steps succeeded.
8053  if(pAllocationInfo != VMA_NULL)
8054  {
8055  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8056  }
8057  return VK_SUCCESS;
8058  }
8059  allocator->FreeMemory(*pAllocation);
8060  *pAllocation = VK_NULL_HANDLE;
8061  return res;
8062  }
8063  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8064  *pImage = VK_NULL_HANDLE;
8065  return res;
8066  }
8067  return res;
8068 }
8069 
8070 void vmaDestroyImage(
8071  VmaAllocator allocator,
8072  VkImage image,
8073  VmaAllocation allocation)
8074 {
8075  if(image != VK_NULL_HANDLE)
8076  {
8077  VMA_ASSERT(allocator);
8078 
8079  VMA_DEBUG_LOG("vmaDestroyImage");
8080 
8081  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8082 
8083  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8084 
8085  allocator->FreeMemory(allocation);
8086  }
8087 }
8088 
8089 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:486
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:703
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:511
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:496
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:677
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:490
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:962
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:508
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1115
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:832
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:886
Definition: vk_mem_alloc.h:741
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:479
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:774
Definition: vk_mem_alloc.h:687
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:523
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:570
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:505
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:520
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:691
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:635
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:493
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:634
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:501
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1119
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:540
VmaStatInfo total
Definition: vk_mem_alloc.h:644
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1127
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:757
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1110
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:494
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:415
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:514
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:840
Definition: vk_mem_alloc.h:834
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:972
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:491
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:776
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:856
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:892
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:477
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:843
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:672
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1105
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1123
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:683
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:492
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:640
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:421
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:442
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:447
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1125
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:768
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:902
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:487
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:623
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:851
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:434
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:748
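A minimal sketch of the common vmaCreateBuffer() path shown above; the buffer size and usage flags are illustrative assumptions:

// Sketch: create a device-local vertex buffer together with its memory.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536; // illustrative size in bytes
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                      VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // device-local memory

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, NULL);
if(res == VK_SUCCESS)
{
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation); // also frees the memory
}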
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:636
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:438
VmaPoolCreateFlags flags
Use a combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:846
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:686
struct VmaPoolStats VmaPoolStats
Describes the parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
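By analogy with the buffer sketch above, an illustrative image creation; all image parameters here are assumptions for the example:

// Sketch: create a sampled 2D texture with device-local memory.
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, NULL);
if(res == VK_SUCCESS)
    vmaDestroyImage(allocator, image, allocation);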
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:763
Definition: vk_mem_alloc.h:754
uint32_t blockCount
Number of VkDeviceMemory blocks allocated.
Definition: vk_mem_alloc.h:626
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:489
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:864
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:526
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:895
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
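One plausible pairing, offered purely as a sketch: the unmap/map calls bracket a stretch of work during which the library's persistently mapped memory should not remain mapped (when exactly that is required is platform-specific and not restated here):

// Sketch: temporarily unmap all persistently mapped memory, then map it back.
vmaUnmapPersistentlyMappedMemory(allocator);
// ... work that requires the memory to be unmapped ...
VkResult res = vmaMapPersistentlyMappedMemory(allocator);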
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:752
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:781
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on the maximum number of bytes that can be allocated out of each Vulkan memory heap.
Definition: vk_mem_alloc.h:558
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:642
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:635
Definition: vk_mem_alloc.h:814
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:498
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:436
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:497
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
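A sketch of the usual map/write/unmap pattern for a host-visible allocation; the source data and the pairing with vmaUnmapMemory() are assumptions of this example:

// Sketch: upload data through a mapped pointer.
void* mappedData;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcSize); // srcData/srcSize: application's data
    vmaUnmapMemory(allocator, allocation);
}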
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:878
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
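A sketch of allocating memory for an already-created image and binding it with the returned (deviceMemory, offset) pair; the image and device handles are assumed to exist:

// Sketch: allocate and bind memory for an existing VkImage.
VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemoryForImage(allocator, image, &allocCreateInfo,
    &allocation, &allocInfo);
if(res == VK_SUCCESS)
    vkBindImageMemory(device, image, allocInfo.deviceMemory, allocInfo.offset);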
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:983
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps.
Definition: vk_mem_alloc.h:517
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:635
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:632
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes the parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:883
VkDeviceSize offset
Offset into the deviceMemory object to the beginning of this allocation, in bytes. The (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:967
Definition: vk_mem_alloc.h:750
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1121
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:485
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
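A minimal sketch of allocator creation; only the required members are set and everything else is left at its zero default:

// Sketch: create the allocator once, destroy it before the device.
VmaAllocatorCreateInfo allocatorInfo = {0};
allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
allocatorInfo.device = device;                 // assumed to exist

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
if(res == VK_SUCCESS)
{
    // ... use the allocator for the lifetime of the device ...
    vmaDestroyAllocator(allocator);
}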
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:500
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:630
No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
Definition: vk_mem_alloc.h:675
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:836
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:628
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:495
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:499
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:714
Memory will be mapped on the host. Can be used for transfers to/from the device.
Definition: vk_mem_alloc.h:680
void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
Definition: vk_mem_alloc.h:978
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:475
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:488
Parameters of a VmaAllocation object, which can be retrieved using the function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:948
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
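A sketch of the general-purpose path, where the memory requirements are queried manually (here for a buffer) and the memory is bound by hand using the returned allocation info:

// Sketch: allocate raw memory for a buffer and bind it manually.
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
    &allocation, &allocInfo);
if(res == VK_SUCCESS)
    vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);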
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:730
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
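A sketch of the per-frame pattern this call supports; the frame counter is maintained by the application:

// Sketch: advance the allocator's notion of the current frame each loop.
uint32_t frameIndex = 0;
for(;;)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit this frame's work ...
    ++frameIndex;
}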
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:805
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:636
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:643
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:889
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:636
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:953