Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorFlagBits {
    /** Allocator and all objects created from it will not be synchronized internally,
    so you must guarantee they are used from only one thread at a time or
    synchronized externally by you. */
    VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,

    VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorFlagBits;

typedef VkFlags VmaAllocatorFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorFlagBits enum.
    VmaAllocatorFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional; set to 0 to use the default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Preferred size of a single VkDeviceMemory block to be allocated from "small" heaps. Optional; set to 0 to use the default.
    VkDeviceSize preferredSmallHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

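/*
Example (an illustrative sketch, not part of the original documentation):
typical allocator setup and teardown. `physicalDevice` and `device` are assumed
to have been created by the application beforehand.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
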
/// PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns its VkMemoryPropertyFlags.
/// This is just a convenience function; the same information can be obtained using vmaGetMemoryProperties().
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t BlockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t AllocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t UnusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize UsedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize UnusedBytes;
    VkDeviceSize AllocationSizeMin, AllocationSizeAvg, AllocationSizeMax;
    VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
/// @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED


VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped on host. Could be used for transfer to/from device.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for frequent writing on device and readback on host (download).
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /** Set this flag if the allocation should have its own memory block.
    Use it for special, big resources, like fullscreen images used as attachments. */
    VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001,
    /** Set this flag to only try to allocate from existing VkDeviceMemory blocks
    and never create a new such block. */
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /** Set to use a memory that will be persistently mapped and retrieve a pointer to it.
    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. */
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    /** Allocation created with this flag can become lost as a result of another
    allocation with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. */
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /** While creating an allocation using this flag, other allocations that were
    created with flag VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost. */
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Leave VMA_MEMORY_USAGE_UNKNOWN if you specify requiredFlags. You can also use both.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
    void* pUserData;
    /// Pool that this allocation should be created in. Leave VK_NULL_HANDLE to allocate from general memory.
    VmaPool pool;
} VmaAllocationCreateInfo;

/**
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.

This algorithm tries to find a memory type that:

- Is allowed by memoryTypeBits.
- Contains all the flags from pAllocationCreateInfo->requiredFlags.
- Matches intended usage.
- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.

\return Returns VK_ERROR_FEATURE_NOT_PRESENT if a suitable memory type cannot
be found.
*/
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

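/*
Example (an illustrative sketch): choosing a memory type for a host-visible
staging buffer. `memReq` is assumed to come from vkGetBufferMemoryRequirements().

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/
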

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /** Use this flag if you always allocate only buffers and linear images or only
    optimal images out of this pool, and so buffer-image granularity can be ignored. */
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /** Set this flag to use a memory that will be persistently mapped. */
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a created VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

/// Allocates Vulkan device memory and creates VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

/// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/// Retrieves statistics of an existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in the given pool as lost if they are not used in the
/// current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

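/*
Example (an illustrative sketch): creating a custom pool for one memory type and
routing allocations into it through VmaAllocationCreateInfo::pool. The memory
type index would normally come from vmaFindMemoryTypeIndex().

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocations created with allocCreateInfo.pool = pool; ...
    vmaDestroyPool(allocator, pool);
*/
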
VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    /// Handle to Vulkan memory object. Same memory object can be shared by multiple allocations. It can change after a defragmentation call.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes. It can change after a defragmentation call.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes. It never changes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/** \brief General purpose memory allocation.

@param[out] pAllocation Handle to allocated memory.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().

You should free the memory using vmaFreeMemory().
*/
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemory(), but the memory requirements are queried from the given buffer.
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer(), but for a VkImage.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/** Creates a new VmaAllocation that is initially lost, e.g. to be used as a
placeholder for an allocation that will be created later. It never becomes
"not lost"; you must still free it using vmaFreeMemory(). */
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/** Feel free to use vkMapMemory on these memory blocks on your own if you want,
but just for convenience and to make sure correct offset and size are always
specified, usage of vmaMapMemory() / vmaUnmapMemory() is recommended.

Do not use it on memory allocated with VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT,
as multiple mappings of the same VkDeviceMemory block are illegal.
*/
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

/// Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

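/*
Example (an illustrative sketch): uploading data through a mapped allocation
that was NOT created with VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT. `srcData`
and `srcDataSize` are assumed to be provided by the application.

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
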
/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. Use VK_WHOLE_SIZE for no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place. Use UINT32_MAX for no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

/// Compacts memory by moving allocations.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

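/*
Example (an illustrative sketch): defragmenting a set of allocations with a
budget on moved bytes. `allocations`, `allocationCount`, and
`allocationsChanged` are application-provided arrays; after the call, resources
bound to moved allocations must be recreated and rebound by the application.

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
    defragInfo.maxAllocationsToMove = 128;

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, allocationCount, allocationsChanged,
        &defragInfo, &stats);
*/
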

/** \brief Creates a buffer, allocates and binds memory for it.

This function automatically:

-# Creates the buffer.
-# Allocates appropriate memory for it.
-# Binds the buffer with the memory.
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/** Destroys Vulkan buffer and frees allocated memory.

This is just a convenience function equivalent to:

    vkDestroyBuffer(device, buffer, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);
*/
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Function similar to vmaCreateBuffer(), but for a VkImage.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/** Destroys Vulkan image and frees allocated memory.

This is just a convenience function equivalent to:

    vkDestroyImage(device, image, allocationCallbacks);
    vmaFreeMemory(allocator, allocation);
*/
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

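/*
Example (an illustrative sketch): creating a GPU-only vertex buffer in one call.
`vertexDataSize` is assumed to be known to the application.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = vertexDataSize;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
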
#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/
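/*
Example (an illustrative sketch): compiling the implementation in exactly one
.cpp file, with a configuration macro overridden before the #include.

    // In one and only one .cpp file:
    #define VMA_IMPLEMENTATION
    #define VMA_STATIC_VULKAN_FUNCTIONS 0 // we will supply pVulkanFunctions ourselves
    #include "vk_mem_alloc.h"
*/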

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif
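/*
Example (an illustrative sketch): with VMA_STATIC_VULKAN_FUNCTIONS defined to 0,
function pointers fetched e.g. via vkGetDeviceProcAddr are passed in through
VmaAllocatorCreateInfo::pVulkanFunctions.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory =
        (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ... fill the remaining members the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/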

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif
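/*
Example (an illustrative sketch): replacing the synchronization primitives with
a project's own lock type before including the implementation. `MyEngineLock`
is a hypothetical class providing lock()/unlock().

    class MyVmaMutex
    {
    public:
        void Lock() { m_Lock.lock(); }
        void Unlock() { m_Lock.unlock(); }
    private:
        MyEngineLock m_Lock;
    };
    #define VMA_MUTEX MyVmaMutex
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t> // or a type with the subset above
    #include "vk_mem_alloc.h"
*/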

#ifndef VMA_BEST_FIT
    /**
    Define to 1 to use best-fit strategy when searching the list of free
    suballocations for a new allocation, or to 0 to use first-fit instead.
    Best-fit is slower but may result in less wasted memory.
    */
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
    /// Every allocation will have its own memory block. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /// Minimum alignment of all suballocations, in bytes. Set to more than 1 for debugging purposes only.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /// Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /// Define to 1 to enable a single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /// Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator it = beg; it < centerValue; ++it)
    {
        if(cmp(*it, *centerValue))
        {
            if(insertIndex != it)
            {
                VMA_SWAP(*it, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
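// Worked example (illustrative, with pageSize = 4096): a resource occupying
// bytes [0, 4000) ends on page 0, and a resource starting at offset 4096 begins
// on page 1, so VmaBlocksOnSamePage(0, 4000, 4096, 4096) == false. With the
// second resource at offset 4000 instead, both land on page 0 and the function
// returns true.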

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value points to the found element, if present in the collection,
or to the place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
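/*
Example (an illustrative sketch): searching a sorted array of integers.

    const int arr[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 3, [](int lhs, int rhs) { return lhs < rhs; });
    // it points to the first 3 (index 1); searching for 4 instead would yield
    // index 3 (the 7), i.e. the insertion point that keeps the array sorted.
*/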

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    {
        return size_t(it - vector.data());
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
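
/*
Example (an illustrative sketch; `Node` is a hypothetical POD type): Alloc()
returns raw, unconstructed storage taken from a block's free list, so it suits
trivially-constructible types, as used by VmaRawList below.

    struct Node { uint32_t a, b; };
    VmaPoolAllocator<Node> pool(pAllocationCallbacks, 128);
    Node* n = pool.Alloc();
    n->a = 1;
    pool.Free(n);
*/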

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear(): it would waste time returning every
    // item to m_ItemAllocator as free, while the item allocator releases its
    // blocks anyway.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_OWN,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitOwnAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_OWN;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_OwnAllocation.m_hMemory = hMemory;
        m_OwnAllocation.m_PersistentMap = persistentMap;
        m_OwnAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        outInfo.BlockCount = 1;
        outInfo.AllocationCount = 1;
        outInfo.UnusedRangeCount = 0;
        outInfo.UsedBytes = m_Size;
        outInfo.UnusedBytes = 0;
        outInfo.AllocationSizeMin = outInfo.AllocationSizeMax = m_Size;
        outInfo.UnusedRangeSizeMin = UINT64_MAX;
        outInfo.UnusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct OwnAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        OwnAllocation m_OwnAllocation;
    };
};
2972 
2973 /*
2974 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
2975 allocated memory block or free.
2976 */
2977 struct VmaSuballocation
2978 {
2979  VkDeviceSize offset;
2980  VkDeviceSize size;
2981  VmaAllocation hAllocation;
2982  VmaSuballocationType type;
2983 };
2984 
2985 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2986 
2987 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
2988 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
2989 
2990 /*
2991 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
2992 
2993 If canMakeOtherLost was false:
2994 - item points to a FREE suballocation.
2995 - itemsToMakeLostCount is 0.
2996 
2997 If canMakeOtherLost was true:
2998 - item points to the first of a sequence of suballocations, which are either FREE,
2999  or point to VmaAllocations that can become lost.
3000 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3001  the requested allocation to succeed.
3002 */
3003 struct VmaAllocationRequest
3004 {
3005  VkDeviceSize offset;
3006  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3007  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3008  VmaSuballocationList::iterator item;
3009  size_t itemsToMakeLostCount;
3010 
3011  VkDeviceSize CalcCost() const
3012  {
3013  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3014  }
3015 };
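/* Illustrative sketch (an assumption, not library code): a caller that obtained
two candidate requests from CreateAllocationRequest could pick the cheaper one.
E.g. making 2 allocations lost with sumItemSize == 3 MiB costs
3 MiB + 2 * VMA_LOST_ALLOCATION_COST = 5 MiB equivalent bytes.

    VmaAllocationRequest reqA, reqB; // assumed filled by CreateAllocationRequest
    const VmaAllocationRequest& cheaper =
        (reqA.CalcCost() <= reqB.CalcCost()) ? reqA : reqB;
*/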
3016 
3017 /*
3018 Represents a single block of device memory (VkDeviceMemory) with all the
3019 data about its regions (aka suballocations, VmaAllocation), assigned and free.
3020 
3021 Thread-safety: This class must be externally synchronized.
3022 */
3023 class VmaDeviceMemoryBlock
3024 {
3025 public:
3026  uint32_t m_MemoryTypeIndex;
3027  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3028  VkDeviceMemory m_hMemory;
3029  VkDeviceSize m_Size;
3030  bool m_PersistentMap;
3031  void* m_pMappedData;
3032  uint32_t m_FreeCount;
3033  VkDeviceSize m_SumFreeSize;
3034  VmaSuballocationList m_Suballocations;
3035  // Suballocations that are free and have size greater than a certain threshold.
3036  // Sorted by size, ascending.
3037  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3038 
3039  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3040 
3041  ~VmaDeviceMemoryBlock()
3042  {
3043  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3044  }
3045 
3046  // Always call after construction.
3047  void Init(
3048  uint32_t newMemoryTypeIndex,
3049  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3050  VkDeviceMemory newMemory,
3051  VkDeviceSize newSize,
3052  bool persistentMap,
3053  void* pMappedData);
3054  // Always call before destruction.
3055  void Destroy(VmaAllocator allocator);
3056 
3057  // Validates all data structures inside this object. If not valid, returns false.
3058  bool Validate() const;
3059 
3060  VkDeviceSize GetUnusedRangeSizeMax() const;
3061 
3062  // Tries to find a place for a suballocation with given parameters inside this block.
3063  // If succeeded, fills pAllocationRequest and returns true.
3064  // If failed, returns false.
3065  bool CreateAllocationRequest(
3066  uint32_t currentFrameIndex,
3067  uint32_t frameInUseCount,
3068  VkDeviceSize bufferImageGranularity,
3069  VkDeviceSize allocSize,
3070  VkDeviceSize allocAlignment,
3071  VmaSuballocationType allocType,
3072  bool canMakeOtherLost,
3073  VmaAllocationRequest* pAllocationRequest);
3074 
3075  bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);
3076 
3077  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3078 
3079  // Returns true if this block is empty - contains only a single free suballocation.
3080  bool IsEmpty() const;
3081 
3082  // Makes actual allocation based on request. Request must already be checked
3083  // and valid.
3084  void Alloc(
3085  const VmaAllocationRequest& request,
3086  VmaSuballocationType type,
3087  VkDeviceSize allocSize,
3088  VmaAllocation hAllocation);
3089 
3090  // Frees suballocation assigned to given memory region.
3091  void Free(const VmaAllocation allocation);
3092 
3093 #if VMA_STATS_STRING_ENABLED
3094  void PrintDetailedMap(class VmaJsonWriter& json) const;
3095 #endif
3096 
3097 private:
3098  // Checks if a requested suballocation with given parameters can be placed in given suballocItem.
3099  // If yes, fills pOffset and returns true. If no, returns false.
3100  bool CheckAllocation(
3101  uint32_t currentFrameIndex,
3102  uint32_t frameInUseCount,
3103  VkDeviceSize bufferImageGranularity,
3104  VkDeviceSize allocSize,
3105  VkDeviceSize allocAlignment,
3106  VmaSuballocationType allocType,
3107  VmaSuballocationList::const_iterator suballocItem,
3108  bool canMakeOtherLost,
3109  VkDeviceSize* pOffset,
3110  size_t* itemsToMakeLostCount,
3111  VkDeviceSize* pSumFreeSize,
3112  VkDeviceSize* pSumItemSize) const;
3113 
3114  // Given a free suballocation, merges it with the following one, which must also be free.
3115  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3116  // Releases given suballocation, making it free.
3117  // Merges it with adjacent free suballocations if applicable.
3118  // Returns iterator to new free suballocation at this place.
3119  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3120  // Given a free suballocation, inserts it into the sorted list
3121  // m_FreeSuballocationsBySize if it is large enough to be registered there.
3122  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3123  // Given a free suballocation, removes it from the sorted list
3124  // m_FreeSuballocationsBySize if it is registered there.
3125  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3126 
3127  bool ValidateFreeSuballocationList() const;
3128 };
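/* Lifecycle sketch (illustrative; hAllocator, memTypeIndex, hMemory and size are
assumed to exist). The owner must pair Init/Destroy explicitly, because the
destructor asserts that m_hMemory was already released:

    VmaDeviceMemoryBlock* pBlock =
        new(VmaAllocate<VmaDeviceMemoryBlock>(hAllocator)) VmaDeviceMemoryBlock(hAllocator);
    pBlock->Init(memTypeIndex, VMA_BLOCK_VECTOR_TYPE_UNMAPPED, hMemory, size,
        false, VMA_NULL); // not persistently mapped
    // ... CreateAllocationRequest + Alloc to suballocate, Free to release ...
    pBlock->Destroy(hAllocator); // unmaps if mapped, frees the VkDeviceMemory
    vma_delete(hAllocator, pBlock); // VmaAllocate/vma_delete are defined later in this file
*/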
3129 
3130 struct VmaPointerLess
3131 {
3132  bool operator()(const void* lhs, const void* rhs) const
3133  {
3134  return lhs < rhs;
3135  }
3136 };
3137 
3138 class VmaDefragmentator;
3139 
3140 /*
3141 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3142 Vulkan memory type.
3143 
3144 Synchronized internally with a mutex.
3145 */
3146 struct VmaBlockVector
3147 {
3148  VmaBlockVector(
3149  VmaAllocator hAllocator,
3150  uint32_t memoryTypeIndex,
3151  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3152  VkDeviceSize preferredBlockSize,
3153  size_t minBlockCount,
3154  size_t maxBlockCount,
3155  VkDeviceSize bufferImageGranularity,
3156  uint32_t frameInUseCount,
3157  bool isCustomPool);
3158  ~VmaBlockVector();
3159 
3160  VkResult CreateMinBlocks();
3161 
3162  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3163  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3164  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3165  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3166  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3167 
3168  void GetPoolStats(VmaPoolStats* pStats);
3169 
3170  bool IsEmpty() const { return m_Blocks.empty(); }
3171 
3172  VkResult Allocate(
3173  VmaPool hCurrentPool,
3174  uint32_t currentFrameIndex,
3175  const VkMemoryRequirements& vkMemReq,
3176  const VmaAllocationCreateInfo& createInfo,
3177  VmaSuballocationType suballocType,
3178  VmaAllocation* pAllocation);
3179 
3180  void Free(
3181  VmaAllocation hAllocation);
3182 
3183  // Adds statistics of this BlockVector to pStats.
3184  void AddStats(VmaStats* pStats);
3185 
3186 #if VMA_STATS_STRING_ENABLED
3187  void PrintDetailedMap(class VmaJsonWriter& json);
3188 #endif
3189 
3190  void UnmapPersistentlyMappedMemory();
3191  VkResult MapPersistentlyMappedMemory();
3192 
3193  void MakePoolAllocationsLost(
3194  uint32_t currentFrameIndex,
3195  size_t* pLostAllocationCount);
3196 
3197  VmaDefragmentator* EnsureDefragmentator(
3198  VmaAllocator hAllocator,
3199  uint32_t currentFrameIndex);
3200 
3201  VkResult Defragment(
3202  VmaDefragmentationStats* pDefragmentationStats,
3203  VkDeviceSize& maxBytesToMove,
3204  uint32_t& maxAllocationsToMove);
3205 
3206  void DestroyDefragmentator();
3207 
3208 private:
3209  friend class VmaDefragmentator;
3210 
3211  const VmaAllocator m_hAllocator;
3212  const uint32_t m_MemoryTypeIndex;
3213  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3214  const VkDeviceSize m_PreferredBlockSize;
3215  const size_t m_MinBlockCount;
3216  const size_t m_MaxBlockCount;
3217  const VkDeviceSize m_BufferImageGranularity;
3218  const uint32_t m_FrameInUseCount;
3219  const bool m_IsCustomPool;
3220  VMA_MUTEX m_Mutex;
3221  // Incrementally sorted by sumFreeSize, ascending.
3222  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3223  /* There can be at most one block that is completely empty - a
3224  hysteresis to avoid the pessimistic case of alternating creation and
3225  destruction of a VkDeviceMemory. */
3226  bool m_HasEmptyBlock;
3227  VmaDefragmentator* m_pDefragmentator;
3228 
3229  // Finds and removes given block from vector.
3230  void Remove(VmaDeviceMemoryBlock* pBlock);
3231 
3232  // Performs single step in sorting m_Blocks. They may not be fully sorted
3233  // after this call.
3234  void IncrementallySortBlocks();
3235 
3236  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3237 };
3238 
3239 struct VmaPool_T
3240 {
3241 public:
3242  VmaBlockVector m_BlockVector;
3243 
3244  // Takes ownership.
3245  VmaPool_T(
3246  VmaAllocator hAllocator,
3247  const VmaPoolCreateInfo& createInfo);
3248  ~VmaPool_T();
3249 
3250  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3251 
3252 #if VMA_STATS_STRING_ENABLED
3253  //void PrintDetailedMap(class VmaStringBuilder& sb);
3254 #endif
3255 };
3256 
3257 class VmaDefragmentator
3258 {
3259  const VmaAllocator m_hAllocator;
3260  VmaBlockVector* const m_pBlockVector;
3261  uint32_t m_CurrentFrameIndex;
3262  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3263  VkDeviceSize m_BytesMoved;
3264  uint32_t m_AllocationsMoved;
3265 
3266  struct AllocationInfo
3267  {
3268  VmaAllocation m_hAllocation;
3269  VkBool32* m_pChanged;
3270 
3271  AllocationInfo() :
3272  m_hAllocation(VK_NULL_HANDLE),
3273  m_pChanged(VMA_NULL)
3274  {
3275  }
3276  };
3277 
3278  struct AllocationInfoSizeGreater
3279  {
3280  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3281  {
3282  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3283  }
3284  };
3285 
3286  // Used between AddAllocation and Defragment.
3287  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3288 
3289  struct BlockInfo
3290  {
3291  VmaDeviceMemoryBlock* m_pBlock;
3292  bool m_HasNonMovableAllocations;
3293  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3294 
3295  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3296  m_pBlock(VMA_NULL),
3297  m_HasNonMovableAllocations(true),
3298  m_Allocations(pAllocationCallbacks),
3299  m_pMappedDataForDefragmentation(VMA_NULL)
3300  {
3301  }
3302 
3303  void CalcHasNonMovableAllocations()
3304  {
3305  const size_t blockAllocCount =
3306  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3307  const size_t defragmentAllocCount = m_Allocations.size();
3308  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3309  }
3310 
3311  void SortAllocationsBySizeDescecnding()
3312  {
3313  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3314  }
3315 
3316  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3317  void Unmap(VmaAllocator hAllocator);
3318 
3319  private:
3320  // Not null if mapped for defragmentation only, not persistently mapped.
3321  void* m_pMappedDataForDefragmentation;
3322  };
3323 
3324  struct BlockPointerLess
3325  {
3326  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3327  {
3328  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3329  }
3330  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3331  {
3332  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3333  }
3334  };
3335 
3336  // 1. Blocks with some non-movable allocations go first.
3337  // 2. Blocks with smaller sumFreeSize go first.
3338  struct BlockInfoCompareMoveDestination
3339  {
3340  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3341  {
3342  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3343  {
3344  return true;
3345  }
3346  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3347  {
3348  return false;
3349  }
3350  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3351  {
3352  return true;
3353  }
3354  return false;
3355  }
3356  };
3357 
3358  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3359  BlockInfoVector m_Blocks;
3360 
3361  VkResult DefragmentRound(
3362  VkDeviceSize maxBytesToMove,
3363  uint32_t maxAllocationsToMove);
3364 
3365  static bool MoveMakesSense(
3366  size_t dstBlockIndex, VkDeviceSize dstOffset,
3367  size_t srcBlockIndex, VkDeviceSize srcOffset);
3368 
3369 public:
3370  VmaDefragmentator(
3371  VmaAllocator hAllocator,
3372  VmaBlockVector* pBlockVector,
3373  uint32_t currentFrameIndex);
3374 
3375  ~VmaDefragmentator();
3376 
3377  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3378  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3379 
3380  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3381 
3382  VkResult Defragment(
3383  VkDeviceSize maxBytesToMove,
3384  uint32_t maxAllocationsToMove);
3385 };
3386 
3387 // Main allocator object.
3388 struct VmaAllocator_T
3389 {
3390  bool m_UseMutex;
3391  VkDevice m_hDevice;
3392  bool m_AllocationCallbacksSpecified;
3393  VkAllocationCallbacks m_AllocationCallbacks;
3394  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3395  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3396  // Counter to allow nested calls to these functions.
3397  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3398 
3399  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3400  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3401  VMA_MUTEX m_HeapSizeLimitMutex;
3402 
3403  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3404  VkPhysicalDeviceMemoryProperties m_MemProps;
3405 
3406  // Default pools.
3407  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3408 
3409  // Each vector is sorted by memory (handle value).
3410  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3411  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3412  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3413 
3414  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3415  ~VmaAllocator_T();
3416 
3417  const VkAllocationCallbacks* GetAllocationCallbacks() const
3418  {
3419  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3420  }
3421  const VmaVulkanFunctions& GetVulkanFunctions() const
3422  {
3423  return m_VulkanFunctions;
3424  }
3425 
3426  VkDeviceSize GetBufferImageGranularity() const
3427  {
3428  return VMA_MAX(
3429  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3430  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3431  }
3432 
3433  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3434  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3435 
3436  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3437  {
3438  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3439  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3440  }
3441 
3442  // Main allocation function.
3443  VkResult AllocateMemory(
3444  const VkMemoryRequirements& vkMemReq,
3445  const VmaAllocationCreateInfo& createInfo,
3446  VmaSuballocationType suballocType,
3447  VmaAllocation* pAllocation);
3448 
3449  // Main deallocation function.
3450  void FreeMemory(const VmaAllocation allocation);
3451 
3452  void CalculateStats(VmaStats* pStats);
3453 
3454 #if VMA_STATS_STRING_ENABLED
3455  void PrintDetailedMap(class VmaJsonWriter& json);
3456 #endif
3457 
3458  void UnmapPersistentlyMappedMemory();
3459  VkResult MapPersistentlyMappedMemory();
3460 
3461  VkResult Defragment(
3462  VmaAllocation* pAllocations,
3463  size_t allocationCount,
3464  VkBool32* pAllocationsChanged,
3465  const VmaDefragmentationInfo* pDefragmentationInfo,
3466  VmaDefragmentationStats* pDefragmentationStats);
3467 
3468  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3469 
3470  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3471  void DestroyPool(VmaPool pool);
3472  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3473 
3474  void SetCurrentFrameIndex(uint32_t frameIndex);
3475 
3476  void MakePoolAllocationsLost(
3477  VmaPool hPool,
3478  size_t* pLostAllocationCount);
3479 
3480  void CreateLostAllocation(VmaAllocation* pAllocation);
3481 
3482  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3483  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3484 
3485 private:
3486  VkDeviceSize m_PreferredLargeHeapBlockSize;
3487  VkDeviceSize m_PreferredSmallHeapBlockSize;
3488 
3489  VkPhysicalDevice m_PhysicalDevice;
3490  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3491 
3492  VMA_MUTEX m_PoolsMutex;
3493  // Protected by m_PoolsMutex. Sorted by pointer value.
3494  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3495 
3496  VmaVulkanFunctions m_VulkanFunctions;
3497 
3498  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3499 
3500  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3501 
3502  VkResult AllocateMemoryOfType(
3503  const VkMemoryRequirements& vkMemReq,
3504  const VmaAllocationCreateInfo& createInfo,
3505  uint32_t memTypeIndex,
3506  VmaSuballocationType suballocType,
3507  VmaAllocation* pAllocation);
3508 
3509  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
3510  VkResult AllocateOwnMemory(
3511  VkDeviceSize size,
3512  VmaSuballocationType suballocType,
3513  uint32_t memTypeIndex,
3514  bool map,
3515  void* pUserData,
3516  VmaAllocation* pAllocation);
3517 
3518  // Frees the given allocation, which must have been created as Own Memory, releasing its VkDeviceMemory.
3519  void FreeOwnMemory(VmaAllocation allocation);
3520 };
3521 
3523 // Memory allocation #2 after VmaAllocator_T definition
3524 
3525 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3526 {
3527  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3528 }
3529 
3530 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3531 {
3532  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3533 }
3534 
3535 template<typename T>
3536 static T* VmaAllocate(VmaAllocator hAllocator)
3537 {
3538  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3539 }
3540 
3541 template<typename T>
3542 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3543 {
3544  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3545 }
3546 
3547 template<typename T>
3548 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3549 {
3550  if(ptr != VMA_NULL)
3551  {
3552  ptr->~T();
3553  VmaFree(hAllocator, ptr);
3554  }
3555 }
3556 
3557 template<typename T>
3558 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3559 {
3560  if(ptr != VMA_NULL)
3561  {
3562  for(size_t i = count; i--; )
3563  ptr[i].~T();
3564  VmaFree(hAllocator, ptr);
3565  }
3566 }
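/* Illustrative pairing (an assumption about intended use: VmaAllocate only
obtains raw, suitably aligned storage, so construction is done via placement
new, and vma_delete runs the destructor before freeing):

    struct Payload { int value; }; // hypothetical example type
    Payload* p = new(VmaAllocate<Payload>(hAllocator)) Payload{42};
    // ...
    vma_delete(hAllocator, p);
*/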
3567 
3569 // VmaStringBuilder
3570 
3571 #if VMA_STATS_STRING_ENABLED
3572 
3573 class VmaStringBuilder
3574 {
3575 public:
3576  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3577  size_t GetLength() const { return m_Data.size(); }
3578  const char* GetData() const { return m_Data.data(); }
3579 
3580  void Add(char ch) { m_Data.push_back(ch); }
3581  void Add(const char* pStr);
3582  void AddNewLine() { Add('\n'); }
3583  void AddNumber(uint32_t num);
3584  void AddNumber(uint64_t num);
3585  void AddPointer(const void* ptr);
3586 
3587 private:
3588  VmaVector< char, VmaStlAllocator<char> > m_Data;
3589 };
3590 
3591 void VmaStringBuilder::Add(const char* pStr)
3592 {
3593  const size_t strLen = strlen(pStr);
3594  if(strLen > 0)
3595  {
3596  const size_t oldCount = m_Data.size();
3597  m_Data.resize(oldCount + strLen);
3598  memcpy(m_Data.data() + oldCount, pStr, strLen);
3599  }
3600 }
3601 
3602 void VmaStringBuilder::AddNumber(uint32_t num)
3603 {
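 // Max uint32_t is 4294967295: 10 digits + terminating null = 11 chars.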
3604  char buf[11];
3605  VmaUint32ToStr(buf, sizeof(buf), num);
3606  Add(buf);
3607 }
3608 
3609 void VmaStringBuilder::AddNumber(uint64_t num)
3610 {
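 // Max uint64_t is 18446744073709551615: 20 digits + terminating null = 21 chars.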
3611  char buf[21];
3612  VmaUint64ToStr(buf, sizeof(buf), num);
3613  Add(buf);
3614 }
3615 
3616 void VmaStringBuilder::AddPointer(const void* ptr)
3617 {
3618  char buf[21];
3619  VmaPtrToStr(buf, sizeof(buf), ptr);
3620  Add(buf);
3621 }
3622 
3623 #endif // #if VMA_STATS_STRING_ENABLED
3624 
3626 // VmaJsonWriter
3627 
3628 #if VMA_STATS_STRING_ENABLED
3629 
3630 class VmaJsonWriter
3631 {
3632 public:
3633  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3634  ~VmaJsonWriter();
3635 
3636  void BeginObject(bool singleLine = false);
3637  void EndObject();
3638 
3639  void BeginArray(bool singleLine = false);
3640  void EndArray();
3641 
3642  void WriteString(const char* pStr);
3643  void BeginString(const char* pStr = VMA_NULL);
3644  void ContinueString(const char* pStr);
3645  void ContinueString(uint32_t n);
3646  void ContinueString(uint64_t n);
3647  void EndString(const char* pStr = VMA_NULL);
3648 
3649  void WriteNumber(uint32_t n);
3650  void WriteNumber(uint64_t n);
3651  void WriteBool(bool b);
3652  void WriteNull();
3653 
3654 private:
3655  static const char* const INDENT;
3656 
3657  enum COLLECTION_TYPE
3658  {
3659  COLLECTION_TYPE_OBJECT,
3660  COLLECTION_TYPE_ARRAY,
3661  };
3662  struct StackItem
3663  {
3664  COLLECTION_TYPE type;
3665  uint32_t valueCount;
3666  bool singleLineMode;
3667  };
3668 
3669  VmaStringBuilder& m_SB;
3670  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3671  bool m_InsideString;
3672 
3673  void BeginValue(bool isString);
3674  void WriteIndent(bool oneLess = false);
3675 };
3676 
3677 const char* const VmaJsonWriter::INDENT = " ";
3678 
3679 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3680  m_SB(sb),
3681  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3682  m_InsideString(false)
3683 {
3684 }
3685 
3686 VmaJsonWriter::~VmaJsonWriter()
3687 {
3688  VMA_ASSERT(!m_InsideString);
3689  VMA_ASSERT(m_Stack.empty());
3690 }
3691 
3692 void VmaJsonWriter::BeginObject(bool singleLine)
3693 {
3694  VMA_ASSERT(!m_InsideString);
3695 
3696  BeginValue(false);
3697  m_SB.Add('{');
3698 
3699  StackItem item;
3700  item.type = COLLECTION_TYPE_OBJECT;
3701  item.valueCount = 0;
3702  item.singleLineMode = singleLine;
3703  m_Stack.push_back(item);
3704 }
3705 
3706 void VmaJsonWriter::EndObject()
3707 {
3708  VMA_ASSERT(!m_InsideString);
3709 
3710  WriteIndent(true);
3711  m_SB.Add('}');
3712 
3713  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3714  m_Stack.pop_back();
3715 }
3716 
3717 void VmaJsonWriter::BeginArray(bool singleLine)
3718 {
3719  VMA_ASSERT(!m_InsideString);
3720 
3721  BeginValue(false);
3722  m_SB.Add('[');
3723 
3724  StackItem item;
3725  item.type = COLLECTION_TYPE_ARRAY;
3726  item.valueCount = 0;
3727  item.singleLineMode = singleLine;
3728  m_Stack.push_back(item);
3729 }
3730 
3731 void VmaJsonWriter::EndArray()
3732 {
3733  VMA_ASSERT(!m_InsideString);
3734 
3735  WriteIndent(true);
3736  m_SB.Add(']');
3737 
3738  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3739  m_Stack.pop_back();
3740 }
3741 
3742 void VmaJsonWriter::WriteString(const char* pStr)
3743 {
3744  BeginString(pStr);
3745  EndString();
3746 }
3747 
3748 void VmaJsonWriter::BeginString(const char* pStr)
3749 {
3750  VMA_ASSERT(!m_InsideString);
3751 
3752  BeginValue(true);
3753  m_SB.Add('"');
3754  m_InsideString = true;
3755  if(pStr != VMA_NULL && pStr[0] != '\0')
3756  {
3757  ContinueString(pStr);
3758  }
3759 }
3760 
3761 void VmaJsonWriter::ContinueString(const char* pStr)
3762 {
3763  VMA_ASSERT(m_InsideString);
3764 
3765  const size_t strLen = strlen(pStr);
3766  for(size_t i = 0; i < strLen; ++i)
3767  {
3768  char ch = pStr[i];
3769  if(ch == '\\')
3770  {
3771  m_SB.Add("\\\\");
3772  }
3773  else if(ch == '"')
3774  {
3775  m_SB.Add("\\\"");
3776  }
3777  else if(ch >= 32)
3778  {
3779  m_SB.Add(ch);
3780  }
3781  else switch(ch)
3782  {
3783  case '\n':
3784  m_SB.Add("\\n");
3785  break;
3786  case '\r':
3787  m_SB.Add("\\r");
3788  break;
3789  case '\t':
3790  m_SB.Add("\\t");
3791  break;
3792  default:
3793  VMA_ASSERT(0 && "Character not currently supported.");
3794  break;
3795  }
3796  }
3797 }
3798 
3799 void VmaJsonWriter::ContinueString(uint32_t n)
3800 {
3801  VMA_ASSERT(m_InsideString);
3802  m_SB.AddNumber(n);
3803 }
3804 
3805 void VmaJsonWriter::ContinueString(uint64_t n)
3806 {
3807  VMA_ASSERT(m_InsideString);
3808  m_SB.AddNumber(n);
3809 }
3810 
3811 void VmaJsonWriter::EndString(const char* pStr)
3812 {
3813  VMA_ASSERT(m_InsideString);
3814  if(pStr != VMA_NULL && pStr[0] != '\0')
3815  {
3816  ContinueString(pStr);
3817  }
3818  m_SB.Add('"');
3819  m_InsideString = false;
3820 }
3821 
3822 void VmaJsonWriter::WriteNumber(uint32_t n)
3823 {
3824  VMA_ASSERT(!m_InsideString);
3825  BeginValue(false);
3826  m_SB.AddNumber(n);
3827 }
3828 
3829 void VmaJsonWriter::WriteNumber(uint64_t n)
3830 {
3831  VMA_ASSERT(!m_InsideString);
3832  BeginValue(false);
3833  m_SB.AddNumber(n);
3834 }
3835 
3836 void VmaJsonWriter::WriteBool(bool b)
3837 {
3838  VMA_ASSERT(!m_InsideString);
3839  BeginValue(false);
3840  m_SB.Add(b ? "true" : "false");
3841 }
3842 
3843 void VmaJsonWriter::WriteNull()
3844 {
3845  VMA_ASSERT(!m_InsideString);
3846  BeginValue(false);
3847  m_SB.Add("null");
3848 }
3849 
3850 void VmaJsonWriter::BeginValue(bool isString)
3851 {
3852  if(!m_Stack.empty())
3853  {
3854  StackItem& currItem = m_Stack.back();
3855  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3856  currItem.valueCount % 2 == 0)
3857  {
3858  VMA_ASSERT(isString);
3859  }
3860 
3861  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3862  currItem.valueCount % 2 != 0)
3863  {
3864  m_SB.Add(": ");
3865  }
3866  else if(currItem.valueCount > 0)
3867  {
3868  m_SB.Add(", ");
3869  WriteIndent();
3870  }
3871  else
3872  {
3873  WriteIndent();
3874  }
3875  ++currItem.valueCount;
3876  }
3877 }
3878 
3879 void VmaJsonWriter::WriteIndent(bool oneLess)
3880 {
3881  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3882  {
3883  m_SB.AddNewLine();
3884 
3885  size_t count = m_Stack.size();
3886  if(count > 0 && oneLess)
3887  {
3888  --count;
3889  }
3890  for(size_t i = 0; i < count; ++i)
3891  {
3892  m_SB.Add(INDENT);
3893  }
3894  }
3895 }
3896 
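/* Usage sketch (illustrative, assuming an existing VmaAllocator allocator):
inside an object, writes must alternate key string / value - BeginValue()
asserts this via valueCount parity.

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Count"); // key
        json.WriteNumber(42u);     // value
        json.EndObject();
    }
    // sb.GetData() now holds: {\n "Count": 42\n}
*/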
3897 #endif // #if VMA_STATS_STRING_ENABLED
3898 
3900 
3901 VkDeviceSize VmaAllocation_T::GetOffset() const
3902 {
3903  switch(m_Type)
3904  {
3905  case ALLOCATION_TYPE_BLOCK:
3906  return m_BlockAllocation.m_Offset;
3907  case ALLOCATION_TYPE_OWN:
3908  return 0;
3909  default:
3910  VMA_ASSERT(0);
3911  return 0;
3912  }
3913 }
3914 
3915 VkDeviceMemory VmaAllocation_T::GetMemory() const
3916 {
3917  switch(m_Type)
3918  {
3919  case ALLOCATION_TYPE_BLOCK:
3920  return m_BlockAllocation.m_Block->m_hMemory;
3921  case ALLOCATION_TYPE_OWN:
3922  return m_OwnAllocation.m_hMemory;
3923  default:
3924  VMA_ASSERT(0);
3925  return VK_NULL_HANDLE;
3926  }
3927 }
3928 
3929 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3930 {
3931  switch(m_Type)
3932  {
3933  case ALLOCATION_TYPE_BLOCK:
3934  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3935  case ALLOCATION_TYPE_OWN:
3936  return m_OwnAllocation.m_MemoryTypeIndex;
3937  default:
3938  VMA_ASSERT(0);
3939  return UINT32_MAX;
3940  }
3941 }
3942 
3943 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3944 {
3945  switch(m_Type)
3946  {
3947  case ALLOCATION_TYPE_BLOCK:
3948  return m_BlockAllocation.m_Block->m_BlockVectorType;
3949  case ALLOCATION_TYPE_OWN:
3950  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3951  default:
3952  VMA_ASSERT(0);
3953  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3954  }
3955 }
3956 
3957 void* VmaAllocation_T::GetMappedData() const
3958 {
3959  switch(m_Type)
3960  {
3961  case ALLOCATION_TYPE_BLOCK:
3962  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3963  {
3964  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3965  }
3966  else
3967  {
3968  return VMA_NULL;
3969  }
3970  break;
3971  case ALLOCATION_TYPE_OWN:
3972  return m_OwnAllocation.m_pMappedData;
3973  default:
3974  VMA_ASSERT(0);
3975  return VMA_NULL;
3976  }
3977 }
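/* E.g. (illustrative): for a block persistently mapped at address base, an
allocation at offset 256 yields GetMappedData() == (char*)base + 256. */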
3978 
3979 bool VmaAllocation_T::CanBecomeLost() const
3980 {
3981  switch(m_Type)
3982  {
3983  case ALLOCATION_TYPE_BLOCK:
3984  return m_BlockAllocation.m_CanBecomeLost;
3985  case ALLOCATION_TYPE_OWN:
3986  return false;
3987  default:
3988  VMA_ASSERT(0);
3989  return false;
3990  }
3991 }
3992 
3993 VmaPool VmaAllocation_T::GetPool() const
3994 {
3995  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3996  return m_BlockAllocation.m_hPool;
3997 }
3998 
3999 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4000 {
4001  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4002  if(m_OwnAllocation.m_PersistentMap)
4003  {
4004  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4005  hAllocator->m_hDevice,
4006  m_OwnAllocation.m_hMemory,
4007  0,
4008  VK_WHOLE_SIZE,
4009  0,
4010  &m_OwnAllocation.m_pMappedData);
4011  }
4012  return VK_SUCCESS;
4013 }
4014 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4015 {
4016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4017  if(m_OwnAllocation.m_pMappedData)
4018  {
4019  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
4020  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
4021  m_OwnAllocation.m_pMappedData = VMA_NULL;
4022  }
4023 }
4024 
4025 
4026 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4027 {
4028  VMA_ASSERT(CanBecomeLost());
4029 
4030  /*
4031  Warning: This is a carefully designed algorithm.
4032  Do not modify unless you really know what you're doing :)
4033  */
4034  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4035  for(;;)
4036  {
4037  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4038  {
4039  VMA_ASSERT(0);
4040  return false;
4041  }
4042  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4043  {
4044  return false;
4045  }
4046  else // Last use time earlier than current time.
4047  {
4048  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4049  {
4050  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4051  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4052  return true;
4053  }
4054  }
4055  }
4056 }
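/* Note on the loop above: on failure, compare_exchange_weak reloads
localLastUseFrameIndex with the value another thread just stored (e.g. a newer
use was recorded), so each retry re-evaluates the lost condition on fresh data. */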
4057 
4058 #if VMA_STATS_STRING_ENABLED
4059 
4060 // Names below correspond to values of enum VmaSuballocationType.
4061 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4062  "FREE",
4063  "UNKNOWN",
4064  "BUFFER",
4065  "IMAGE_UNKNOWN",
4066  "IMAGE_LINEAR",
4067  "IMAGE_OPTIMAL",
4068 };
4069 
4070 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4071 {
4072  json.BeginObject();
4073 
4074  json.WriteString("Blocks");
4075  json.WriteNumber(stat.BlockCount);
4076 
4077  json.WriteString("Allocations");
4078  json.WriteNumber(stat.AllocationCount);
4079 
4080  json.WriteString("UnusedRanges");
4081  json.WriteNumber(stat.UnusedRangeCount);
4082 
4083  json.WriteString("UsedBytes");
4084  json.WriteNumber(stat.UsedBytes);
4085 
4086  json.WriteString("UnusedBytes");
4087  json.WriteNumber(stat.UnusedBytes);
4088 
4089  if(stat.AllocationCount > 1)
4090  {
4091  json.WriteString("AllocationSize");
4092  json.BeginObject(true);
4093  json.WriteString("Min");
4094  json.WriteNumber(stat.AllocationSizeMin);
4095  json.WriteString("Avg");
4096  json.WriteNumber(stat.AllocationSizeAvg);
4097  json.WriteString("Max");
4098  json.WriteNumber(stat.AllocationSizeMax);
4099  json.EndObject();
4100  }
4101 
4102  if(stat.UnusedRangeCount > 1)
4103  {
4104  json.WriteString("UnusedRangeSize");
4105  json.BeginObject(true);
4106  json.WriteString("Min");
4107  json.WriteNumber(stat.UnusedRangeSizeMin);
4108  json.WriteString("Avg");
4109  json.WriteNumber(stat.UnusedRangeSizeAvg);
4110  json.WriteString("Max");
4111  json.WriteNumber(stat.UnusedRangeSizeMax);
4112  json.EndObject();
4113  }
4114 
4115  json.EndObject();
4116 }
4117 
4118 #endif // #if VMA_STATS_STRING_ENABLED
4119 
4120 struct VmaSuballocationItemSizeLess
4121 {
4122  bool operator()(
4123  const VmaSuballocationList::iterator lhs,
4124  const VmaSuballocationList::iterator rhs) const
4125  {
4126  return lhs->size < rhs->size;
4127  }
4128  bool operator()(
4129  const VmaSuballocationList::iterator lhs,
4130  VkDeviceSize rhsSize) const
4131  {
4132  return lhs->size < rhsSize;
4133  }
4134 };
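/* Illustrative: CreateAllocationRequest uses this comparator with
VmaBinaryFindFirstNotLess. With registered free sizes {16, 64, 256} and
allocSize == 100, the search lands on the 256-byte suballocation - the first
one whose size is not less than 100. */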
4135 
4136 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
4137  m_MemoryTypeIndex(UINT32_MAX),
4138  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
4139  m_hMemory(VK_NULL_HANDLE),
4140  m_Size(0),
4141  m_PersistentMap(false),
4142  m_pMappedData(VMA_NULL),
4143  m_FreeCount(0),
4144  m_SumFreeSize(0),
4145  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4146  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4147 {
4148 }
4149 
4150 void VmaDeviceMemoryBlock::Init(
4151  uint32_t newMemoryTypeIndex,
4152  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4153  VkDeviceMemory newMemory,
4154  VkDeviceSize newSize,
4155  bool persistentMap,
4156  void* pMappedData)
4157 {
4158  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4159 
4160  m_MemoryTypeIndex = newMemoryTypeIndex;
4161  m_BlockVectorType = newBlockVectorType;
4162  m_hMemory = newMemory;
4163  m_Size = newSize;
4164  m_PersistentMap = persistentMap;
4165  m_pMappedData = pMappedData;
4166  m_FreeCount = 1;
4167  m_SumFreeSize = newSize;
4168 
4169  m_Suballocations.clear();
4170  m_FreeSuballocationsBySize.clear();
4171 
4172  VmaSuballocation suballoc = {};
4173  suballoc.offset = 0;
4174  suballoc.size = newSize;
4175  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4176  suballoc.hAllocation = VK_NULL_HANDLE;
4177 
4178  m_Suballocations.push_back(suballoc);
4179  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4180  --suballocItem;
4181  m_FreeSuballocationsBySize.push_back(suballocItem);
4182 }
4183 
4184 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
4185 {
4186  // This is the most important assert in the entire library.
4187  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
4188  VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
4189 
4190  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
4191  if(m_pMappedData != VMA_NULL)
4192  {
4193  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
4194  m_pMappedData = VMA_NULL;
4195  }
4196 
4197  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
4198  m_hMemory = VK_NULL_HANDLE;
4199 }
4200 
4201 bool VmaDeviceMemoryBlock::Validate() const
4202 {
4203  if((m_hMemory == VK_NULL_HANDLE) ||
4204  (m_Size == 0) ||
4205  m_Suballocations.empty())
4206  {
4207  return false;
4208  }
4209 
4210  // Expected offset of a new suballocation as calculated from previous ones.
4211  VkDeviceSize calculatedOffset = 0;
4212  // Expected number of free suballocations as calculated from traversing their list.
4213  uint32_t calculatedFreeCount = 0;
4214  // Expected sum size of free suballocations as calculated from traversing their list.
4215  VkDeviceSize calculatedSumFreeSize = 0;
4216  // Expected number of free suballocations that should be registered in
4217  // m_FreeSuballocationsBySize calculated from traversing their list.
4218  size_t freeSuballocationsToRegister = 0;
4219  // True if previous visited suballocation was free.
4220  bool prevFree = false;
4221 
4222  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4223  suballocItem != m_Suballocations.cend();
4224  ++suballocItem)
4225  {
4226  const VmaSuballocation& subAlloc = *suballocItem;
4227 
4228  // Actual offset of this suballocation doesn't match expected one.
4229  if(subAlloc.offset != calculatedOffset)
4230  {
4231  return false;
4232  }
4233 
4234  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4235  // Two adjacent free suballocations are invalid. They should be merged.
4236  if(prevFree && currFree)
4237  {
4238  return false;
4239  }
4240  prevFree = currFree;
4241 
4242  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4243  {
4244  return false;
4245  }
4246 
4247  if(currFree)
4248  {
4249  calculatedSumFreeSize += subAlloc.size;
4250  ++calculatedFreeCount;
4251  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4252  {
4253  ++freeSuballocationsToRegister;
4254  }
4255  }
4256 
4257  calculatedOffset += subAlloc.size;
4258  }
4259 
4260  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4261  // match expected one.
4262  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4263  {
4264  return false;
4265  }
4266 
4267  VkDeviceSize lastSize = 0;
4268  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4269  {
4270  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4271 
4272  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4273  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4274  {
4275  return false;
4276  }
4277  // They must be sorted by size ascending.
4278  if(suballocItem->size < lastSize)
4279  {
4280  return false;
4281  }
4282 
4283  lastSize = suballocItem->size;
4284  }
4285 
4286  // Check if totals match calculated values.
4287  return
4288  (calculatedOffset == m_Size) &&
4289  (calculatedSumFreeSize == m_SumFreeSize) &&
4290  (calculatedFreeCount == m_FreeCount);
4291 }
4292 
4293 VkDeviceSize VmaDeviceMemoryBlock::GetUnusedRangeSizeMax() const
4294 {
4295  if(!m_FreeSuballocationsBySize.empty())
4296  {
4297  return m_FreeSuballocationsBySize.back()->size;
4298  }
4299  else
4300  {
4301  return 0;
4302  }
4303 }
4304 
4305 /*
4306 How many suitable free suballocations to analyze before choosing the best one.
4307 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation will
4308  be chosen.
4309 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4310  suballocations will be analyzed and the best one will be chosen.
4311 - Any other value is also acceptable.
4312 */
4313 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4314 
4315 bool VmaDeviceMemoryBlock::CreateAllocationRequest(
4316  uint32_t currentFrameIndex,
4317  uint32_t frameInUseCount,
4318  VkDeviceSize bufferImageGranularity,
4319  VkDeviceSize allocSize,
4320  VkDeviceSize allocAlignment,
4321  VmaSuballocationType allocType,
4322  bool canMakeOtherLost,
4323  VmaAllocationRequest* pAllocationRequest)
4324 {
4325  VMA_ASSERT(allocSize > 0);
4326  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4327  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4328  VMA_HEAVY_ASSERT(Validate());
4329 
4330  // There is not enough total free space in this block to fulfill the request: Early return.
4331  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4332  {
4333  return false;
4334  }
4335 
4336  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4337  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4338  if(freeSuballocCount > 0)
4339  {
4340  if(VMA_BEST_FIT)
4341  {
4342  // Find first free suballocation with size not less than allocSize.
4343  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4344  m_FreeSuballocationsBySize.data(),
4345  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4346  allocSize,
4347  VmaSuballocationItemSizeLess());
4348  size_t index = it - m_FreeSuballocationsBySize.data();
4349  for(; index < freeSuballocCount; ++index)
4350  {
4351  if(CheckAllocation(
4352  currentFrameIndex,
4353  frameInUseCount,
4354  bufferImageGranularity,
4355  allocSize,
4356  allocAlignment,
4357  allocType,
4358  m_FreeSuballocationsBySize[index],
4359  false, // canMakeOtherLost
4360  &pAllocationRequest->offset,
4361  &pAllocationRequest->itemsToMakeLostCount,
4362  &pAllocationRequest->sumFreeSize,
4363  &pAllocationRequest->sumItemSize))
4364  {
4365  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4366  return true;
4367  }
4368  }
4369  }
4370  else
4371  {
4372  // Search starting from the biggest suballocations.
4373  for(size_t index = freeSuballocCount; index--; )
4374  {
4375  if(CheckAllocation(
4376  currentFrameIndex,
4377  frameInUseCount,
4378  bufferImageGranularity,
4379  allocSize,
4380  allocAlignment,
4381  allocType,
4382  m_FreeSuballocationsBySize[index],
4383  false, // canMakeOtherLost
4384  &pAllocationRequest->offset,
4385  &pAllocationRequest->itemsToMakeLostCount,
4386  &pAllocationRequest->sumFreeSize,
4387  &pAllocationRequest->sumItemSize))
4388  {
4389  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4390  return true;
4391  }
4392  }
4393  }
4394  }
4395 
4396  if(canMakeOtherLost)
4397  {
4398  // Brute-force algorithm. TODO: Come up with something better.
4399 
4400  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4401  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4402 
4403  VmaAllocationRequest tmpAllocRequest = {};
4404  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4405  suballocIt != m_Suballocations.end();
4406  ++suballocIt)
4407  {
4408  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4409  suballocIt->hAllocation->CanBecomeLost())
4410  {
4411  if(CheckAllocation(
4412  currentFrameIndex,
4413  frameInUseCount,
4414  bufferImageGranularity,
4415  allocSize,
4416  allocAlignment,
4417  allocType,
4418  suballocIt,
4419  canMakeOtherLost,
4420  &tmpAllocRequest.offset,
4421  &tmpAllocRequest.itemsToMakeLostCount,
4422  &tmpAllocRequest.sumFreeSize,
4423  &tmpAllocRequest.sumItemSize))
4424  {
4425  tmpAllocRequest.item = suballocIt;
4426 
4427  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4428  {
4429  *pAllocationRequest = tmpAllocRequest;
4430  }
4431  }
4432  }
4433  }
4434 
4435  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4436  {
4437  return true;
4438  }
4439  }
4440 
4441  return false;
4442 }
4443 
4444 bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
4445 {
4446  while(pAllocationRequest->itemsToMakeLostCount > 0)
4447  {
4448  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4449  {
4450  ++pAllocationRequest->item;
4451  }
4452  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4453  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4454  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4455  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4456  {
4457  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4458  --pAllocationRequest->itemsToMakeLostCount;
4459  }
4460  else
4461  {
4462  return false;
4463  }
4464  }
4465 
4466  VMA_HEAVY_ASSERT(Validate());
4467  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4468  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4469 
4470  return true;
4471 }
4472 
4473 uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4474 {
4475  uint32_t lostAllocationCount = 0;
4476  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4477  it != m_Suballocations.end();
4478  ++it)
4479  {
4480  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4481  it->hAllocation->CanBecomeLost() &&
4482  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4483  {
4484  it = FreeSuballocation(it);
4485  ++lostAllocationCount;
4486  }
4487  }
4488  return lostAllocationCount;
4489 }
4490 
4491 bool VmaDeviceMemoryBlock::CheckAllocation(
4492  uint32_t currentFrameIndex,
4493  uint32_t frameInUseCount,
4494  VkDeviceSize bufferImageGranularity,
4495  VkDeviceSize allocSize,
4496  VkDeviceSize allocAlignment,
4497  VmaSuballocationType allocType,
4498  VmaSuballocationList::const_iterator suballocItem,
4499  bool canMakeOtherLost,
4500  VkDeviceSize* pOffset,
4501  size_t* itemsToMakeLostCount,
4502  VkDeviceSize* pSumFreeSize,
4503  VkDeviceSize* pSumItemSize) const
4504 {
4505  VMA_ASSERT(allocSize > 0);
4506  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4507  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4508  VMA_ASSERT(pOffset != VMA_NULL);
4509 
4510  *itemsToMakeLostCount = 0;
4511  *pSumFreeSize = 0;
4512  *pSumItemSize = 0;
4513 
4514  if(canMakeOtherLost)
4515  {
4516  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4517  {
4518  *pSumFreeSize = suballocItem->size;
4519  }
4520  else
4521  {
4522  if(suballocItem->hAllocation->CanBecomeLost() &&
4523  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4524  {
4525  ++*itemsToMakeLostCount;
4526  *pSumItemSize = suballocItem->size;
4527  }
4528  else
4529  {
4530  return false;
4531  }
4532  }
4533 
4534  // Remaining size is too small for this request: Early return.
4535  if(m_Size - suballocItem->offset < allocSize)
4536  {
4537  return false;
4538  }
4539 
4540  // Start from offset equal to beginning of this suballocation.
4541  *pOffset = suballocItem->offset;
4542 
4543  // Apply VMA_DEBUG_MARGIN at the beginning.
4544  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4545  {
4546  *pOffset += VMA_DEBUG_MARGIN;
4547  }
4548 
4549  // Apply alignment.
4550  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4551  *pOffset = VmaAlignUp(*pOffset, alignment);
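 // E.g. (illustrative): *pOffset == 13 with alignment == 16 becomes 16.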
4552 
4553  // Check previous suballocations for BufferImageGranularity conflicts.
4554  // Make bigger alignment if necessary.
4555  if(bufferImageGranularity > 1)
4556  {
4557  bool bufferImageGranularityConflict = false;
4558  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4559  while(prevSuballocItem != m_Suballocations.cbegin())
4560  {
4561  --prevSuballocItem;
4562  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4563  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4564  {
4565  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4566  {
4567  bufferImageGranularityConflict = true;
4568  break;
4569  }
4570  }
4571  else
4572  // Already on previous page.
4573  break;
4574  }
4575  if(bufferImageGranularityConflict)
4576  {
4577  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4578  }
4579  }
4580 
4581  // Now that we have final *pOffset, check if we are past suballocItem.
4582  // If yes, return false - this function should be called for another suballocItem as starting point.
4583  if(*pOffset >= suballocItem->offset + suballocItem->size)
4584  {
4585  return false;
4586  }
4587 
4588  // Calculate padding at the beginning based on current offset.
4589  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4590 
4591  // Calculate required margin at the end if this is not last suballocation.
4592  VmaSuballocationList::const_iterator next = suballocItem;
4593  ++next;
4594  const VkDeviceSize requiredEndMargin =
4595  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4596 
4597  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4598  // Another early return check.
4599  if(suballocItem->offset + totalSize > m_Size)
4600  {
4601  return false;
4602  }
4603 
4604  // Advance lastSuballocItem until desired size is reached.
4605  // Update itemsToMakeLostCount.
4606  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4607  if(totalSize > suballocItem->size)
4608  {
4609  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4610  while(remainingSize > 0)
4611  {
4612  ++lastSuballocItem;
4613  if(lastSuballocItem == m_Suballocations.cend())
4614  {
4615  return false;
4616  }
4617  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4618  {
4619  *pSumFreeSize += lastSuballocItem->size;
4620  }
4621  else
4622  {
4623  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4624  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4625  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4626  {
4627  ++*itemsToMakeLostCount;
4628  *pSumItemSize += lastSuballocItem->size;
4629  }
4630  else
4631  {
4632  return false;
4633  }
4634  }
4635  remainingSize = (lastSuballocItem->size < remainingSize) ?
4636  remainingSize - lastSuballocItem->size : 0;
4637  }
4638  }
4639 
4640  // Check next suballocations for BufferImageGranularity conflicts.
4641  // If conflict exists, we must mark more allocations lost or fail.
4642  if(bufferImageGranularity > 1)
4643  {
4644  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4645  ++nextSuballocItem;
4646  while(nextSuballocItem != m_Suballocations.cend())
4647  {
4648  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4649  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4650  {
4651  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4652  {
4653  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4654  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4655  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4656  {
4657  ++*itemsToMakeLostCount;
4658  }
4659  else
4660  {
4661  return false;
4662  }
4663  }
4664  }
4665  else
4666  {
4667  // Already on next page.
4668  break;
4669  }
4670  ++nextSuballocItem;
4671  }
4672  }
4673  }
4674  else
4675  {
4676  const VmaSuballocation& suballoc = *suballocItem;
4677  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4678 
4679  *pSumFreeSize = suballoc.size;
4680 
4681  // Size of this suballocation is too small for this request: Early return.
4682  if(suballoc.size < allocSize)
4683  {
4684  return false;
4685  }
4686 
4687  // Start from offset equal to beginning of this suballocation.
4688  *pOffset = suballoc.offset;
4689 
4690  // Apply VMA_DEBUG_MARGIN at the beginning.
4691  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4692  {
4693  *pOffset += VMA_DEBUG_MARGIN;
4694  }
4695 
4696  // Apply alignment.
4697  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4698  *pOffset = VmaAlignUp(*pOffset, alignment);
4699 
4700  // Check previous suballocations for BufferImageGranularity conflicts.
4701  // Make bigger alignment if necessary.
4702  if(bufferImageGranularity > 1)
4703  {
4704  bool bufferImageGranularityConflict = false;
4705  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4706  while(prevSuballocItem != m_Suballocations.cbegin())
4707  {
4708  --prevSuballocItem;
4709  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4710  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4711  {
4712  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4713  {
4714  bufferImageGranularityConflict = true;
4715  break;
4716  }
4717  }
4718  else
4719  // Already on previous page.
4720  break;
4721  }
4722  if(bufferImageGranularityConflict)
4723  {
4724  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4725  }
4726  }
4727 
4728  // Calculate padding at the beginning based on current offset.
4729  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4730 
4731  // Calculate required margin at the end if this is not last suballocation.
4732  VmaSuballocationList::const_iterator next = suballocItem;
4733  ++next;
4734  const VkDeviceSize requiredEndMargin =
4735  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4736 
4737  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4738  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4739  {
4740  return false;
4741  }
4742 
4743  // Check next suballocations for BufferImageGranularity conflicts.
4744  // If conflict exists, allocation cannot be made here.
4745  if(bufferImageGranularity > 1)
4746  {
4747  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4748  ++nextSuballocItem;
4749  while(nextSuballocItem != m_Suballocations.cend())
4750  {
4751  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4752  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4753  {
4754  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4755  {
4756  return false;
4757  }
4758  }
4759  else
4760  {
4761  // Already on next page.
4762  break;
4763  }
4764  ++nextSuballocItem;
4765  }
4766  }
4767  }
4768 
4769  // All tests passed: Success. pOffset is already filled.
4770  return true;
4771 }
4772 
4773 bool VmaDeviceMemoryBlock::IsEmpty() const
4774 {
4775  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4776 }
4777 
4778 void VmaDeviceMemoryBlock::Alloc(
4779  const VmaAllocationRequest& request,
4780  VmaSuballocationType type,
4781  VkDeviceSize allocSize,
4782  VmaAllocation hAllocation)
4783 {
4784  VMA_ASSERT(request.item != m_Suballocations.end());
4785  VmaSuballocation& suballoc = *request.item;
4786  // The given suballocation must be free.
4787  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4788  // Given offset is inside this suballocation.
4789  VMA_ASSERT(request.offset >= suballoc.offset);
4790  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4791  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4792  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4793 
4794  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4795  // it to become used.
4796  UnregisterFreeSuballocation(request.item);
4797 
4798  suballoc.offset = request.offset;
4799  suballoc.size = allocSize;
4800  suballoc.type = type;
4801  suballoc.hAllocation = hAllocation;
4802 
4803  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4804  if(paddingEnd)
4805  {
4806  VmaSuballocation paddingSuballoc = {};
4807  paddingSuballoc.offset = request.offset + allocSize;
4808  paddingSuballoc.size = paddingEnd;
4809  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4810  VmaSuballocationList::iterator next = request.item;
4811  ++next;
4812  const VmaSuballocationList::iterator paddingEndItem =
4813  m_Suballocations.insert(next, paddingSuballoc);
4814  RegisterFreeSuballocation(paddingEndItem);
4815  }
4816 
4817  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4818  if(paddingBegin)
4819  {
4820  VmaSuballocation paddingSuballoc = {};
4821  paddingSuballoc.offset = request.offset - paddingBegin;
4822  paddingSuballoc.size = paddingBegin;
4823  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4824  const VmaSuballocationList::iterator paddingBeginItem =
4825  m_Suballocations.insert(request.item, paddingSuballoc);
4826  RegisterFreeSuballocation(paddingBeginItem);
4827  }
4828 
4829  // Update totals.
4830  m_FreeCount = m_FreeCount - 1;
4831  if(paddingBegin > 0)
4832  {
4833  ++m_FreeCount;
4834  }
4835  if(paddingEnd > 0)
4836  {
4837  ++m_FreeCount;
4838  }
4839  m_SumFreeSize -= allocSize;
4840 }
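
// Illustrative arithmetic for the split that Alloc() just performed: one free
// suballocation is carved into an optional begin padding (caused by
// alignment), the allocation itself, and an optional end padding. A sketch
// with plain integers under assumed inputs, not library code.
#include <cassert>
#include <cstdint>

struct SplitResult { uint64_t paddingBegin; uint64_t paddingEnd; };

static SplitResult SplitFreeRange(
    uint64_t freeOffset, uint64_t freeSize,
    uint64_t allocOffset, uint64_t allocSize)
{
    assert(allocOffset >= freeOffset);
    const uint64_t paddingBegin = allocOffset - freeOffset;
    assert(freeSize >= paddingBegin + allocSize);
    const uint64_t paddingEnd = freeSize - paddingBegin - allocSize;
    return { paddingBegin, paddingEnd };
}

// Example: a 1000-byte free range at offset 0 receiving a 256-byte allocation
// aligned up to offset 64 leaves 64 bytes of begin padding and 680 bytes of
// end padding - exactly the two new FREE suballocations inserted above.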
4841 
4842 VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
4843 {
4844  // Change this suballocation to be marked as free.
4845  VmaSuballocation& suballoc = *suballocItem;
4846  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4847  suballoc.hAllocation = VK_NULL_HANDLE;
4848 
4849  // Update totals.
4850  ++m_FreeCount;
4851  m_SumFreeSize += suballoc.size;
4852 
4853  // Merge with previous and/or next suballocation if it's also free.
4854  bool mergeWithNext = false;
4855  bool mergeWithPrev = false;
4856 
4857  VmaSuballocationList::iterator nextItem = suballocItem;
4858  ++nextItem;
4859  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
4860  {
4861  mergeWithNext = true;
4862  }
4863 
4864  VmaSuballocationList::iterator prevItem = suballocItem;
4865  if(suballocItem != m_Suballocations.begin())
4866  {
4867  --prevItem;
4868  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4869  {
4870  mergeWithPrev = true;
4871  }
4872  }
4873 
4874  if(mergeWithNext)
4875  {
4876  UnregisterFreeSuballocation(nextItem);
4877  MergeFreeWithNext(suballocItem);
4878  }
4879 
4880  if(mergeWithPrev)
4881  {
4882  UnregisterFreeSuballocation(prevItem);
4883  MergeFreeWithNext(prevItem);
4884  RegisterFreeSuballocation(prevItem);
4885  return prevItem;
4886  }
4887  else
4888  {
4889  RegisterFreeSuballocation(suballocItem);
4890  return suballocItem;
4891  }
4892 }
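
// Worked example of the merge logic above: freeing B in the layout
// [free A][used B][free C] first folds C into B (mergeWithNext), then folds
// the grown B into A (mergeWithPrev), so the block ends up with a single free
// suballocation registered exactly once in m_FreeSuballocationsBySize.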
4893 
4894 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4895 {
4896  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4897  suballocItem != m_Suballocations.end();
4898  ++suballocItem)
4899  {
4900  VmaSuballocation& suballoc = *suballocItem;
4901  if(suballoc.hAllocation == allocation)
4902  {
4903  FreeSuballocation(suballocItem);
4904  VMA_HEAVY_ASSERT(Validate());
4905  return;
4906  }
4907  }
4908  VMA_ASSERT(0 && "Not found!");
4909 }
4910 
4911 #if VMA_STATS_STRING_ENABLED
4912 
4913 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4914 {
4915  json.BeginObject();
4916 
4917  json.WriteString("TotalBytes");
4918  json.WriteNumber(m_Size);
4919 
4920  json.WriteString("UnusedBytes");
4921  json.WriteNumber(m_SumFreeSize);
4922 
4923  json.WriteString("Allocations");
4924  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4925 
4926  json.WriteString("UnusedRanges");
4927  json.WriteNumber(m_FreeCount);
4928 
4929  json.WriteString("Suballocations");
4930  json.BeginArray();
4931  size_t i = 0;
4932  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4933  suballocItem != m_Suballocations.cend();
4934  ++suballocItem, ++i)
4935  {
4936  json.BeginObject(true);
4937 
4938  json.WriteString("Type");
4939  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4940 
4941  json.WriteString("Size");
4942  json.WriteNumber(suballocItem->size);
4943 
4944  json.WriteString("Offset");
4945  json.WriteNumber(suballocItem->offset);
4946 
4947  json.EndObject();
4948  }
4949  json.EndArray();
4950 
4951  json.EndObject();
4952 }
4953 
4954 #endif // #if VMA_STATS_STRING_ENABLED
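
// For reference, the object written by PrintDetailedMap() above has the
// following shape. The numbers are made-up example values; the "Type" strings
// come from VMA_SUBALLOCATION_TYPE_NAMES.
//
// {
//   "TotalBytes": 268435456,
//   "UnusedBytes": 267386880,
//   "Allocations": 1,
//   "UnusedRanges": 1,
//   "Suballocations": [
//     { "Type": "BUFFER", "Size": 1048576, "Offset": 0 },
//     { "Type": "FREE", "Size": 267386880, "Offset": 1048576 }
//   ]
// }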
4955 
4956 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4957 {
4958  VMA_ASSERT(item != m_Suballocations.end());
4959  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4960 
4961  VmaSuballocationList::iterator nextItem = item;
4962  ++nextItem;
4963  VMA_ASSERT(nextItem != m_Suballocations.end());
4964  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4965 
4966  item->size += nextItem->size;
4967  --m_FreeCount;
4968  m_Suballocations.erase(nextItem);
4969 }
4970 
4971 void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
4972 {
4973  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4974  VMA_ASSERT(item->size > 0);
4975 
4976  // You may want to enable this validation at the beginning or at the end of
4977  // this function, depending on what you want to check.
4978  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4979 
4980  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4981  {
4982  if(m_FreeSuballocationsBySize.empty())
4983  {
4984  m_FreeSuballocationsBySize.push_back(item);
4985  }
4986  else
4987  {
4988  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
4989  }
4990  }
4991 
4992  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4993 }
4994 
4995 
4996 void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
4997 {
4998  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4999  VMA_ASSERT(item->size > 0);
5000 
5001  // You may want to enable this validation at the beginning or at the end of
5002  // this function, depending on what you want to check.
5003  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5004 
5005  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5006  {
5007  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5008  m_FreeSuballocationsBySize.data(),
5009  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5010  item,
5011  VmaSuballocationItemSizeLess());
5012  for(size_t index = it - m_FreeSuballocationsBySize.data();
5013  index < m_FreeSuballocationsBySize.size();
5014  ++index)
5015  {
5016  if(m_FreeSuballocationsBySize[index] == item)
5017  {
5018  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5019  return;
5020  }
5021  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5022  }
5023  VMA_ASSERT(0 && "Not found.");
5024  }
5025 
5026  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5027 }
5028 
5029 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
5030 {
5031  VkDeviceSize lastSize = 0;
5032  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5033  {
5034  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5035 
5036  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5037  {
5038  VMA_ASSERT(0);
5039  return false;
5040  }
5041  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5042  {
5043  VMA_ASSERT(0);
5044  return false;
5045  }
5046  if(it->size < lastSize)
5047  {
5048  VMA_ASSERT(0);
5049  return false;
5050  }
5051 
5052  lastSize = it->size;
5053  }
5054  return true;
5055 }
5056 
5057 static void InitStatInfo(VmaStatInfo& outInfo)
5058 {
5059  memset(&outInfo, 0, sizeof(outInfo));
5060  outInfo.AllocationSizeMin = UINT64_MAX;
5061  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5062 }
5063 
5064 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
5065 {
5066  outInfo.BlockCount = 1;
5067 
5068  const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
5069  outInfo.AllocationCount = rangeCount - block.m_FreeCount;
5070  outInfo.UnusedRangeCount = block.m_FreeCount;
5071 
5072  outInfo.UnusedBytes = block.m_SumFreeSize;
5073  outInfo.UsedBytes = block.m_Size - outInfo.UnusedBytes;
5074 
5075  outInfo.AllocationSizeMin = UINT64_MAX;
5076  outInfo.AllocationSizeMax = 0;
5077  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5078  outInfo.UnusedRangeSizeMax = 0;
5079 
5080  for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
5081  suballocItem != block.m_Suballocations.cend();
5082  ++suballocItem)
5083  {
5084  const VmaSuballocation& suballoc = *suballocItem;
5085  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5086  {
5087  outInfo.AllocationSizeMin = VMA_MIN(outInfo.AllocationSizeMin, suballoc.size);
5088  outInfo.AllocationSizeMax = VMA_MAX(outInfo.AllocationSizeMax, suballoc.size);
5089  }
5090  else
5091  {
5092  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
5093  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
5094  }
5095  }
5096 }
5097 
5098 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5099 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5100 {
5101  inoutInfo.BlockCount += srcInfo.BlockCount;
5102  inoutInfo.AllocationCount += srcInfo.AllocationCount;
5103  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
5104  inoutInfo.UsedBytes += srcInfo.UsedBytes;
5105  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
5106  inoutInfo.AllocationSizeMin = VMA_MIN(inoutInfo.AllocationSizeMin, srcInfo.AllocationSizeMin);
5107  inoutInfo.AllocationSizeMax = VMA_MAX(inoutInfo.AllocationSizeMax, srcInfo.AllocationSizeMax);
5108  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
5109  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
5110 }
5111 
5112 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5113 {
5114  inoutInfo.AllocationSizeAvg = (inoutInfo.AllocationCount > 0) ?
5115  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.AllocationCount) : 0;
5116  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
5117  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
5118 }
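
// How the three helpers above compose (a sketch of the calling pattern used
// by CalculateStats() further below, not a new API):
//
//   VmaStatInfo total;
//   InitStatInfo(total);                          // zeros, mins = UINT64_MAX
//   for(/* each block */)
//   {
//       VmaStatInfo blockInfo;
//       CalcAllocationStatInfo(blockInfo, block); // one pass over the block
//       VmaAddStatInfo(total, blockInfo);         // fold sums and min/max
//   }
//   VmaPostprocessCalcStatInfo(total);            // derive averages last
//
// Postprocessing must come last: averages cannot be folded incrementally the
// way sums and min/max can.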
5119 
5120 VmaPool_T::VmaPool_T(
5121  VmaAllocator hAllocator,
5122  const VmaPoolCreateInfo& createInfo) :
5123  m_BlockVector(
5124  hAllocator,
5125  createInfo.memoryTypeIndex,
5126  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5127  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5128  createInfo.blockSize,
5129  createInfo.minBlockCount,
5130  createInfo.maxBlockCount,
5131  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5132  createInfo.frameInUseCount,
5133  true) // isCustomPool
5134 {
5135 }
5136 
5137 VmaPool_T::~VmaPool_T()
5138 {
5139 }
5140 
5141 #if VMA_STATS_STRING_ENABLED
5142 
5143 #endif // #if VMA_STATS_STRING_ENABLED
5144 
5145 VmaBlockVector::VmaBlockVector(
5146  VmaAllocator hAllocator,
5147  uint32_t memoryTypeIndex,
5148  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5149  VkDeviceSize preferredBlockSize,
5150  size_t minBlockCount,
5151  size_t maxBlockCount,
5152  VkDeviceSize bufferImageGranularity,
5153  uint32_t frameInUseCount,
5154  bool isCustomPool) :
5155  m_hAllocator(hAllocator),
5156  m_MemoryTypeIndex(memoryTypeIndex),
5157  m_BlockVectorType(blockVectorType),
5158  m_PreferredBlockSize(preferredBlockSize),
5159  m_MinBlockCount(minBlockCount),
5160  m_MaxBlockCount(maxBlockCount),
5161  m_BufferImageGranularity(bufferImageGranularity),
5162  m_FrameInUseCount(frameInUseCount),
5163  m_IsCustomPool(isCustomPool),
5164  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5165  m_HasEmptyBlock(false),
5166  m_pDefragmentator(VMA_NULL)
5167 {
5168 }
5169 
5170 VmaBlockVector::~VmaBlockVector()
5171 {
5172  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5173 
5174  for(size_t i = m_Blocks.size(); i--; )
5175  {
5176  m_Blocks[i]->Destroy(m_hAllocator);
5177  vma_delete(m_hAllocator, m_Blocks[i]);
5178  }
5179 }
5180 
5181 VkResult VmaBlockVector::CreateMinBlocks()
5182 {
5183  for(size_t i = 0; i < m_MinBlockCount; ++i)
5184  {
5185  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5186  if(res != VK_SUCCESS)
5187  {
5188  return res;
5189  }
5190  }
5191  return VK_SUCCESS;
5192 }
5193 
5194 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5195 {
5196  pStats->size = 0;
5197  pStats->unusedSize = 0;
5198  pStats->allocationCount = 0;
5199  pStats->unusedRangeCount = 0;
5200  pStats->unusedRangeSizeMax = 0;
5201 
5202  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5203 
5204  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5205  {
5206  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5207  VMA_ASSERT(pBlock);
5208  VMA_HEAVY_ASSERT(pBlock->Validate());
5209 
5210  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5211 
5212  pStats->size += pBlock->m_Size;
5213  pStats->unusedSize += pBlock->m_SumFreeSize;
5214  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5215  pStats->unusedRangeCount += pBlock->m_FreeCount;
5216  pStats->unusedRangeSizeMax = VMA_MAX(pStats->unusedRangeSizeMax, pBlock->GetUnusedRangeSizeMax());
5217  }
5218 }
5219 
5220 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5221 
5222 VkResult VmaBlockVector::Allocate(
5223  VmaPool hCurrentPool,
5224  uint32_t currentFrameIndex,
5225  const VkMemoryRequirements& vkMemReq,
5226  const VmaAllocationCreateInfo& createInfo,
5227  VmaSuballocationType suballocType,
5228  VmaAllocation* pAllocation)
5229 {
5230  // Validate flags.
5231  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5232  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5233  {
5234  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5235  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5236  }
5237 
5238  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5239 
5240  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5241  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5242  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5243  {
5244  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5245  VMA_ASSERT(pCurrBlock);
5246  VmaAllocationRequest currRequest = {};
5247  if(pCurrBlock->CreateAllocationRequest(
5248  currentFrameIndex,
5249  m_FrameInUseCount,
5250  m_BufferImageGranularity,
5251  vkMemReq.size,
5252  vkMemReq.alignment,
5253  suballocType,
5254  false, // canMakeOtherLost
5255  &currRequest))
5256  {
5257  // Allocate from pCurrBlock.
5258  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5259 
5260  // We no longer have an empty block.
5261  if(pCurrBlock->IsEmpty())
5262  {
5263  m_HasEmptyBlock = false;
5264  }
5265 
5266  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5267  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5268  (*pAllocation)->InitBlockAllocation(
5269  hCurrentPool,
5270  pCurrBlock,
5271  currRequest.offset,
5272  vkMemReq.alignment,
5273  vkMemReq.size,
5274  suballocType,
5275  createInfo.pUserData,
5276  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5277  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5278  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5279  return VK_SUCCESS;
5280  }
5281  }
5282 
5283  const bool canCreateNewBlock =
5284  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5285  (m_Blocks.size() < m_MaxBlockCount);
5286 
5287  // 2. Try to create new block.
5288  if(canCreateNewBlock)
5289  {
5290  // 2.1. Start with full preferredBlockSize.
5291  VkDeviceSize blockSize = m_PreferredBlockSize;
5292  size_t newBlockIndex = 0;
5293  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5294  // Allocating blocks of other sizes is allowed only in default pools.
5295  // In custom pools block size is fixed.
5296  if(res < 0 && m_IsCustomPool == false)
5297  {
5298  // 2.2. Try half the size.
5299  blockSize /= 2;
5300  if(blockSize >= vkMemReq.size)
5301  {
5302  res = CreateBlock(blockSize, &newBlockIndex);
5303  if(res < 0)
5304  {
5305  // 2.3. Try quarter the size.
5306  blockSize /= 2;
5307  if(blockSize >= vkMemReq.size)
5308  {
5309  res = CreateBlock(blockSize, &newBlockIndex);
5310  }
5311  }
5312  }
5313  }
5314  if(res == VK_SUCCESS)
5315  {
5316  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5317  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5318 
5319  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5320  VmaAllocationRequest allocRequest = {};
5321  allocRequest.item = pBlock->m_Suballocations.begin();
5322  allocRequest.offset = 0;
5323  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5324  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5325  (*pAllocation)->InitBlockAllocation(
5326  hCurrentPool,
5327  pBlock,
5328  allocRequest.offset,
5329  vkMemReq.alignment,
5330  vkMemReq.size,
5331  suballocType,
5332  createInfo.pUserData,
5333  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5334  VMA_HEAVY_ASSERT(pBlock->Validate());
5335  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
5336 
5337  return VK_SUCCESS;
5338  }
5339  }
5340 
5341  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5342 
5343  // 3. Try to allocate from existing blocks with making other allocations lost.
5344  if(canMakeOtherLost)
5345  {
5346  uint32_t tryIndex = 0;
5347  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5348  {
5349  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5350  VmaAllocationRequest bestRequest = {};
5351  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5352 
5353  // 1. Search existing allocations.
5354  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5355  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5356  {
5357  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5358  VMA_ASSERT(pCurrBlock);
5359  VmaAllocationRequest currRequest = {};
5360  if(pCurrBlock->CreateAllocationRequest(
5361  currentFrameIndex,
5362  m_FrameInUseCount,
5363  m_BufferImageGranularity,
5364  vkMemReq.size,
5365  vkMemReq.alignment,
5366  suballocType,
5367  canMakeOtherLost,
5368  &currRequest))
5369  {
5370  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5371  if(pBestRequestBlock == VMA_NULL ||
5372  currRequestCost < bestRequestCost)
5373  {
5374  pBestRequestBlock = pCurrBlock;
5375  bestRequest = currRequest;
5376  bestRequestCost = currRequestCost;
5377 
5378  if(bestRequestCost == 0)
5379  {
5380  break;
5381  }
5382  }
5383  }
5384  }
5385 
5386  if(pBestRequestBlock != VMA_NULL)
5387  {
5388  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5389  currentFrameIndex,
5390  m_FrameInUseCount,
5391  &bestRequest))
5392  {
5393  // We no longer have an empty block.
5394  if(pBestRequestBlock->IsEmpty())
5395  {
5396  m_HasEmptyBlock = false;
5397  }
5398  // Allocate from this pBlock.
5399  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5400  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5401  (*pAllocation)->InitBlockAllocation(
5402  hCurrentPool,
5403  pBestRequestBlock,
5404  bestRequest.offset,
5405  vkMemReq.alignment,
5406  vkMemReq.size,
5407  suballocType,
5408  createInfo.pUserData,
5409  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5410  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5411  VMA_DEBUG_LOG(" Returned from existing block");
5412  return VK_SUCCESS;
5413  }
5414  // else: Some allocations must have been touched while we are here. Next try.
5415  }
5416  else
5417  {
5418  // Could not find place in any of the blocks - break outer loop.
5419  break;
5420  }
5421  }
5422  /* Maximum number of tries exceeded - a very unlikely event when many other
5423  threads are simultaneously touching allocations, making it impossible to make
5424  them lost at the same time as we try to allocate. */
5425  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5426  {
5427  return VK_ERROR_TOO_MANY_OBJECTS;
5428  }
5429  }
5430 
5431  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5432 }
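
// The "2.1/2.2/2.3" fallback above tries the full preferredBlockSize, then
// half, then a quarter, but never a block smaller than the allocation itself.
// The same control flow compressed into a standalone sketch (illustrative
// only - the real code additionally restricts the halving to default,
// non-custom pools):
#include <cstdint>

static uint64_t ChooseNewBlockSize(
    uint64_t preferredBlockSize,
    uint64_t allocSize,
    bool (*tryCreateBlock)(uint64_t)) // hypothetical creation callback
{
    uint64_t blockSize = preferredBlockSize;
    for(int attempt = 0; attempt < 3; ++attempt) // full, 1/2, 1/4
    {
        if(tryCreateBlock(blockSize))
        {
            return blockSize;
        }
        blockSize /= 2;
        if(blockSize < allocSize) // next candidate could no longer fit
        {
            break;
        }
    }
    return 0; // all attempts failed
}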
5433 
5434 void VmaBlockVector::Free(
5435  VmaAllocation hAllocation)
5436 {
5437  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5438 
5439  // Scope for lock.
5440  {
5441  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5442 
5443  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5444 
5445  pBlock->Free(hAllocation);
5446  VMA_HEAVY_ASSERT(pBlock->Validate());
5447 
5448  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5449 
5450  // pBlock became empty after this deallocation.
5451  if(pBlock->IsEmpty())
5452  {
5453  // Already has an empty block. We don't want to have two, so delete this one.
5454  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5455  {
5456  pBlockToDelete = pBlock;
5457  Remove(pBlock);
5458  }
5459  // We now have our first empty block.
5460  else
5461  {
5462  m_HasEmptyBlock = true;
5463  }
5464  }
5465  // Must be called last: IncrementallySortBlocks may reorder m_Blocks, invalidating block indices used above.
5466  IncrementallySortBlocks();
5467  }
5468 
5469  // Destruction of a free block. Deferred until this point, outside of mutex
5470  // lock, for performance reasons.
5471  if(pBlockToDelete != VMA_NULL)
5472  {
5473  VMA_DEBUG_LOG(" Deleted empty block");
5474  pBlockToDelete->Destroy(m_hAllocator);
5475  vma_delete(m_hAllocator, pBlockToDelete);
5476  }
5477 }
5478 
5479 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5480 {
5481  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5482  {
5483  if(m_Blocks[blockIndex] == pBlock)
5484  {
5485  VmaVectorRemove(m_Blocks, blockIndex);
5486  return;
5487  }
5488  }
5489  VMA_ASSERT(0);
5490 }
5491 
5492 void VmaBlockVector::IncrementallySortBlocks()
5493 {
5494  // Bubble sort only until first swap.
5495  for(size_t i = 1; i < m_Blocks.size(); ++i)
5496  {
5497  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5498  {
5499  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5500  return;
5501  }
5502  }
5503 }
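
// Example: with blocks holding {64, 32, 128} free bytes, one call swaps the
// first out-of-order pair and stops, yielding {32, 64, 128}. Each Free() thus
// pays for at most one swap, and the vector converges toward ascending free
// size over many frees instead of paying for a full sort every time.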
5504 
5505 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5506 {
5507  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5508  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5509  allocInfo.allocationSize = blockSize;
5510  VkDeviceMemory mem = VK_NULL_HANDLE;
5511  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5512  if(res < 0)
5513  {
5514  return res;
5515  }
5516 
5517  // New VkDeviceMemory successfully created.
5518 
5519  // Map memory if needed.
5520  void* pMappedData = VMA_NULL;
5521  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5522  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5523  {
5524  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5525  m_hAllocator->m_hDevice,
5526  mem,
5527  0,
5528  VK_WHOLE_SIZE,
5529  0,
5530  &pMappedData);
5531  if(res < 0)
5532  {
5533  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5534  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5535  return res;
5536  }
5537  }
5538 
5539  // Create a new block object for it.
5540  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5541  pBlock->Init(
5542  m_MemoryTypeIndex,
5543  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5544  mem,
5545  allocInfo.allocationSize,
5546  persistentMap,
5547  pMappedData);
5548 
5549  m_Blocks.push_back(pBlock);
5550  if(pNewBlockIndex != VMA_NULL)
5551  {
5552  *pNewBlockIndex = m_Blocks.size() - 1;
5553  }
5554 
5555  return VK_SUCCESS;
5556 }
5557 
5558 #if VMA_STATS_STRING_ENABLED
5559 
5560 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5561 {
5562  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5563 
5564  json.BeginObject();
5565 
5566  if(m_IsCustomPool)
5567  {
5568  json.WriteString("MemoryTypeIndex");
5569  json.WriteNumber(m_MemoryTypeIndex);
5570 
5571  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5572  {
5573  json.WriteString("Mapped");
5574  json.WriteBool(true);
5575  }
5576 
5577  json.WriteString("BlockSize");
5578  json.WriteNumber(m_PreferredBlockSize);
5579 
5580  json.WriteString("BlockCount");
5581  json.BeginObject(true);
5582  if(m_MinBlockCount > 0)
5583  {
5584  json.WriteString("Min");
5585  json.WriteNumber(m_MinBlockCount);
5586  }
5587  if(m_MaxBlockCount < SIZE_MAX)
5588  {
5589  json.WriteString("Max");
5590  json.WriteNumber(m_MaxBlockCount);
5591  }
5592  json.WriteString("Cur");
5593  json.WriteNumber(m_Blocks.size());
5594  json.EndObject();
5595 
5596  if(m_FrameInUseCount > 0)
5597  {
5598  json.WriteString("FrameInUseCount");
5599  json.WriteNumber(m_FrameInUseCount);
5600  }
5601  }
5602  else
5603  {
5604  json.WriteString("PreferredBlockSize");
5605  json.WriteNumber(m_PreferredBlockSize);
5606  }
5607 
5608  json.WriteString("Blocks");
5609  json.BeginArray();
5610  for(size_t i = 0; i < m_Blocks.size(); ++i)
5611  {
5612  m_Blocks[i]->PrintDetailedMap(json);
5613  }
5614  json.EndArray();
5615 
5616  json.EndObject();
5617 }
5618 
5619 #endif // #if VMA_STATS_STRING_ENABLED
5620 
5621 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5622 {
5623  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5624 
5625  for(size_t i = m_Blocks.size(); i--; )
5626  {
5627  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5628  if(pBlock->m_pMappedData != VMA_NULL)
5629  {
5630  VMA_ASSERT(pBlock->m_PersistentMap);
5631  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5632  pBlock->m_pMappedData = VMA_NULL;
5633  }
5634  }
5635 }
5636 
5637 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5638 {
5639  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5640 
5641  VkResult finalResult = VK_SUCCESS;
5642  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5643  {
5644  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5645  if(pBlock->m_PersistentMap)
5646  {
5647  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5648  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5649  m_hAllocator->m_hDevice,
5650  pBlock->m_hMemory,
5651  0,
5652  VK_WHOLE_SIZE,
5653  0,
5654  &pBlock->m_pMappedData);
5655  if(localResult != VK_SUCCESS)
5656  {
5657  finalResult = localResult;
5658  }
5659  }
5660  }
5661  return finalResult;
5662 }
5663 
5664 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5665  VmaAllocator hAllocator,
5666  uint32_t currentFrameIndex)
5667 {
5668  if(m_pDefragmentator == VMA_NULL)
5669  {
5670  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5671  hAllocator,
5672  this,
5673  currentFrameIndex);
5674  }
5675 
5676  return m_pDefragmentator;
5677 }
5678 
5679 VkResult VmaBlockVector::Defragment(
5680  VmaDefragmentationStats* pDefragmentationStats,
5681  VkDeviceSize& maxBytesToMove,
5682  uint32_t& maxAllocationsToMove)
5683 {
5684  if(m_pDefragmentator == VMA_NULL)
5685  {
5686  return VK_SUCCESS;
5687  }
5688 
5689  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5690 
5691  // Defragment.
5692  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5693 
5694  // Accumulate statistics.
5695  if(pDefragmentationStats != VMA_NULL)
5696  {
5697  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5698  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5699  pDefragmentationStats->bytesMoved += bytesMoved;
5700  pDefragmentationStats->allocationsMoved += allocationsMoved;
5701  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5702  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5703  maxBytesToMove -= bytesMoved;
5704  maxAllocationsToMove -= allocationsMoved;
5705  }
5706 
5707  // Free empty blocks.
5708  m_HasEmptyBlock = false;
5709  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5710  {
5711  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5712  if(pBlock->IsEmpty())
5713  {
5714  if(m_Blocks.size() > m_MinBlockCount)
5715  {
5716  if(pDefragmentationStats != VMA_NULL)
5717  {
5718  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5719  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5720  }
5721 
5722  VmaVectorRemove(m_Blocks, blockIndex);
5723  pBlock->Destroy(m_hAllocator);
5724  vma_delete(m_hAllocator, pBlock);
5725  }
5726  else
5727  {
5728  m_HasEmptyBlock = true;
5729  }
5730  }
5731  }
5732 
5733  return result;
5734 }
5735 
5736 void VmaBlockVector::DestroyDefragmentator()
5737 {
5738  if(m_pDefragmentator != VMA_NULL)
5739  {
5740  vma_delete(m_hAllocator, m_pDefragmentator);
5741  m_pDefragmentator = VMA_NULL;
5742  }
5743 }
5744 
5745 void VmaBlockVector::MakePoolAllocationsLost(
5746  uint32_t currentFrameIndex,
5747  size_t* pLostAllocationCount)
5748 {
5749  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5750 
5751  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5752  {
5753  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5754  VMA_ASSERT(pBlock);
5755  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5756  }
5757 }
5758 
5759 void VmaBlockVector::AddStats(VmaStats* pStats)
5760 {
5761  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5762  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5763 
5764  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5765 
5766  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5767  {
5768  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5769  VMA_ASSERT(pBlock);
5770  VMA_HEAVY_ASSERT(pBlock->Validate());
5771  VmaStatInfo allocationStatInfo;
5772  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5773  VmaAddStatInfo(pStats->total, allocationStatInfo);
5774  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5775  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5776  }
5777 }
5778 
5779 ////////////////////////////////////////////////////////////////////////////////
5780 // VmaDefragmentator members definition
5781 
5782 VmaDefragmentator::VmaDefragmentator(
5783  VmaAllocator hAllocator,
5784  VmaBlockVector* pBlockVector,
5785  uint32_t currentFrameIndex) :
5786  m_hAllocator(hAllocator),
5787  m_pBlockVector(pBlockVector),
5788  m_CurrentFrameIndex(currentFrameIndex),
5789  m_BytesMoved(0),
5790  m_AllocationsMoved(0),
5791  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5792  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5793 {
5794 }
5795 
5796 VmaDefragmentator::~VmaDefragmentator()
5797 {
5798  for(size_t i = m_Blocks.size(); i--; )
5799  {
5800  vma_delete(m_hAllocator, m_Blocks[i]);
5801  }
5802 }
5803 
5804 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5805 {
5806  AllocationInfo allocInfo;
5807  allocInfo.m_hAllocation = hAlloc;
5808  allocInfo.m_pChanged = pChanged;
5809  m_Allocations.push_back(allocInfo);
5810 }
5811 
5812 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5813 {
5814  // It has already been mapped for defragmentation.
5815  if(m_pMappedDataForDefragmentation)
5816  {
5817  *ppMappedData = m_pMappedDataForDefragmentation;
5818  return VK_SUCCESS;
5819  }
5820 
5821  // It is persistently mapped.
5822  if(m_pBlock->m_PersistentMap)
5823  {
5824  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5825  *ppMappedData = m_pBlock->m_pMappedData;
5826  return VK_SUCCESS;
5827  }
5828 
5829  // Map on first usage.
5830  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5831  hAllocator->m_hDevice,
5832  m_pBlock->m_hMemory,
5833  0,
5834  VK_WHOLE_SIZE,
5835  0,
5836  &m_pMappedDataForDefragmentation);
5837  *ppMappedData = m_pMappedDataForDefragmentation;
5838  return res;
5839 }
5840 
5841 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5842 {
5843  if(m_pMappedDataForDefragmentation != VMA_NULL)
5844  {
5845  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5846  }
5847 }
5848 
5849 VkResult VmaDefragmentator::DefragmentRound(
5850  VkDeviceSize maxBytesToMove,
5851  uint32_t maxAllocationsToMove)
5852 {
5853  if(m_Blocks.empty())
5854  {
5855  return VK_SUCCESS;
5856  }
5857 
5858  size_t srcBlockIndex = m_Blocks.size() - 1;
5859  size_t srcAllocIndex = SIZE_MAX;
5860  for(;;)
5861  {
5862  // 1. Find next allocation to move.
5863  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5864  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5865  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5866  {
5867  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5868  {
5869  // Finished: no more allocations to process.
5870  if(srcBlockIndex == 0)
5871  {
5872  return VK_SUCCESS;
5873  }
5874  else
5875  {
5876  --srcBlockIndex;
5877  srcAllocIndex = SIZE_MAX;
5878  }
5879  }
5880  else
5881  {
5882  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5883  }
5884  }
5885 
5886  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5887  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5888 
5889  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5890  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5891  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5892  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5893 
5894  // 2. Try to find new place for this allocation in preceding or current block.
5895  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5896  {
5897  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5898  VmaAllocationRequest dstAllocRequest;
5899  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
5900  m_CurrentFrameIndex,
5901  m_pBlockVector->GetFrameInUseCount(),
5902  m_pBlockVector->GetBufferImageGranularity(),
5903  size,
5904  alignment,
5905  suballocType,
5906  false, // canMakeOtherLost
5907  &dstAllocRequest) &&
5908  MoveMakesSense(
5909  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5910  {
5911  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5912 
5913  // Reached limit on number of allocations or bytes to move.
5914  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5915  (m_BytesMoved + size > maxBytesToMove))
5916  {
5917  return VK_INCOMPLETE;
5918  }
5919 
5920  void* pDstMappedData = VMA_NULL;
5921  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
5922  if(res != VK_SUCCESS)
5923  {
5924  return res;
5925  }
5926 
5927  void* pSrcMappedData = VMA_NULL;
5928  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
5929  if(res != VK_SUCCESS)
5930  {
5931  return res;
5932  }
5933 
5934  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
5935  memcpy(
5936  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
5937  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
5938  static_cast<size_t>(size));
5939 
5940  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
5941  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
5942 
5943  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
5944 
5945  if(allocInfo.m_pChanged != VMA_NULL)
5946  {
5947  *allocInfo.m_pChanged = VK_TRUE;
5948  }
5949 
5950  ++m_AllocationsMoved;
5951  m_BytesMoved += size;
5952 
5953  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
5954 
5955  break;
5956  }
5957  }
5958 
5959  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
5960 
5961  if(srcAllocIndex > 0)
5962  {
5963  --srcAllocIndex;
5964  }
5965  else
5966  {
5967  if(srcBlockIndex > 0)
5968  {
5969  --srcBlockIndex;
5970  srcAllocIndex = SIZE_MAX;
5971  }
5972  else
5973  {
5974  return VK_SUCCESS;
5975  }
5976  }
5977  }
5978 }
5979 
5980 VkResult VmaDefragmentator::Defragment(
5981  VkDeviceSize maxBytesToMove,
5982  uint32_t maxAllocationsToMove)
5983 {
5984  if(m_Allocations.empty())
5985  {
5986  return VK_SUCCESS;
5987  }
5988 
5989  // Create block info for each block.
5990  const size_t blockCount = m_pBlockVector->m_Blocks.size();
5991  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5992  {
5993  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
5994  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
5995  m_Blocks.push_back(pBlockInfo);
5996  }
5997 
5998  // Sort them by m_pBlock pointer value.
5999  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6000 
6001  // Move each allocation info from m_Allocations to the m_Allocations vector of its owning BlockInfo.
6002  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6003  {
6004  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6005  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
6006  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6007  {
6008  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6009  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6010  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6011  {
6012  (*it)->m_Allocations.push_back(allocInfo);
6013  }
6014  else
6015  {
6016  VMA_ASSERT(0);
6017  }
6018  }
6019  }
6020  m_Allocations.clear();
6021 
6022  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6023  {
6024  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6025  pBlockInfo->CalcHasNonMovableAllocations();
6026  pBlockInfo->SortAllocationsBySizeDescecnding();
6027  }
6028 
6029  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6030  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6031 
6032  // Execute defragmentation rounds (the main part).
6033  VkResult result = VK_SUCCESS;
6034  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6035  {
6036  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6037  }
6038 
6039  // Unmap blocks that were mapped for defragmentation.
6040  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6041  {
6042  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6043  }
6044 
6045  return result;
6046 }
6047 
6048 bool VmaDefragmentator::MoveMakesSense(
6049  size_t dstBlockIndex, VkDeviceSize dstOffset,
6050  size_t srcBlockIndex, VkDeviceSize srcOffset)
6051 {
6052  if(dstBlockIndex < srcBlockIndex)
6053  {
6054  return true;
6055  }
6056  if(dstBlockIndex > srcBlockIndex)
6057  {
6058  return false;
6059  }
6060  if(dstOffset < srcOffset)
6061  {
6062  return true;
6063  }
6064  return false;
6065 }
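
// MoveMakesSense() above is a strict lexicographic "less than" on the pair
// (blockIndex, offset): a move is worthwhile only if it lands strictly
// earlier in the block order, or earlier within the same block. An equivalent
// formulation using the standard library (a sketch, not the library's code):
#include <cstddef>
#include <cstdint>
#include <tuple>

static bool MoveMakesSenseCompact(
    size_t dstBlockIndex, uint64_t dstOffset,
    size_t srcBlockIndex, uint64_t srcOffset)
{
    return std::make_tuple(dstBlockIndex, dstOffset) <
        std::make_tuple(srcBlockIndex, srcOffset);
}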
6066 
6067 ////////////////////////////////////////////////////////////////////////////////
6068 // VmaAllocator_T
6069 
6070 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6071  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6072  m_PhysicalDevice(pCreateInfo->physicalDevice),
6073  m_hDevice(pCreateInfo->device),
6074  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6075  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6076  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6077  m_UnmapPersistentlyMappedMemoryCounter(0),
6078  m_PreferredLargeHeapBlockSize(0),
6079  m_PreferredSmallHeapBlockSize(0),
6080  m_CurrentFrameIndex(0),
6081  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6082 {
6083  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6084 
6085  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6086  memset(&m_MemProps, 0, sizeof(m_MemProps));
6087  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6088 
6089  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6090  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
6091 
6092  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6093  {
6094  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6095  }
6096 
6097  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6098  {
6099  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6100  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6101  }
6102 
6103  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6104 
6105  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6106  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6107 
6108  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6109  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6110  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6111  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6112 
6113  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6114  {
6115  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6116  {
6117  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6118  if(limit != VK_WHOLE_SIZE)
6119  {
6120  m_HeapSizeLimit[heapIndex] = limit;
6121  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6122  {
6123  m_MemProps.memoryHeaps[heapIndex].size = limit;
6124  }
6125  }
6126  }
6127  }
6128 
6129  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6130  {
6131  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6132 
6133  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6134  {
6135  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6136  this,
6137  memTypeIndex,
6138  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6139  preferredBlockSize,
6140  0,
6141  SIZE_MAX,
6142  GetBufferImageGranularity(),
6143  pCreateInfo->frameInUseCount,
6144  false); // isCustomPool
6145  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6146  // because minBlockCount is 0.
6147  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6148  }
6149  }
6150 }
6151 
6152 VmaAllocator_T::~VmaAllocator_T()
6153 {
6154  VMA_ASSERT(m_Pools.empty());
6155 
6156  for(size_t i = GetMemoryTypeCount(); i--; )
6157  {
6158  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6159  {
6160  vma_delete(this, m_pOwnAllocations[i][j]);
6161  vma_delete(this, m_pBlockVectors[i][j]);
6162  }
6163  }
6164 }
6165 
6166 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6167 {
6168 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6169  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6170  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6171  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6172  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6173  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6174  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6175  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6176  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6177  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6178  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6179  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6180  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6181  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6182  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6183 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6184 
6185  if(pVulkanFunctions != VMA_NULL)
6186  {
6187  m_VulkanFunctions = *pVulkanFunctions;
6188  }
6189 
6190  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6191  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6192  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6193  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6194  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6195  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6196  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6197  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6198  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6199  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6200  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6201  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6202  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6203  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6204  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6205  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6206 }
6207 
6208 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6209 {
6210  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6211  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6212  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6213  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6214 }
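
// Example: with the defaults at the time of writing (VMA_SMALL_HEAP_MAX_SIZE
// = 512 MiB, small block size = 64 MiB, large block size = 256 MiB - check
// the macro definitions earlier in this file for the authoritative values),
// a 256 MiB integrated-GPU heap gets 64 MiB blocks, while an 8 GiB heap gets
// 256 MiB blocks.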
6215 
6216 VkResult VmaAllocator_T::AllocateMemoryOfType(
6217  const VkMemoryRequirements& vkMemReq,
6218  const VmaAllocationCreateInfo& createInfo,
6219  uint32_t memTypeIndex,
6220  VmaSuballocationType suballocType,
6221  VmaAllocation* pAllocation)
6222 {
6223  VMA_ASSERT(pAllocation != VMA_NULL);
6224  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6225 
6226  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6227  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6228  VMA_ASSERT(blockVector);
6229 
6230  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6231  // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
6232  const bool ownMemory =
6233  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
6234  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
6235  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6236  vkMemReq.size > preferredBlockSize / 2);
6237 
6238  if(ownMemory)
6239  {
6240  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6241  {
6242  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6243  }
6244  else
6245  {
6246  return AllocateOwnMemory(
6247  vkMemReq.size,
6248  suballocType,
6249  memTypeIndex,
6250  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6251  createInfo.pUserData,
6252  pAllocation);
6253  }
6254  }
6255  else
6256  {
6257  VkResult res = blockVector->Allocate(
6258  VK_NULL_HANDLE, // hCurrentPool
6259  m_CurrentFrameIndex.load(),
6260  vkMemReq,
6261  createInfo,
6262  suballocType,
6263  pAllocation);
6264  if(res == VK_SUCCESS)
6265  {
6266  return res;
6267  }
6268 
6269  // Last resort: try own memory.
6270  res = AllocateOwnMemory(
6271  vkMemReq.size,
6272  suballocType,
6273  memTypeIndex,
6274  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6275  createInfo.pUserData,
6276  pAllocation);
6277  if(res == VK_SUCCESS)
6278  {
6279  // Succeeded: AllocateOwnMemory function already filled pAllocation, nothing more to do here.
6280  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6281  return VK_SUCCESS;
6282  }
6283  else
6284  {
6285  // Everything failed: Return error code.
6286  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6287  return res;
6288  }
6289  }
6290 }
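
// Numeric example of the heuristic above: with, say, a 256 MiB preferred
// block size, a 200 MiB request exceeds half a block (128 MiB) and therefore
// goes straight to its own dedicated VkDeviceMemory, while a 64 MiB request
// is sub-allocated from a shared block and only falls back to own memory if
// every block-based strategy fails.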
6291 
6292 VkResult VmaAllocator_T::AllocateOwnMemory(
6293  VkDeviceSize size,
6294  VmaSuballocationType suballocType,
6295  uint32_t memTypeIndex,
6296  bool map,
6297  void* pUserData,
6298  VmaAllocation* pAllocation)
6299 {
6300  VMA_ASSERT(pAllocation);
6301 
6302  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6303  allocInfo.memoryTypeIndex = memTypeIndex;
6304  allocInfo.allocationSize = size;
6305 
6306  // Allocate VkDeviceMemory.
6307  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6308  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6309  if(res < 0)
6310  {
6311  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6312  return res;
6313  }
6314 
6315  void* pMappedData = VMA_NULL;
6316  if(map)
6317  {
6318  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6319  {
6320  res = (*m_VulkanFunctions.vkMapMemory)(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6321  if(res < 0)
6322  {
6323  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6324  FreeVulkanMemory(memTypeIndex, size, hMemory);
6325  return res;
6326  }
6327  }
6328  }
6329 
6330  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6331  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6332 
6333  // Register it in m_pOwnAllocations.
6334  {
6335  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6336  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6337  VMA_ASSERT(pOwnAllocations);
6338  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6339  }
6340 
6341  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6342 
6343  return VK_SUCCESS;
6344 }
6345 
6346 VkResult VmaAllocator_T::AllocateMemory(
6347  const VkMemoryRequirements& vkMemReq,
6348  const VmaAllocationCreateInfo& createInfo,
6349  VmaSuballocationType suballocType,
6350  VmaAllocation* pAllocation)
6351 {
6352  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6353  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6354  {
6355  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6356  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6357  }
6358  if((createInfo.pool != VK_NULL_HANDLE) &&
6359  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6360  {
6361  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6362  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6363  }
6364 
6365  if(createInfo.pool != VK_NULL_HANDLE)
6366  {
6367  return createInfo.pool->m_BlockVector.Allocate(
6368  createInfo.pool,
6369  m_CurrentFrameIndex.load(),
6370  vkMemReq,
6371  createInfo,
6372  suballocType,
6373  pAllocation);
6374  }
6375  else
6376  {
6377  // Bit mask of Vulkan memory types acceptable for this allocation.
6378  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6379  uint32_t memTypeIndex = UINT32_MAX;
6380  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6381  if(res == VK_SUCCESS)
6382  {
6383  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6384  // Succeeded on first try.
6385  if(res == VK_SUCCESS)
6386  {
6387  return res;
6388  }
6389  // Allocation from this memory type failed. Try other compatible memory types.
6390  else
6391  {
6392  for(;;)
6393  {
6394  // Remove old memTypeIndex from list of possibilities.
6395  memoryTypeBits &= ~(1u << memTypeIndex);
6396  // Find alternative memTypeIndex.
6397  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6398  if(res == VK_SUCCESS)
6399  {
6400  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6401  // Allocation from this alternative memory type succeeded.
6402  if(res == VK_SUCCESS)
6403  {
6404  return res;
6405  }
6406  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6407  }
6408  // No other matching memory type index could be found.
6409  else
6410  {
6411  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6412  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6413  }
6414  }
6415  }
6416  }
6417  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6418  else
6419  return res;
6420  }
6421 }
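
// The fallback loop above walks the acceptable memory types by clearing one
// bit per failed attempt. The bit manipulation in isolation (illustrative
// sketch only):
#include <cassert>
#include <cstdint>

static void TestMemoryTypeMask()
{
    uint32_t memoryTypeBits = 0xB; // binary 1011: types 0, 1 and 3 acceptable
    memoryTypeBits &= ~(1u << 1);  // allocation from type 1 failed - drop it
    assert(memoryTypeBits == 0x9); // binary 1001: types 0 and 3 remain
}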
6422 
6423 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6424 {
6425  VMA_ASSERT(allocation);
6426 
6427  if(allocation->CanBecomeLost() == false ||
6428  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6429  {
6430  switch(allocation->GetType())
6431  {
6432  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6433  {
6434  VmaBlockVector* pBlockVector = VMA_NULL;
6435  VmaPool hPool = allocation->GetPool();
6436  if(hPool != VK_NULL_HANDLE)
6437  {
6438  pBlockVector = &hPool->m_BlockVector;
6439  }
6440  else
6441  {
6442  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6443  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6444  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6445  }
6446  pBlockVector->Free(allocation);
6447  }
6448  break;
6449  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6450  FreeOwnMemory(allocation);
6451  break;
6452  default:
6453  VMA_ASSERT(0);
6454  }
6455  }
6456 
6457  vma_delete(this, allocation);
6458 }
6459 
6460 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6461 {
6462  // Initialize.
6463  InitStatInfo(pStats->total);
6464  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6465  InitStatInfo(pStats->memoryType[i]);
6466  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6467  InitStatInfo(pStats->memoryHeap[i]);
6468 
6469  // Process default pools.
6470  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6471  {
6472  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6473  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6474  {
6475  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6476  VMA_ASSERT(pBlockVector);
6477  pBlockVector->AddStats(pStats);
6478  }
6479  }
6480 
6481  // Process custom pools.
6482  {
6483  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6484  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6485  {
6486  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6487  }
6488  }
6489 
6490  // Process own allocations.
6491  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6492  {
6493  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6494  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6495  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6496  {
6497  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6498  VMA_ASSERT(pOwnAllocVector);
6499  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6500  {
6501  VmaStatInfo allocationStatInfo;
6502  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6503  VmaAddStatInfo(pStats->total, allocationStatInfo);
6504  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6505  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6506  }
6507  }
6508  }
6509 
6510  // Postprocess.
6511  VmaPostprocessCalcStatInfo(pStats->total);
6512  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6513  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6514  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6515  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6516 }
6517 
6518 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002
6519 
6520 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6521 {
6522  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6523  {
6524  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6525  {
6526  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6527  {
6528  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6529  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6530  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6531  {
6532  // Process OwnAllocations.
6533  {
6534  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6535  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6536  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6537  {
6538  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6539  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
6540  }
6541  }
6542 
6543  // Process normal Allocations.
6544  {
6545  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6546  pBlockVector->UnmapPersistentlyMappedMemory();
6547  }
6548  }
6549  }
6550 
6551  // Process custom pools.
6552  {
6553  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6554  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6555  {
6556  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6557  }
6558  }
6559  }
6560  }
6561 }
6562 
6563 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6564 {
6565  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6566  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6567  {
6568  VkResult finalResult = VK_SUCCESS;
6569  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6570  {
6571  // Process custom pools.
6572  {
6573  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6574  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6575  {
6576  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6577  }
6578  }
6579 
6580  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6581  {
6582  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6583  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6584  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6585  {
6586  // Process OwnAllocations.
6587  {
6588  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6589  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6590  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6591  {
6592  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6593  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
6594  }
6595  }
6596 
6597  // Process normal Allocations.
6598  {
6599  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6600  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6601  if(localResult != VK_SUCCESS)
6602  {
6603  finalResult = localResult;
6604  }
6605  }
6606  }
6607  }
6608  }
6609  return finalResult;
6610  }
6611  else
6612  return VK_SUCCESS;
6613 }
6614 
6615 VkResult VmaAllocator_T::Defragment(
6616  VmaAllocation* pAllocations,
6617  size_t allocationCount,
6618  VkBool32* pAllocationsChanged,
6619  const VmaDefragmentationInfo* pDefragmentationInfo,
6620  VmaDefragmentationStats* pDefragmentationStats)
6621 {
6622  if(pAllocationsChanged != VMA_NULL)
6623  {
6624  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged)); // Zero one VkBool32 per allocation, not just the first element.
6625  }
6626  if(pDefragmentationStats != VMA_NULL)
6627  {
6628  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6629  }
6630 
6631  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6632  {
6633  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6634  return VK_ERROR_MEMORY_MAP_FAILED;
6635  }
6636 
6637  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6638 
6639  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6640 
6641  const size_t poolCount = m_Pools.size();
6642 
6643  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6644  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6645  {
6646  VmaAllocation hAlloc = pAllocations[allocIndex];
6647  VMA_ASSERT(hAlloc);
6648  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6649  // OwnAlloc cannot be defragmented.
6650  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6651  // Only HOST_VISIBLE memory types can be defragmented.
6652  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6653  // Lost allocation cannot be defragmented.
6654  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6655  {
6656  VmaBlockVector* pAllocBlockVector = VMA_NULL;
6657 
6658  const VmaPool hAllocPool = hAlloc->GetPool();
6659  // This allocation belongs to custom pool.
6660  if(hAllocPool != VK_NULL_HANDLE)
6661  {
6662  pAllocBlockVector = &hAllocPool->GetBlockVector();
6663  }
6664  // This allocation belongs to general pool.
6665  else
6666  {
6667  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6668  }
6669 
6670  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6671 
6672  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6673  &pAllocationsChanged[allocIndex] : VMA_NULL;
6674  pDefragmentator->AddAllocation(hAlloc, pChanged);
6675  }
6676  }
6677 
6678  VkResult result = VK_SUCCESS;
6679 
6680  // ======== Main processing.
6681 
6682  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE; // VkDeviceSize is 64-bit; SIZE_MAX would truncate the limit on 32-bit platforms.
6683  uint32_t maxAllocationsToMove = UINT32_MAX;
6684  if(pDefragmentationInfo != VMA_NULL)
6685  {
6686  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6687  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6688  }
6689 
6690  // Process standard memory.
6691  for(uint32_t memTypeIndex = 0;
6692  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6693  ++memTypeIndex)
6694  {
6695  // Only HOST_VISIBLE memory types can be defragmented.
6696  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6697  {
6698  for(uint32_t blockVectorType = 0;
6699  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6700  ++blockVectorType)
6701  {
6702  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6703  pDefragmentationStats,
6704  maxBytesToMove,
6705  maxAllocationsToMove);
6706  }
6707  }
6708  }
6709 
6710  // Process custom pools.
6711  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6712  {
6713  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6714  pDefragmentationStats,
6715  maxBytesToMove,
6716  maxAllocationsToMove);
6717  }
6718 
6719  // ======== Destroy defragmentators.
6720 
6721  // Process custom pools.
6722  for(size_t poolIndex = poolCount; poolIndex--; )
6723  {
6724  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6725  }
6726 
6727  // Process standard memory.
6728  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6729  {
6730  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6731  {
6732  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6733  {
6734  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6735  }
6736  }
6737  }
6738 
6739  return result;
6740 }
6741 
6742 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6743 {
6744  if(hAllocation->CanBecomeLost())
6745  {
6746  /*
6747  Warning: This is a carefully designed algorithm.
6748  Do not modify unless you really know what you're doing :)
6749  */
6750  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6751  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6752  for(;;)
6753  {
6754  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6755  {
6756  pAllocationInfo->memoryType = UINT32_MAX;
6757  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6758  pAllocationInfo->offset = 0;
6759  pAllocationInfo->size = hAllocation->GetSize();
6760  pAllocationInfo->pMappedData = VMA_NULL;
6761  pAllocationInfo->pUserData = hAllocation->GetUserData();
6762  return;
6763  }
6764  else if(localLastUseFrameIndex == localCurrFrameIndex)
6765  {
6766  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6767  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6768  pAllocationInfo->offset = hAllocation->GetOffset();
6769  pAllocationInfo->size = hAllocation->GetSize();
6770  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6771  pAllocationInfo->pUserData = hAllocation->GetUserData();
6772  return;
6773  }
6774  else // Last use time earlier than current time.
6775  {
6776  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6777  {
6778  localLastUseFrameIndex = localCurrFrameIndex;
6779  }
6780  }
6781  }
6782  }
6783  // We could reuse the code above, but since this allocation cannot become lost, we skip the atomic access to hAllocation->LastUseFrameIndex for performance.
6784  else
6785  {
6786  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6787  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6788  pAllocationInfo->offset = hAllocation->GetOffset();
6789  pAllocationInfo->size = hAllocation->GetSize();
6790  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6791  pAllocationInfo->pUserData = hAllocation->GetUserData();
6792  }
6793 }
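
// Editorial note: an illustrative sketch (not part of the library) of how a
// caller can detect a lost allocation through vmaGetAllocationInfo(). For a
// lost allocation the function above fills deviceMemory with VK_NULL_HANDLE
// and memoryType with UINT32_MAX. The function name is hypothetical.
static bool ExampleIsAllocationLost(VmaAllocator allocator, VmaAllocation alloc)
{
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, alloc, &info);
    // A lost allocation reports no backing VkDeviceMemory.
    return info.deviceMemory == VK_NULL_HANDLE;
}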
6794 
6795 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6796 {
6797  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6798 
6799  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6800 
6801  if(newCreateInfo.maxBlockCount == 0)
6802  {
6803  newCreateInfo.maxBlockCount = SIZE_MAX;
6804  }
6805  if(newCreateInfo.blockSize == 0)
6806  {
6807  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6808  }
6809 
6810  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6811 
6812  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6813  if(res != VK_SUCCESS)
6814  {
6815  vma_delete(this, *pPool);
6816  *pPool = VMA_NULL;
6817  return res;
6818  }
6819 
6820  // Add to m_Pools.
6821  {
6822  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6823  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6824  }
6825 
6826  return VK_SUCCESS;
6827 }
6828 
6829 void VmaAllocator_T::DestroyPool(VmaPool pool)
6830 {
6831  // Remove from m_Pools.
6832  {
6833  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6834  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6835  VMA_ASSERT(success && "Pool not found in Allocator.");
6836  }
6837 
6838  vma_delete(this, pool);
6839 }
6840 
6841 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6842 {
6843  pool->m_BlockVector.GetPoolStats(pPoolStats);
6844 }
6845 
6846 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6847 {
6848  m_CurrentFrameIndex.store(frameIndex);
6849 }
6850 
6851 void VmaAllocator_T::MakePoolAllocationsLost(
6852  VmaPool hPool,
6853  size_t* pLostAllocationCount)
6854 {
6855  hPool->m_BlockVector.MakePoolAllocationsLost(
6856  m_CurrentFrameIndex.load(),
6857  pLostAllocationCount);
6858 }
6859 
6860 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6861 {
6862  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6863  (*pAllocation)->InitLost();
6864 }
6865 
6866 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6867 {
6868  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6869 
6870  VkResult res;
6871  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6872  {
6873  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6874  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6875  {
6876  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6877  if(res == VK_SUCCESS)
6878  {
6879  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6880  }
6881  }
6882  else
6883  {
6884  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6885  }
6886  }
6887  else
6888  {
6889  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6890  }
6891 
6892  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6893  {
6894  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6895  }
6896 
6897  return res;
6898 }
6899 
6900 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6901 {
6902  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6903  {
6904  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6905  }
6906 
6907  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
6908 
6909  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6910  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6911  {
6912  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6913  m_HeapSizeLimit[heapIndex] += size;
6914  }
6915 }
6916 
6917 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6918 {
6919  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6920 
6921  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6922  {
6923  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6924  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6925  VMA_ASSERT(pOwnAllocations);
6926  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6927  VMA_ASSERT(success);
6928  }
6929 
6930  VkDeviceMemory hMemory = allocation->GetMemory();
6931 
6932  if(allocation->GetMappedData() != VMA_NULL)
6933  {
6934  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); // Dispatch through user-provided function pointers, consistent with AllocateVulkanMemory.
6935  }
6936 
6937  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
6938 
6939  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6940 }
6941 
6942 #if VMA_STATS_STRING_ENABLED
6943 
6944 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
6945 {
6946  bool ownAllocationsStarted = false;
6947  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6948  {
6949  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6950  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6951  {
6952  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6953  VMA_ASSERT(pOwnAllocVector);
6954  if(pOwnAllocVector->empty() == false)
6955  {
6956  if(ownAllocationsStarted == false)
6957  {
6958  ownAllocationsStarted = true;
6959  json.WriteString("OwnAllocations");
6960  json.BeginObject();
6961  }
6962 
6963  json.BeginString("Type ");
6964  json.ContinueString(memTypeIndex);
6965  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6966  {
6967  json.ContinueString(" Mapped");
6968  }
6969  json.EndString();
6970 
6971  json.BeginArray();
6972 
6973  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
6974  {
6975  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
6976  json.BeginObject(true);
6977 
6978  json.WriteString("Size");
6979  json.WriteNumber(hAlloc->GetSize());
6980 
6981  json.WriteString("Type");
6982  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
6983 
6984  json.EndObject();
6985  }
6986 
6987  json.EndArray();
6988  }
6989  }
6990  }
6991  if(ownAllocationsStarted)
6992  {
6993  json.EndObject();
6994  }
6995 
6996  {
6997  bool allocationsStarted = false;
6998  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6999  {
7000  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7001  {
7002  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7003  {
7004  if(allocationsStarted == false)
7005  {
7006  allocationsStarted = true;
7007  json.WriteString("DefaultPools");
7008  json.BeginObject();
7009  }
7010 
7011  json.BeginString("Type ");
7012  json.ContinueString(memTypeIndex);
7013  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7014  {
7015  json.ContinueString(" Mapped");
7016  }
7017  json.EndString();
7018 
7019  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7020  }
7021  }
7022  }
7023  if(allocationsStarted)
7024  {
7025  json.EndObject();
7026  }
7027  }
7028 
7029  {
7030  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7031  const size_t poolCount = m_Pools.size();
7032  if(poolCount > 0)
7033  {
7034  json.WriteString("Pools");
7035  json.BeginArray();
7036  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7037  {
7038  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7039  }
7040  json.EndArray();
7041  }
7042  }
7043 }
7044 
7045 #endif // #if VMA_STATS_STRING_ENABLED
7046 
7047 static VkResult AllocateMemoryForImage(
7048  VmaAllocator allocator,
7049  VkImage image,
7050  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7051  VmaSuballocationType suballocType,
7052  VmaAllocation* pAllocation)
7053 {
7054  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7055 
7056  VkMemoryRequirements vkMemReq = {};
7057  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
7058 
7059  return allocator->AllocateMemory(
7060  vkMemReq,
7061  *pAllocationCreateInfo,
7062  suballocType,
7063  pAllocation);
7064 }
7065 
7066 ////////////////////////////////////////////////////////////////////////////////
7067 // Public interface
7068 
7069 VkResult vmaCreateAllocator(
7070  const VmaAllocatorCreateInfo* pCreateInfo,
7071  VmaAllocator* pAllocator)
7072 {
7073  VMA_ASSERT(pCreateInfo && pAllocator);
7074  VMA_DEBUG_LOG("vmaCreateAllocator");
7075  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7076  return VK_SUCCESS;
7077 }
7078 
7079 void vmaDestroyAllocator(
7080  VmaAllocator allocator)
7081 {
7082  if(allocator != VK_NULL_HANDLE)
7083  {
7084  VMA_DEBUG_LOG("vmaDestroyAllocator");
7085  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7086  vma_delete(&allocationCallbacks, allocator);
7087  }
7088 }
7089 
7090 void vmaGetPhysicalDeviceProperties(
7091  VmaAllocator allocator,
7092  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7093 {
7094  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7095  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7096 }
7097 
7098 void vmaGetMemoryProperties(
7099  VmaAllocator allocator,
7100  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7101 {
7102  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7103  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7104 }
7105 
7106 void vmaGetMemoryTypeProperties(
7107  VmaAllocator allocator,
7108  uint32_t memoryTypeIndex,
7109  VkMemoryPropertyFlags* pFlags)
7110 {
7111  VMA_ASSERT(allocator && pFlags);
7112  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7113  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7114 }
7115 
7116 void vmaSetCurrentFrameIndex(
7117  VmaAllocator allocator,
7118  uint32_t frameIndex)
7119 {
7120  VMA_ASSERT(allocator);
7121  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7122 
7123  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7124 
7125  allocator->SetCurrentFrameIndex(frameIndex);
7126 }
7127 
7128 void vmaCalculateStats(
7129  VmaAllocator allocator,
7130  VmaStats* pStats)
7131 {
7132  VMA_ASSERT(allocator && pStats);
7133  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7134  allocator->CalculateStats(pStats);
7135 }
7136 
7137 #if VMA_STATS_STRING_ENABLED
7138 
7139 void vmaBuildStatsString(
7140  VmaAllocator allocator,
7141  char** ppStatsString,
7142  VkBool32 detailedMap)
7143 {
7144  VMA_ASSERT(allocator && ppStatsString);
7145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7146 
7147  VmaStringBuilder sb(allocator);
7148  {
7149  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7150  json.BeginObject();
7151 
7152  VmaStats stats;
7153  allocator->CalculateStats(&stats);
7154 
7155  json.WriteString("Total");
7156  VmaPrintStatInfo(json, stats.total);
7157 
7158  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7159  {
7160  json.BeginString("Heap ");
7161  json.ContinueString(heapIndex);
7162  json.EndString();
7163  json.BeginObject();
7164 
7165  json.WriteString("Size");
7166  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7167 
7168  json.WriteString("Flags");
7169  json.BeginArray(true);
7170  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7171  {
7172  json.WriteString("DEVICE_LOCAL");
7173  }
7174  json.EndArray();
7175 
7176  if(stats.memoryHeap[heapIndex].BlockCount > 0)
7177  {
7178  json.WriteString("Stats");
7179  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7180  }
7181 
7182  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7183  {
7184  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7185  {
7186  json.BeginString("Type ");
7187  json.ContinueString(typeIndex);
7188  json.EndString();
7189 
7190  json.BeginObject();
7191 
7192  json.WriteString("Flags");
7193  json.BeginArray(true);
7194  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7195  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7196  {
7197  json.WriteString("DEVICE_LOCAL");
7198  }
7199  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7200  {
7201  json.WriteString("HOST_VISIBLE");
7202  }
7203  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7204  {
7205  json.WriteString("HOST_COHERENT");
7206  }
7207  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7208  {
7209  json.WriteString("HOST_CACHED");
7210  }
7211  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7212  {
7213  json.WriteString("LAZILY_ALLOCATED");
7214  }
7215  json.EndArray();
7216 
7217  if(stats.memoryType[typeIndex].BlockCount > 0)
7218  {
7219  json.WriteString("Stats");
7220  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7221  }
7222 
7223  json.EndObject();
7224  }
7225  }
7226 
7227  json.EndObject();
7228  }
7229  if(detailedMap == VK_TRUE)
7230  {
7231  allocator->PrintDetailedMap(json);
7232  }
7233 
7234  json.EndObject();
7235  }
7236 
7237  const size_t len = sb.GetLength();
7238  char* const pChars = vma_new_array(allocator, char, len + 1);
7239  if(len > 0)
7240  {
7241  memcpy(pChars, sb.GetData(), len);
7242  }
7243  pChars[len] = '\0';
7244  *ppStatsString = pChars;
7245 }
7246 
7247 void vmaFreeStatsString(
7248  VmaAllocator allocator,
7249  char* pStatsString)
7250 {
7251  if(pStatsString != VMA_NULL)
7252  {
7253  VMA_ASSERT(allocator);
7254  size_t len = strlen(pStatsString);
7255  vma_delete_array(allocator, pStatsString, len + 1);
7256  }
7257 }
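
// Editorial note: a minimal usage sketch (not part of the library) pairing
// vmaBuildStatsString() with vmaFreeStatsString(). printf assumes <cstdio>;
// any logging sink works. The function name is hypothetical.
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
    char* statsJson = VMA_NULL;
    vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE = include detailed map.
    printf("%s\n", statsJson);
    vmaFreeStatsString(allocator, statsJson);
}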
7258 
7259 #endif // #if VMA_STATS_STRING_ENABLED
7260 
7263 VkResult vmaFindMemoryTypeIndex(
7264  VmaAllocator allocator,
7265  uint32_t memoryTypeBits,
7266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7267  uint32_t* pMemoryTypeIndex)
7268 {
7269  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7270  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7271  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7272 
7273  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7274  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7275  if(preferredFlags == 0)
7276  {
7277  preferredFlags = requiredFlags;
7278  }
7279  // preferredFlags, if not 0, must be a superset of requiredFlags.
7280  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7281 
7282  // Convert usage to requiredFlags and preferredFlags.
7283  switch(pAllocationCreateInfo->usage)
7284  {
7285  case VMA_MEMORY_USAGE_UNKNOWN:
7286  break;
7287  case VMA_MEMORY_USAGE_GPU_ONLY:
7288  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7289  break;
7290  case VMA_MEMORY_USAGE_CPU_ONLY:
7291  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7292  break;
7293  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7294  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7295  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7296  break;
7297  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7298  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7299  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7300  break;
7301  default:
7302  break;
7303  }
7304 
7305  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7306  {
7307  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7308  }
7309 
7310  *pMemoryTypeIndex = UINT32_MAX;
7311  uint32_t minCost = UINT32_MAX;
7312  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7313  memTypeIndex < allocator->GetMemoryTypeCount();
7314  ++memTypeIndex, memTypeBit <<= 1)
7315  {
7316  // This memory type is acceptable according to memoryTypeBits bitmask.
7317  if((memTypeBit & memoryTypeBits) != 0)
7318  {
7319  const VkMemoryPropertyFlags currFlags =
7320  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7321  // This memory type contains requiredFlags.
7322  if((requiredFlags & ~currFlags) == 0)
7323  {
7324  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7325  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7326  // Remember memory type with lowest cost.
7327  if(currCost < minCost)
7328  {
7329  *pMemoryTypeIndex = memTypeIndex;
7330  if(currCost == 0)
7331  {
7332  return VK_SUCCESS;
7333  }
7334  minCost = currCost;
7335  }
7336  }
7337  }
7338  }
7339  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7340 }
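
// Editorial note: an illustrative sketch (not part of the library) of the
// intended use of vmaFindMemoryTypeIndex(), e.g. to pick a memory type for a
// custom pool. memoryTypeBits would normally come from
// vkGetBufferMemoryRequirements/vkGetImageMemoryRequirements; the function
// name is hypothetical.
static uint32_t ExampleFindUploadMemoryType(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; // Host-visible required, device-local preferred.
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    return (res == VK_SUCCESS) ? memTypeIndex : UINT32_MAX;
}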
7341 
7342 VkResult vmaCreatePool(
7343  VmaAllocator allocator,
7344  const VmaPoolCreateInfo* pCreateInfo,
7345  VmaPool* pPool)
7346 {
7347  VMA_ASSERT(allocator && pCreateInfo && pPool);
7348 
7349  VMA_DEBUG_LOG("vmaCreatePool");
7350 
7351  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7352 
7353  return allocator->CreatePool(pCreateInfo, pPool);
7354 }
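
// Editorial note: a minimal sketch (not part of the library) of creating and
// destroying a custom pool. The sizes are hypothetical; leaving blockSize = 0
// would let CreatePool() above pick a preferred block size, and
// maxBlockCount = 0 means unlimited.
static void ExampleUseCustomPool(VmaAllocator allocator, uint32_t memTypeIndex)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block.
    poolCreateInfo.minBlockCount = 1;
    poolCreateInfo.maxBlockCount = 4;

    VmaPool pool = VK_NULL_HANDLE;
    if(vmaCreatePool(allocator, &poolCreateInfo, &pool) == VK_SUCCESS)
    {
        // Allocations targeting this pool set VmaAllocationCreateInfo::pool.
        vmaDestroyPool(allocator, pool);
    }
}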
7355 
7356 void vmaDestroyPool(
7357  VmaAllocator allocator,
7358  VmaPool pool)
7359 {
7360  VMA_ASSERT(allocator && pool);
7361 
7362  VMA_DEBUG_LOG("vmaDestroyPool");
7363 
7364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7365 
7366  allocator->DestroyPool(pool);
7367 }
7368 
7369 void vmaGetPoolStats(
7370  VmaAllocator allocator,
7371  VmaPool pool,
7372  VmaPoolStats* pPoolStats)
7373 {
7374  VMA_ASSERT(allocator && pool && pPoolStats);
7375 
7376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7377 
7378  allocator->GetPoolStats(pool, pPoolStats);
7379 }
7380 
7381 void vmaMakePoolAllocationsLost(
7382  VmaAllocator allocator,
7383  VmaPool pool,
7384  size_t* pLostAllocationCount)
7385 {
7386  VMA_ASSERT(allocator && pool);
7387 
7388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7389 
7390  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7391 }
7392 
7393 VkResult vmaAllocateMemory(
7394  VmaAllocator allocator,
7395  const VkMemoryRequirements* pVkMemoryRequirements,
7396  const VmaAllocationCreateInfo* pCreateInfo,
7397  VmaAllocation* pAllocation,
7398  VmaAllocationInfo* pAllocationInfo)
7399 {
7400  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7401 
7402  VMA_DEBUG_LOG("vmaAllocateMemory");
7403 
7404  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7405 
7406  VkResult result = allocator->AllocateMemory(
7407  *pVkMemoryRequirements,
7408  *pCreateInfo,
7409  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7410  pAllocation);
7411 
7412  if(pAllocationInfo && result == VK_SUCCESS)
7413  {
7414  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7415  }
7416 
7417  return result;
7418 }
7419 
7420 VkResult vmaAllocateMemoryForBuffer(
7421  VmaAllocator allocator,
7422  VkBuffer buffer,
7423  const VmaAllocationCreateInfo* pCreateInfo,
7424  VmaAllocation* pAllocation,
7425  VmaAllocationInfo* pAllocationInfo)
7426 {
7427  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7428 
7429  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7430 
7431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7432 
7433  VkMemoryRequirements vkMemReq = {};
7434  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
7435 
7436  VkResult result = allocator->AllocateMemory(
7437  vkMemReq,
7438  *pCreateInfo,
7439  VMA_SUBALLOCATION_TYPE_BUFFER,
7440  pAllocation);
7441 
7442  if(pAllocationInfo && result == VK_SUCCESS)
7443  {
7444  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7445  }
7446 
7447  return result;
7448 }
7449 
7450 VkResult vmaAllocateMemoryForImage(
7451  VmaAllocator allocator,
7452  VkImage image,
7453  const VmaAllocationCreateInfo* pCreateInfo,
7454  VmaAllocation* pAllocation,
7455  VmaAllocationInfo* pAllocationInfo)
7456 {
7457  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7458 
7459  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7460 
7461  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7462 
7463  VkResult result = AllocateMemoryForImage(
7464  allocator,
7465  image,
7466  pCreateInfo,
7467  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7468  pAllocation);
7469 
7470  if(pAllocationInfo && result == VK_SUCCESS)
7471  {
7472  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7473  }
7474 
7475  return result;
7476 }
7477 
7478 void vmaFreeMemory(
7479  VmaAllocator allocator,
7480  VmaAllocation allocation)
7481 {
7482  VMA_ASSERT(allocator && allocation);
7483 
7484  VMA_DEBUG_LOG("vmaFreeMemory");
7485 
7486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7487 
7488  allocator->FreeMemory(allocation);
7489 }
7490 
7491 void vmaGetAllocationInfo(
7492  VmaAllocator allocator,
7493  VmaAllocation allocation,
7494  VmaAllocationInfo* pAllocationInfo)
7495 {
7496  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7497 
7498  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7499 
7500  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7501 }
7502 
7503 void vmaSetAllocationUserData(
7504  VmaAllocator allocator,
7505  VmaAllocation allocation,
7506  void* pUserData)
7507 {
7508  VMA_ASSERT(allocator && allocation);
7509 
7510  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7511 
7512  allocation->SetUserData(pUserData);
7513 }
7514 
7515 void vmaCreateLostAllocation(
7516  VmaAllocator allocator,
7517  VmaAllocation* pAllocation)
7518 {
7519  VMA_ASSERT(allocator && pAllocation);
7520 
7521  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7522 
7523  allocator->CreateLostAllocation(pAllocation);
7524 }
7525 
7526 VkResult vmaMapMemory(
7527  VmaAllocator allocator,
7528  VmaAllocation allocation,
7529  void** ppData)
7530 {
7531  VMA_ASSERT(allocator && allocation && ppData);
7532 
7533  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7534 
7535  return (*allocator->GetVulkanFunctions().vkMapMemory)(allocator->m_hDevice, allocation->GetMemory(),
7536  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7537 }
7538 
7539 void vmaUnmapMemory(
7540  VmaAllocator allocator,
7541  VmaAllocation allocation)
7542 {
7543  VMA_ASSERT(allocator && allocation);
7544 
7545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7546 
7547  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
7548 }
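
// Editorial note: a minimal map/write/unmap sketch (not part of the library).
// For non-HOST_COHERENT memory types a vkFlushMappedMemoryRanges call would
// also be required; it is omitted here. The function name is hypothetical.
static void ExampleWriteToAllocation(VmaAllocator allocator, VmaAllocation alloc, const void* src, size_t size)
{
    void* pData = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, src, size);
        vmaUnmapMemory(allocator, alloc);
    }
}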
7549 
7550 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7551 {
7552  VMA_ASSERT(allocator);
7553 
7554  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7555 
7556  allocator->UnmapPersistentlyMappedMemory();
7557 }
7558 
7559 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7560 {
7561  VMA_ASSERT(allocator);
7562 
7563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7564 
7565  return allocator->MapPersistentlyMappedMemory();
7566 }
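
// Editorial note: a sketch (not part of the library) of the intended pairing
// of these two calls, e.g. around losing and regaining window focus in a
// fullscreen application. The counter in the allocator makes nested pairs
// safe; function names are hypothetical.
static void ExampleOnFocusLost(VmaAllocator allocator)
{
    vmaUnmapPersistentlyMappedMemory(allocator);
}
static void ExampleOnFocusRegained(VmaAllocator allocator)
{
    VkResult res = vmaMapPersistentlyMappedMemory(allocator);
    VMA_ASSERT(res == VK_SUCCESS && "Remapping persistently mapped memory failed.");
    (void)res;
}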
7567 
7568 VkResult vmaDefragment(
7569  VmaAllocator allocator,
7570  VmaAllocation* pAllocations,
7571  size_t allocationCount,
7572  VkBool32* pAllocationsChanged,
7573  const VmaDefragmentationInfo *pDefragmentationInfo,
7574  VmaDefragmentationStats* pDefragmentationStats)
7575 {
7576  VMA_ASSERT(allocator && pAllocations);
7577 
7578  VMA_DEBUG_LOG("vmaDefragment");
7579 
7580  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7581 
7582  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7583 }
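
// Editorial note: an illustrative defragmentation loop (not part of the
// library). `allocations` is a hypothetical array the application tracks;
// after the call, entries with changed[i] == VK_TRUE must have their buffers
// or images recreated and rebound at the new location. malloc/free assume
// <cstdlib>.
static void ExampleDefragment(VmaAllocator allocator, VmaAllocation* allocations, size_t count)
{
    VkBool32* changed = (VkBool32*)malloc(count * sizeof(VkBool32));
    VmaDefragmentationStats stats = {};
    // Passing null VmaDefragmentationInfo uses the default (unlimited) move limits.
    if(vmaDefragment(allocator, allocations, count, changed, VMA_NULL, &stats) == VK_SUCCESS)
    {
        for(size_t i = 0; i < count; ++i)
        {
            if(changed[i] == VK_TRUE)
            {
                // Recreate and rebind the resource that used allocations[i].
            }
        }
    }
    free(changed);
}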
7584 
7585 VkResult vmaCreateBuffer(
7586  VmaAllocator allocator,
7587  const VkBufferCreateInfo* pBufferCreateInfo,
7588  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7589  VkBuffer* pBuffer,
7590  VmaAllocation* pAllocation,
7591  VmaAllocationInfo* pAllocationInfo)
7592 {
7593  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7594 
7595  VMA_DEBUG_LOG("vmaCreateBuffer");
7596 
7597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7598 
7599  *pBuffer = VK_NULL_HANDLE;
7600  *pAllocation = VK_NULL_HANDLE;
7601 
7602  // 1. Create VkBuffer.
7603  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7604  allocator->m_hDevice,
7605  pBufferCreateInfo,
7606  allocator->GetAllocationCallbacks(),
7607  pBuffer);
7608  if(res >= 0)
7609  {
7610  // 2. vkGetBufferMemoryRequirements.
7611  VkMemoryRequirements vkMemReq = {};
7612  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
7613 
7614  // 3. Allocate memory using allocator.
7615  res = allocator->AllocateMemory(
7616  vkMemReq,
7617  *pAllocationCreateInfo,
7618  VMA_SUBALLOCATION_TYPE_BUFFER,
7619  pAllocation);
7620  if(res >= 0)
7621  {
7622  // 4. Bind buffer with memory.
7623  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7624  allocator->m_hDevice,
7625  *pBuffer,
7626  (*pAllocation)->GetMemory(),
7627  (*pAllocation)->GetOffset());
7628  if(res >= 0)
7629  {
7630  // All steps succeeded.
7631  if(pAllocationInfo != VMA_NULL)
7632  {
7633  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7634  }
7635  return VK_SUCCESS;
7636  }
7637  allocator->FreeMemory(*pAllocation);
7638  *pAllocation = VK_NULL_HANDLE;
7639  return res;
7640  }
7641  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7642  *pBuffer = VK_NULL_HANDLE;
7643  return res;
7644  }
7645  return res;
7646 }
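
// Editorial note: a minimal end-to-end sketch (not part of the library)
// showing the typical call that replaces the create/allocate/bind sequence
// above. Sizes and the function name are hypothetical.
static void ExampleCreateStagingBuffer(VmaAllocator allocator)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT; // pMappedData stays valid.

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo) == VK_SUCCESS)
    {
        // allocInfo.pMappedData can be written directly thanks to the persistent-map flag.
        vmaDestroyBuffer(allocator, buf, alloc);
    }
}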
7647 
7648 void vmaDestroyBuffer(
7649  VmaAllocator allocator,
7650  VkBuffer buffer,
7651  VmaAllocation allocation)
7652 {
7653  if(buffer != VK_NULL_HANDLE)
7654  {
7655  VMA_ASSERT(allocator);
7656 
7657  VMA_DEBUG_LOG("vmaDestroyBuffer");
7658 
7659  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7660 
7661  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7662 
7663  allocator->FreeMemory(allocation);
7664  }
7665 }
7666 
7667 VkResult vmaCreateImage(
7668  VmaAllocator allocator,
7669  const VkImageCreateInfo* pImageCreateInfo,
7670  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7671  VkImage* pImage,
7672  VmaAllocation* pAllocation,
7673  VmaAllocationInfo* pAllocationInfo)
7674 {
7675  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7676 
7677  VMA_DEBUG_LOG("vmaCreateImage");
7678 
7679  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7680 
7681  *pImage = VK_NULL_HANDLE;
7682  *pAllocation = VK_NULL_HANDLE;
7683 
7684  // 1. Create VkImage.
7685  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
7686  allocator->m_hDevice,
7687  pImageCreateInfo,
7688  allocator->GetAllocationCallbacks(),
7689  pImage);
7690  if(res >= 0)
7691  {
7692  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7693  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7694  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7695 
7696  // 2. Allocate memory using allocator.
7697  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7698  if(res >= 0)
7699  {
7700  // 3. Bind image with memory.
7701  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
7702  allocator->m_hDevice,
7703  *pImage,
7704  (*pAllocation)->GetMemory(),
7705  (*pAllocation)->GetOffset());
7706  if(res >= 0)
7707  {
7708  // All steps succeeded.
7709  if(pAllocationInfo != VMA_NULL)
7710  {
7711  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7712  }
7713  return VK_SUCCESS;
7714  }
7715  allocator->FreeMemory(*pAllocation);
7716  *pAllocation = VK_NULL_HANDLE;
7717  return res;
7718  }
7719  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7720  *pImage = VK_NULL_HANDLE;
7721  return res;
7722  }
7723  return res;
7724 }
7725 
7726 void vmaDestroyImage(
7727  VmaAllocator allocator,
7728  VkImage image,
7729  VmaAllocation allocation)
7730 {
7731  if(image != VK_NULL_HANDLE)
7732  {
7733  VMA_ASSERT(allocator);
7734 
7735  VMA_DEBUG_LOG("vmaDestroyImage");
7736 
7737  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7738 
7739  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7740 
7741  allocator->FreeMemory(allocation);
7742  }
7743 }
7744 
7745 #endif // #ifdef VMA_IMPLEMENTATION