Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    // Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    // Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

// Flags for created VmaAllocator.
typedef enum VmaAllocatorFlagBits {
    // Allocator and all objects created from it will not be synchronized
    // internally, so you must guarantee they are used from only one thread at
    // a time or synchronized externally by you.
    VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,

    VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorFlagBits;

typedef VkFlags VmaAllocatorFlags;

// Pointers to some Vulkan functions - the subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
} VmaVulkanFunctions;

// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    // Flags for created allocator. Use VmaAllocatorFlagBits enum.
    VmaAllocatorFlags flags;

    // Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;

    // Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;

    // Preferred size of a single VkDeviceMemory block to be allocated from large heaps.
    // Optional - set to 0 to use default (VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE).
    VkDeviceSize preferredLargeHeapBlockSize;
    // Preferred size of a single VkDeviceMemory block to be allocated from "small" heaps.
    // Optional - set to 0 to use default (VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE).
    VkDeviceSize preferredSmallHeapBlockSize;

    // Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;

    // Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    // Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    // Either null or a pointer to an array of limits on maximum number of bytes
    // that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    // Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

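/* Example: creating the allocator at program startup (an illustrative sketch;
`physicalDevice` and `device` are assumed to be your already created Vulkan
handles):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/
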
// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

// PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
// You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
// You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

// Given a memory type index, returns the property flags of this memory type.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

// Sets index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    uint32_t BlockCount;
    uint32_t AllocationCount;
    // Number of free ranges of memory between allocations.
    uint32_t UnusedRangeCount;
    VkDeviceSize UsedBytes;
    VkDeviceSize UnusedBytes;
    VkDeviceSize AllocationSizeMin, AllocationSizeAvg, AllocationSizeMax;
    VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
} VmaStatInfo;

// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

// Builds and returns statistics as string in JSON format.
// The returned string must be freed using vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    // No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    // Memory will be used on device only, no need to be mapped on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    // Memory will be mapped on host. Could be used for transfer to device.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    // Memory will be used for frequent (dynamic) updates from host and reads on device.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    // Memory will be used for writing on device and readback on host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    // Set this flag if the allocation should have its own memory block.
    VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001,
    // Set this flag to only try to allocate from existing VkDeviceMemory blocks
    // and never create a new such block.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    // Set this flag to use a memory that will be persistently mapped and
    // retrieve a pointer to it.
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    // Allocation created with this flag can become lost as a result of another
    // allocation made with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    // While creating an allocation using this flag, other allocations that were
    // created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    // Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    // Intended usage of memory. You can leave VMA_MEMORY_USAGE_UNKNOWN if you
    // specify memory requirements another way.
    VmaMemoryUsage usage;
    // Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    // Flags that preferably should be set in a memory type chosen for an allocation.
    // Set to 0 if no additional flags are preferred.
    VkMemoryPropertyFlags preferredFlags;
    // Custom general-purpose pointer that will be stored in VmaAllocation,
    // can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
    void* pUserData;
    // Pool that this allocation should be created in. Optional: leave
    // VK_NULL_HANDLE to allocate from general memory.
    VmaPool pool;
} VmaAllocationCreateInfo;

// Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

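/* Example: picking a memory type for a staging buffer (an illustrative sketch;
`memReq` is assumed to come from vkGetBufferMemoryRequirements() on your
buffer):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/
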

typedef enum VmaPoolCreateFlagBits {
    // Use this flag if you always allocate only buffers and linear images or
    // only optimal images out of this pool, so Buffer-Image Granularity can be ignored.
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    // Set this flag to use a memory that will be persistently mapped.
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

// Describes parameters of created VmaPool.
typedef struct VmaPoolCreateInfo {
    // Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    // Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    // Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    // Optional - leave 0 to use default.
    VkDeviceSize blockSize;
    // Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    // Maximum number of blocks that can be allocated in this pool.
    // Optional - set to 0 to use default (no maximum).
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    // Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    // Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    // Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    // Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
} VmaPoolStats;

// Allocates Vulkan device memory and creates VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

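/* Example: creating a custom pool with 128 MiB blocks (an illustrative sketch;
`memTypeIndex` is assumed to have been chosen e.g. with vmaFindMemoryTypeIndex()):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;

    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/
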
// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

// Retrieves statistics of an existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

// Marks all allocations in given pool as lost if they are not used in the
// current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)


// Parameters of a VmaAllocation object that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    // Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    // Handle to Vulkan memory object. The same memory object can be shared by
    // multiple allocations. It can change after a call to vmaDefragment() if
    // this allocation is passed to the function.
    VkDeviceMemory deviceMemory;
    // Offset into deviceMemory object to the beginning of this allocation, in
    // bytes. It can change after a call to vmaDefragment() if this allocation
    // is passed to the function.
    VkDeviceSize offset;
    // Size of this allocation, in bytes. It never changes.
    VkDeviceSize size;
    // Pointer to the beginning of this allocation as mapped data. Null if this
    // allocation is not persistently mapped.
    void* pMappedData;
    // Custom general-purpose pointer that was passed as
    // VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

// General purpose memory allocation.
// Memory allocated with this function should be freed using vmaFreeMemory().
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Frees memory previously allocated using vmaAllocateMemory(),
// vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

// Creates a new allocation that is in lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

// Maps memory represented by given allocation and returns pointer to it.
// Do not use it on memory allocated with VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT,
// as multiple maps of the same VkDeviceMemory are illegal.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

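/* Example: uploading data through a temporarily mapped allocation (an
illustrative sketch; `myData` and `myDataSize` are assumed, and `allocation`
must not be persistently mapped):

    void* pData;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, myData, myDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
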
// Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

// Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

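/* Sketch of the intended call pattern (an illustration under the assumption
that pointers previously read from VmaAllocationInfo::pMappedData become
invalid after the unmap and must be fetched again after the map):

    vmaUnmapPersistentlyMappedMemory(allocator);
    // ... e.g. perform vmaDefragment() here ...
    vmaMapPersistentlyMappedMemory(allocator);
*/
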
// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    // Maximum total number of bytes that can be copied while moving allocations
    // to different places. Set to VK_WHOLE_SIZE for no limit.
    VkDeviceSize maxBytesToMove;
    // Maximum number of allocations that can be moved to a different place.
    // Set to UINT32_MAX for no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    // Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    // Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    // Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    // Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

// Compacts memory by moving allocations.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);


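/* Example: defragmenting a set of allocations (an illustrative sketch;
`allocations` and `allocCount` are assumed to be an array of VmaAllocation you
own, and a null pDefragmentationInfo is assumed to select default limits;
buffers or images bound to allocations that were moved must be recreated and
rebound afterwards):

    std::vector<VkBool32> allocationsChanged(allocCount);
    VmaDefragmentationStats defragStats = {};
    vmaDefragment(allocator, allocations, allocCount,
        allocationsChanged.data(), nullptr, &defragStats);
*/
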
// Creates a buffer, allocates appropriate memory for it, and binds them together.
// If the function succeeded, you must destroy both buffer and allocation when
// you no longer need them, using vmaDestroyBuffer().
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

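/* Example: creating a vertex buffer in device-local memory (an illustrative
sketch; `myBufferSize` is assumed):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = myBufferSize;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, &allocInfo);

    // ... use the buffer ...

    vmaDestroyBuffer(allocator, buffer, allocation);
*/
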
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

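/* For example, when loading Vulkan dynamically, you might define
VMA_STATIC_VULKAN_FUNCTIONS to 0 and fill VmaAllocatorCreateInfo::pVulkanFunctions
yourself (an illustrative sketch; the myLoaded* pointers are hypothetical names
for function pointers obtained from your loader, e.g. via vkGetDeviceProcAddr):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = myLoadedVkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
    // ... and so on for every member of VmaVulkanFunctions ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/
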

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

1319 
1320 // Normal assert to check for programmer's errors, especially in Debug configuration.
1321 #ifndef VMA_ASSERT
1322  #ifdef _DEBUG
1323  #define VMA_ASSERT(expr) assert(expr)
1324  #else
1325  #define VMA_ASSERT(expr)
1326  #endif
1327 #endif
1328 
1329 // Assert that will be called very often, like inside data structures e.g. operator[].
1330 // Making it non-empty can make program slow.
1331 #ifndef VMA_HEAVY_ASSERT
1332  #ifdef _DEBUG
1333  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1334  #else
1335  #define VMA_HEAVY_ASSERT(expr)
1336  #endif
1337 #endif
1338 
1339 #ifndef VMA_NULL
1340  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1341  #define VMA_NULL nullptr
1342 #endif
1343 
1344 #ifndef VMA_ALIGN_OF
1345  #define VMA_ALIGN_OF(type) (__alignof(type))
1346 #endif
1347 
1348 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1349  #if defined(_WIN32)
1350  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1351  #else
1352  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1353  #endif
1354 #endif
1355 
1356 #ifndef VMA_SYSTEM_FREE
1357  #if defined(_WIN32)
1358  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1359  #else
1360  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1361  #endif
1362 #endif
1363 
1364 #ifndef VMA_MIN
1365  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1366 #endif
1367 
1368 #ifndef VMA_MAX
1369  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1370 #endif
1371 
1372 #ifndef VMA_SWAP
1373  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1374 #endif
1375 
1376 #ifndef VMA_SORT
1377  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1378 #endif
1379 
1380 #ifndef VMA_DEBUG_LOG
1381  #define VMA_DEBUG_LOG(format, ...)
1382  /*
1383  #define VMA_DEBUG_LOG(format, ...) do { \
1384  printf(format, __VA_ARGS__); \
1385  printf("\n"); \
1386  } while(false)
1387  */
1388 #endif
1389 
1390 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1391 #if VMA_STATS_STRING_ENABLED
1392  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1393  {
1394  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1395  }
1396  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1397  {
1398  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1399  }
1400  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1401  {
1402  snprintf(outStr, strLen, "%p", ptr);
1403  }
1404 #endif
1405 
#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

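/* A minimal sketch of a custom replacement that satisfies the subset above
(MyAtomicU32 is a hypothetical name; here it just wraps std::atomic to show the
required interface - a real replacement would use your engine's own atomics):

    class MyAtomicU32
    {
    public:
        MyAtomicU32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicU32
*/
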

#ifndef VMA_BEST_FIT
    /*
    Main parameter for the function assessing how good a free suballocation is
    for a new allocation request.
    - Set to 1 to use Best-Fit: prefer smaller blocks, as close to the size of
      the requested allocation as possible.
    - Set to 0 to use Worst-Fit: prefer larger blocks, as large as possible.
    */
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
    // Every allocation will have its own memory block.
    // Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes.
    // Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes.
    // Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex
    // protecting all entry calls to the library. Can be useful for debugging
    // multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    // Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;


/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}


#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

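// Worked example, assuming pageSize = 4096: a resource at offset 0 with size
// 4000 ends at byte 3999, whose page-start address is 0; a resource at offset
// 4096 has page-start address 4096, so VmaBlocksOnSamePage(0, 4000, 4096, 4096)
// returns false. With resource B at offset 4000 instead, both page-start
// addresses are 0 and the function returns true.
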

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other one is an optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif


// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

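/* Usage sketch (assumes a sorted plain array; any random-access iterators work):

    const int sorted[] = { 1, 3, 5, 5, 9 };
    const int* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 5, 5, [](int lhs, int rhs) { return lhs < rhs; });
    // it now points to the first 5 (index 2).
*/
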

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Set up singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

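/* Usage sketch (MyItem is a hypothetical POD-style payload type; note that
Alloc() returns raw storage without running constructors, and Free() does not
run destructors):

    struct MyItem { uint32_t a, b; };
    VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128);
    MyItem* item = itemAllocator.Alloc();
    // ... use *item ...
    itemAllocator.Free(item);
*/
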
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste computation
    // returning all items to m_ItemAllocator as free.
}


template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with a subset of the interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0


////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_OWN,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitOwnAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_OWN;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_OwnAllocation.m_hMemory = hMemory;
        m_OwnAllocation.m_PersistentMap = persistentMap;
        m_OwnAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        outInfo.BlockCount = 1;
        outInfo.AllocationCount = 1;
        outInfo.UnusedRangeCount = 0;
        outInfo.UsedBytes = m_Size;
        outInfo.UnusedBytes = 0;
        outInfo.AllocationSizeMin = outInfo.AllocationSizeMax = m_Size;
        outInfo.UnusedRangeSizeMin = UINT64_MAX;
        outInfo.UnusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct OwnAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        OwnAllocation m_OwnAllocation;
    };
};

2988 
2989 /*
2990 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
2991 allocated memory block or free.
2992 */
2993 struct VmaSuballocation
2994 {
2995  VkDeviceSize offset;
2996  VkDeviceSize size;
2997  VmaAllocation hAllocation;
2998  VmaSuballocationType type;
2999 };
3000 
3001 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3002 
3003 // Cost of making one additional allocation lost, expressed in bytes.
3004 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3005 
3006 /*
3007 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3008 
3009 If canMakeOtherLost was false:
3010 - item points to a FREE suballocation.
3011 - itemsToMakeLostCount is 0.
3012 
3013 If canMakeOtherLost was true:
3014 - item points to the first of a sequence of suballocations, which are either FREE,
3015  or point to VmaAllocations that can become lost.
3016 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3017  the requested allocation to succeed.
3018 */
3019 struct VmaAllocationRequest
3020 {
3021  VkDeviceSize offset;
3022  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3023  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3024  VmaSuballocationList::iterator item;
3025  size_t itemsToMakeLostCount;
3026 
3027  VkDeviceSize CalcCost() const
3028  {
3029  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3030  }
3031 };
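// Worked example (illustrative, not part of the original source): with
// VMA_LOST_ALLOCATION_COST == 1048576, a candidate request that would make
// 2 allocations lost while overlapping 300000 bytes of used items costs
//   CalcCost() == 300000 + 2 * 1048576 == 2397152 bytes,
// so a candidate that fits without losses (sumItemSize == 0,
// itemsToMakeLostCount == 0) always wins the cost comparison.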
3032 
3033 /*
3034 Represents a single block of device memory (VkDeviceMemory) with all the
3035 data about its regions (aka suballocations, VmaAllocation), assigned and free.
3036 
3037 Thread-safety: This class must be externally synchronized.
3038 */
3039 class VmaDeviceMemoryBlock
3040 {
3041 public:
3042  uint32_t m_MemoryTypeIndex;
3043  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3044  VkDeviceMemory m_hMemory;
3045  VkDeviceSize m_Size;
3046  bool m_PersistentMap;
3047  void* m_pMappedData;
3048  uint32_t m_FreeCount;
3049  VkDeviceSize m_SumFreeSize;
3050  VmaSuballocationList m_Suballocations;
3051  // Suballocations that are free and have size greater than a certain threshold.
3052  // Sorted by size, ascending.
3053  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3054 
3055  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3056 
3057  ~VmaDeviceMemoryBlock()
3058  {
3059  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3060  }
3061 
3062  // Always call after construction.
3063  void Init(
3064  uint32_t newMemoryTypeIndex,
3065  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3066  VkDeviceMemory newMemory,
3067  VkDeviceSize newSize,
3068  bool persistentMap,
3069  void* pMappedData);
3070  // Always call before destruction.
3071  void Destroy(VmaAllocator allocator);
3072 
3073  // Validates all data structures inside this object. If not valid, returns false.
3074  bool Validate() const;
3075 
3076  // Tries to find a place for suballocation with given parameters inside this allocation.
3077  // If succeeded, fills pAllocationRequest and returns true.
3078  // If failed, returns false.
3079  bool CreateAllocationRequest(
3080  uint32_t currentFrameIndex,
3081  uint32_t frameInUseCount,
3082  VkDeviceSize bufferImageGranularity,
3083  VkDeviceSize allocSize,
3084  VkDeviceSize allocAlignment,
3085  VmaSuballocationType allocType,
3086  bool canMakeOtherLost,
3087  VmaAllocationRequest* pAllocationRequest);
3088 
3089  bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);
3090 
3091  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3092 
3093  // Returns true if this block is empty - contains only a single free suballocation.
3094  bool IsEmpty() const;
3095 
3096  // Makes actual allocation based on request. Request must already be checked
3097  // and valid.
3098  void Alloc(
3099  const VmaAllocationRequest& request,
3100  VmaSuballocationType type,
3101  VkDeviceSize allocSize,
3102  VmaAllocation hAllocation);
3103 
3104  // Frees suballocation assigned to given memory region.
3105  void Free(const VmaAllocation allocation);
3106 
3107 #if VMA_STATS_STRING_ENABLED
3108  void PrintDetailedMap(class VmaJsonWriter& json) const;
3109 #endif
3110 
3111 private:
3112  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
3113  // If yes, fills pOffset and returns true. If no, returns false.
3114  bool CheckAllocation(
3115  uint32_t currentFrameIndex,
3116  uint32_t frameInUseCount,
3117  VkDeviceSize bufferImageGranularity,
3118  VkDeviceSize allocSize,
3119  VkDeviceSize allocAlignment,
3120  VmaSuballocationType allocType,
3121  VmaSuballocationList::const_iterator suballocItem,
3122  bool canMakeOtherLost,
3123  VkDeviceSize* pOffset,
3124  size_t* itemsToMakeLostCount,
3125  VkDeviceSize* pSumFreeSize,
3126  VkDeviceSize* pSumItemSize) const;
3127 
3128  // Merges given free suballocation with the following one, which must also be free.
3129  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3130  // Releases given suballocation, making it free.
3131  // Merges it with adjacent free suballocations if applicable.
3132  // Returns iterator to new free suballocation at this place.
3133  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3134  // Inserts given free suballocation into the sorted list
3135  // m_FreeSuballocationsBySize, if its size qualifies for registration.
3136  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3137  // Removes given free suballocation from the sorted list
3138  // m_FreeSuballocationsBySize, if its size qualifies for registration.
3139  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3140 
3141  bool ValidateFreeSuballocationList() const;
3142 };
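// Illustrative sketch of the block's bookkeeping: for a 1024-byte block holding
// one 256-byte allocation at offset 0, the structures above would contain:
//   m_Suballocations:           { {0, 256, USED}, {256, 768, FREE} }
//   m_FreeSuballocationsBySize: { iterator to {256, 768, FREE} }
// (assuming 768 exceeds VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER), with
// m_FreeCount == 1 and m_SumFreeSize == 768. Validate() checks exactly these
// invariants: contiguous offsets, no two adjacent FREE items, and totals
// matching a traversal of the list.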
3143 
3144 struct VmaPointerLess
3145 {
3146  bool operator()(const void* lhs, const void* rhs) const
3147  {
3148  return lhs < rhs;
3149  }
3150 };
3151 
3152 class VmaDefragmentator;
3153 
3154 /*
3155 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3156 Vulkan memory type.
3157 
3158 Synchronized internally with a mutex.
3159 */
3160 struct VmaBlockVector
3161 {
3162  VmaBlockVector(
3163  VmaAllocator hAllocator,
3164  uint32_t memoryTypeIndex,
3165  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3166  VkDeviceSize preferredBlockSize,
3167  size_t minBlockCount,
3168  size_t maxBlockCount,
3169  VkDeviceSize bufferImageGranularity,
3170  uint32_t frameInUseCount,
3171  bool isCustomPool);
3172  ~VmaBlockVector();
3173 
3174  VkResult CreateMinBlocks();
3175 
3176  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3177  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3178  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3179  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3180  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3181 
3182  void GetPoolStats(VmaPoolStats* pStats);
3183 
3184  bool IsEmpty() const { return m_Blocks.empty(); }
3185 
3186  VkResult Allocate(
3187  VmaPool hCurrentPool,
3188  uint32_t currentFrameIndex,
3189  const VkMemoryRequirements& vkMemReq,
3190  const VmaAllocationCreateInfo& createInfo,
3191  VmaSuballocationType suballocType,
3192  VmaAllocation* pAllocation);
3193 
3194  void Free(
3195  VmaAllocation hAllocation);
3196 
3197  // Adds statistics of this BlockVector to pStats.
3198  void AddStats(VmaStats* pStats);
3199 
3200 #if VMA_STATS_STRING_ENABLED
3201  void PrintDetailedMap(class VmaJsonWriter& json);
3202 #endif
3203 
3204  void UnmapPersistentlyMappedMemory();
3205  VkResult MapPersistentlyMappedMemory();
3206 
3207  void MakePoolAllocationsLost(
3208  uint32_t currentFrameIndex,
3209  size_t* pLostAllocationCount);
3210 
3211  VmaDefragmentator* EnsureDefragmentator(
3212  VmaAllocator hAllocator,
3213  uint32_t currentFrameIndex);
3214 
3215  VkResult Defragment(
3216  VmaDefragmentationStats* pDefragmentationStats,
3217  VkDeviceSize& maxBytesToMove,
3218  uint32_t& maxAllocationsToMove);
3219 
3220  void DestroyDefragmentator();
3221 
3222 private:
3223  friend class VmaDefragmentator;
3224 
3225  const VmaAllocator m_hAllocator;
3226  const uint32_t m_MemoryTypeIndex;
3227  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3228  const VkDeviceSize m_PreferredBlockSize;
3229  const size_t m_MinBlockCount;
3230  const size_t m_MaxBlockCount;
3231  const VkDeviceSize m_BufferImageGranularity;
3232  const uint32_t m_FrameInUseCount;
3233  const bool m_IsCustomPool;
3234  VMA_MUTEX m_Mutex;
3235  // Incrementally sorted by sumFreeSize, ascending.
3236  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3237  /* There can be at most one block that is completely empty - a
3238  hysteresis to avoid the pessimistic case of alternating creation and
3239  destruction of a VkDeviceMemory. */
3240  bool m_HasEmptyBlock;
3241  VmaDefragmentator* m_pDefragmentator;
3242 
3243  // Finds and removes given block from vector.
3244  void Remove(VmaDeviceMemoryBlock* pBlock);
3245 
3246  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3247  // after this call.
3248  void IncrementallySortBlocks();
3249 
3250  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3251 };
3252 
3253 struct VmaPool_T
3254 {
3255 public:
3256  VmaBlockVector m_BlockVector;
3257 
3258  // Takes ownership.
3259  VmaPool_T(
3260  VmaAllocator hAllocator,
3261  const VmaPoolCreateInfo& createInfo);
3262  ~VmaPool_T();
3263 
3264  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3265 
3266 #if VMA_STATS_STRING_ENABLED
3267  //void PrintDetailedMap(class VmaStringBuilder& sb);
3268 #endif
3269 };
3270 
3271 class VmaDefragmentator
3272 {
3273  const VmaAllocator m_hAllocator;
3274  VmaBlockVector* const m_pBlockVector;
3275  uint32_t m_CurrentFrameIndex;
3276  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3277  VkDeviceSize m_BytesMoved;
3278  uint32_t m_AllocationsMoved;
3279 
3280  struct AllocationInfo
3281  {
3282  VmaAllocation m_hAllocation;
3283  VkBool32* m_pChanged;
3284 
3285  AllocationInfo() :
3286  m_hAllocation(VK_NULL_HANDLE),
3287  m_pChanged(VMA_NULL)
3288  {
3289  }
3290  };
3291 
3292  struct AllocationInfoSizeGreater
3293  {
3294  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3295  {
3296  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3297  }
3298  };
3299 
3300  // Used between AddAllocation and Defragment.
3301  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3302 
3303  struct BlockInfo
3304  {
3305  VmaDeviceMemoryBlock* m_pBlock;
3306  bool m_HasNonMovableAllocations;
3307  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3308 
3309  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3310  m_pBlock(VMA_NULL),
3311  m_HasNonMovableAllocations(true),
3312  m_Allocations(pAllocationCallbacks),
3313  m_pMappedDataForDefragmentation(VMA_NULL)
3314  {
3315  }
3316 
3317  void CalcHasNonMovableAllocations()
3318  {
3319  const size_t blockAllocCount =
3320  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3321  const size_t defragmentAllocCount = m_Allocations.size();
3322  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3323  }
3324 
3325  void SortAllocationsBySizeDescecnding()
3326  {
3327  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3328  }
3329 
3330  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3331  void Unmap(VmaAllocator hAllocator);
3332 
3333  private:
3334  // Not null if mapped for defragmentation only, not persistently mapped.
3335  void* m_pMappedDataForDefragmentation;
3336  };
3337 
3338  struct BlockPointerLess
3339  {
3340  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3341  {
3342  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3343  }
3344  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3345  {
3346  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3347  }
3348  };
3349 
3350  // 1. Blocks with some non-movable allocations go first.
3351  // 2. Blocks with smaller sumFreeSize go first.
3352  struct BlockInfoCompareMoveDestination
3353  {
3354  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3355  {
3356  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3357  {
3358  return true;
3359  }
3360  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3361  {
3362  return false;
3363  }
3364  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3365  {
3366  return true;
3367  }
3368  return false;
3369  }
3370  };
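 // Illustrative ordering under this comparator: given block infos
 //   A{ nonMovable = true,  sumFreeSize = 300 },
 //   B{ nonMovable = false, sumFreeSize = 100 },
 //   C{ nonMovable = false, sumFreeSize = 200 },
 // sorting yields A, B, C - blocks that cannot be emptied anyway come first as
 // move destinations, then tighter blocks so they get packed fuller.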
3371 
3372  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3373  BlockInfoVector m_Blocks;
3374 
3375  VkResult DefragmentRound(
3376  VkDeviceSize maxBytesToMove,
3377  uint32_t maxAllocationsToMove);
3378 
3379  static bool MoveMakesSense(
3380  size_t dstBlockIndex, VkDeviceSize dstOffset,
3381  size_t srcBlockIndex, VkDeviceSize srcOffset);
3382 
3383 public:
3384  VmaDefragmentator(
3385  VmaAllocator hAllocator,
3386  VmaBlockVector* pBlockVector,
3387  uint32_t currentFrameIndex);
3388 
3389  ~VmaDefragmentator();
3390 
3391  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3392  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3393 
3394  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3395 
3396  VkResult Defragment(
3397  VkDeviceSize maxBytesToMove,
3398  uint32_t maxAllocationsToMove);
3399 };
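// Usage sketch (illustrative outline of the calling sequence, based on the
// members above):
//   VmaDefragmentator* pDefrag = blockVector.EnsureDefragmentator(hAllocator, frameIndex);
//   pDefrag->AddAllocation(hAlloc, &allocChanged); // repeated once per candidate
//   VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
//   // pDefrag->GetBytesMoved() and pDefrag->GetAllocationsMoved() then feed
//   // VmaDefragmentationStats before DestroyDefragmentator() is called.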
3400 
3401 // Main allocator object.
3402 struct VmaAllocator_T
3403 {
3404  bool m_UseMutex;
3405  VkDevice m_hDevice;
3406  bool m_AllocationCallbacksSpecified;
3407  VkAllocationCallbacks m_AllocationCallbacks;
3408  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3409  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3410  // Counter to allow nested calls to these functions.
3411  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3412 
3413  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3414  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3415  VMA_MUTEX m_HeapSizeLimitMutex;
3416 
3417  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3418  VkPhysicalDeviceMemoryProperties m_MemProps;
3419 
3420  // Default pools.
3421  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3422 
3423  // Each vector is sorted by memory (handle value).
3424  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3425  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3426  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3427 
3428  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3429  ~VmaAllocator_T();
3430 
3431  const VkAllocationCallbacks* GetAllocationCallbacks() const
3432  {
3433  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3434  }
3435  const VmaVulkanFunctions& GetVulkanFunctions() const
3436  {
3437  return m_VulkanFunctions;
3438  }
3439 
3440  VkDeviceSize GetBufferImageGranularity() const
3441  {
3442  return VMA_MAX(
3443  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3444  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3445  }
3446 
3447  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3448  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3449 
3450  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3451  {
3452  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3453  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3454  }
3455 
3456  // Main allocation function.
3457  VkResult AllocateMemory(
3458  const VkMemoryRequirements& vkMemReq,
3459  const VmaAllocationCreateInfo& createInfo,
3460  VmaSuballocationType suballocType,
3461  VmaAllocation* pAllocation);
3462 
3463  // Main deallocation function.
3464  void FreeMemory(const VmaAllocation allocation);
3465 
3466  void CalculateStats(VmaStats* pStats);
3467 
3468 #if VMA_STATS_STRING_ENABLED
3469  void PrintDetailedMap(class VmaJsonWriter& json);
3470 #endif
3471 
3472  void UnmapPersistentlyMappedMemory();
3473  VkResult MapPersistentlyMappedMemory();
3474 
3475  VkResult Defragment(
3476  VmaAllocation* pAllocations,
3477  size_t allocationCount,
3478  VkBool32* pAllocationsChanged,
3479  const VmaDefragmentationInfo* pDefragmentationInfo,
3480  VmaDefragmentationStats* pDefragmentationStats);
3481 
3482  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3483 
3484  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3485  void DestroyPool(VmaPool pool);
3486  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3487 
3488  void SetCurrentFrameIndex(uint32_t frameIndex);
3489 
3490  void MakePoolAllocationsLost(
3491  VmaPool hPool,
3492  size_t* pLostAllocationCount);
3493 
3494  void CreateLostAllocation(VmaAllocation* pAllocation);
3495 
3496  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3497  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3498 
3499 private:
3500  VkDeviceSize m_PreferredLargeHeapBlockSize;
3501  VkDeviceSize m_PreferredSmallHeapBlockSize;
3502 
3503  VkPhysicalDevice m_PhysicalDevice;
3504  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3505 
3506  VMA_MUTEX m_PoolsMutex;
3507  // Protected by m_PoolsMutex. Sorted by pointer value.
3508  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3509 
3510  VmaVulkanFunctions m_VulkanFunctions;
3511 
3512  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3513 
3514  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3515 
3516  VkResult AllocateMemoryOfType(
3517  const VkMemoryRequirements& vkMemReq,
3518  const VmaAllocationCreateInfo& createInfo,
3519  uint32_t memTypeIndex,
3520  VmaSuballocationType suballocType,
3521  VmaAllocation* pAllocation);
3522 
3523  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
3524  VkResult AllocateOwnMemory(
3525  VkDeviceSize size,
3526  VmaSuballocationType suballocType,
3527  uint32_t memTypeIndex,
3528  bool map,
3529  void* pUserData,
3530  VmaAllocation* pAllocation);
3531 
3532  // Frees given allocation, created as Own Memory - unregisters it and releases its VkDeviceMemory.
3533  void FreeOwnMemory(VmaAllocation allocation);
3534 };
3535 
3536 ////////////////////////////////////////////////////////////////////////////////
3537 // Memory allocation #2 after VmaAllocator_T definition
3538 
3539 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3540 {
3541  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3542 }
3543 
3544 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3545 {
3546  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3547 }
3548 
3549 template<typename T>
3550 static T* VmaAllocate(VmaAllocator hAllocator)
3551 {
3552  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3553 }
3554 
3555 template<typename T>
3556 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3557 {
3558  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3559 }
3560 
3561 template<typename T>
3562 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3563 {
3564  if(ptr != VMA_NULL)
3565  {
3566  ptr->~T();
3567  VmaFree(hAllocator, ptr);
3568  }
3569 }
3570 
3571 template<typename T>
3572 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3573 {
3574  if(ptr != VMA_NULL)
3575  {
3576  for(size_t i = count; i--; )
3577  ptr[i].~T();
3578  VmaFree(hAllocator, ptr);
3579  }
3580 }
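// Usage sketch (illustrative): VmaAllocateArray() returns raw, uninitialized
// memory, so non-trivial objects must be constructed in place and later
// destroyed by vma_delete_array(), which runs destructors in reverse order
// before freeing. MyType here is a hypothetical default-constructible type:
//   MyType* p = VmaAllocateArray<MyType>(hAllocator, n);
//   for(size_t i = 0; i < n; ++i) new(p + i) MyType();
//   ...
//   vma_delete_array(hAllocator, p, n);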
3581 
3582 ////////////////////////////////////////////////////////////////////////////////
3583 // VmaStringBuilder
3584 
3585 #if VMA_STATS_STRING_ENABLED
3586 
3587 class VmaStringBuilder
3588 {
3589 public:
3590  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3591  size_t GetLength() const { return m_Data.size(); }
3592  const char* GetData() const { return m_Data.data(); }
3593 
3594  void Add(char ch) { m_Data.push_back(ch); }
3595  void Add(const char* pStr);
3596  void AddNewLine() { Add('\n'); }
3597  void AddNumber(uint32_t num);
3598  void AddNumber(uint64_t num);
3599  void AddPointer(const void* ptr);
3600 
3601 private:
3602  VmaVector< char, VmaStlAllocator<char> > m_Data;
3603 };
3604 
3605 void VmaStringBuilder::Add(const char* pStr)
3606 {
3607  const size_t strLen = strlen(pStr);
3608  if(strLen > 0)
3609  {
3610  const size_t oldCount = m_Data.size();
3611  m_Data.resize(oldCount + strLen);
3612  memcpy(m_Data.data() + oldCount, pStr, strLen);
3613  }
3614 }
3615 
3616 void VmaStringBuilder::AddNumber(uint32_t num)
3617 {
3618  char buf[11]; // Max uint32_t takes 10 decimal digits + terminating zero.
3619  VmaUint32ToStr(buf, sizeof(buf), num);
3620  Add(buf);
3621 }
3622 
3623 void VmaStringBuilder::AddNumber(uint64_t num)
3624 {
3625  char buf[21]; // Max uint64_t takes 20 decimal digits + terminating zero.
3626  VmaUint64ToStr(buf, sizeof(buf), num);
3627  Add(buf);
3628 }
3629 
3630 void VmaStringBuilder::AddPointer(const void* ptr)
3631 {
3632  char buf[21];
3633  VmaPtrToStr(buf, sizeof(buf), ptr);
3634  Add(buf);
3635 }
3636 
3637 #endif // #if VMA_STATS_STRING_ENABLED
3638 
3639 ////////////////////////////////////////////////////////////////////////////////
3640 // VmaJsonWriter
3641 
3642 #if VMA_STATS_STRING_ENABLED
3643 
3644 class VmaJsonWriter
3645 {
3646 public:
3647  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3648  ~VmaJsonWriter();
3649 
3650  void BeginObject(bool singleLine = false);
3651  void EndObject();
3652 
3653  void BeginArray(bool singleLine = false);
3654  void EndArray();
3655 
3656  void WriteString(const char* pStr);
3657  void BeginString(const char* pStr = VMA_NULL);
3658  void ContinueString(const char* pStr);
3659  void ContinueString(uint32_t n);
3660  void ContinueString(uint64_t n);
3661  void EndString(const char* pStr = VMA_NULL);
3662 
3663  void WriteNumber(uint32_t n);
3664  void WriteNumber(uint64_t n);
3665  void WriteBool(bool b);
3666  void WriteNull();
3667 
3668 private:
3669  static const char* const INDENT;
3670 
3671  enum COLLECTION_TYPE
3672  {
3673  COLLECTION_TYPE_OBJECT,
3674  COLLECTION_TYPE_ARRAY,
3675  };
3676  struct StackItem
3677  {
3678  COLLECTION_TYPE type;
3679  uint32_t valueCount;
3680  bool singleLineMode;
3681  };
3682 
3683  VmaStringBuilder& m_SB;
3684  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3685  bool m_InsideString;
3686 
3687  void BeginValue(bool isString);
3688  void WriteIndent(bool oneLess = false);
3689 };
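// Usage sketch (illustrative): producing {"Count": 2, "Names": ["a", "b"]}:
//   VmaStringBuilder sb(allocator);
//   VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Count"); json.WriteNumber(2u);
//   json.WriteString("Names"); json.BeginArray(true);
//   json.WriteString("a"); json.WriteString("b");
//   json.EndArray();
//   json.EndObject();
// Inside an object, keys and values alternate; BeginValue() asserts that every
// key position receives a string.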
3690 
3691 const char* const VmaJsonWriter::INDENT = " ";
3692 
3693 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3694  m_SB(sb),
3695  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3696  m_InsideString(false)
3697 {
3698 }
3699 
3700 VmaJsonWriter::~VmaJsonWriter()
3701 {
3702  VMA_ASSERT(!m_InsideString);
3703  VMA_ASSERT(m_Stack.empty());
3704 }
3705 
3706 void VmaJsonWriter::BeginObject(bool singleLine)
3707 {
3708  VMA_ASSERT(!m_InsideString);
3709 
3710  BeginValue(false);
3711  m_SB.Add('{');
3712 
3713  StackItem item;
3714  item.type = COLLECTION_TYPE_OBJECT;
3715  item.valueCount = 0;
3716  item.singleLineMode = singleLine;
3717  m_Stack.push_back(item);
3718 }
3719 
3720 void VmaJsonWriter::EndObject()
3721 {
3722  VMA_ASSERT(!m_InsideString);
3723 
3724  WriteIndent(true);
3725  m_SB.Add('}');
3726 
3727  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3728  m_Stack.pop_back();
3729 }
3730 
3731 void VmaJsonWriter::BeginArray(bool singleLine)
3732 {
3733  VMA_ASSERT(!m_InsideString);
3734 
3735  BeginValue(false);
3736  m_SB.Add('[');
3737 
3738  StackItem item;
3739  item.type = COLLECTION_TYPE_ARRAY;
3740  item.valueCount = 0;
3741  item.singleLineMode = singleLine;
3742  m_Stack.push_back(item);
3743 }
3744 
3745 void VmaJsonWriter::EndArray()
3746 {
3747  VMA_ASSERT(!m_InsideString);
3748 
3749  WriteIndent(true);
3750  m_SB.Add(']');
3751 
3752  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3753  m_Stack.pop_back();
3754 }
3755 
3756 void VmaJsonWriter::WriteString(const char* pStr)
3757 {
3758  BeginString(pStr);
3759  EndString();
3760 }
3761 
3762 void VmaJsonWriter::BeginString(const char* pStr)
3763 {
3764  VMA_ASSERT(!m_InsideString);
3765 
3766  BeginValue(true);
3767  m_SB.Add('"');
3768  m_InsideString = true;
3769  if(pStr != VMA_NULL && pStr[0] != '\0')
3770  {
3771  ContinueString(pStr);
3772  }
3773 }
3774 
3775 void VmaJsonWriter::ContinueString(const char* pStr)
3776 {
3777  VMA_ASSERT(m_InsideString);
3778 
3779  const size_t strLen = strlen(pStr);
3780  for(size_t i = 0; i < strLen; ++i)
3781  {
3782  char ch = pStr[i];
3783  if(ch == '\\') // Escape backslash.
3784  {
3785  m_SB.Add("\\\\");
3786  }
3787  else if(ch == '"')
3788  {
3789  m_SB.Add("\\\"");
3790  }
3791  else if(ch >= 32)
3792  {
3793  m_SB.Add(ch);
3794  }
3795  else switch(ch)
3796  {
3797  case '\n':
3798  m_SB.Add("\\n");
3799  break;
3800  case '\r':
3801  m_SB.Add("\\r");
3802  break;
3803  case '\t':
3804  m_SB.Add("\\t");
3805  break;
3806  default:
3807  VMA_ASSERT(0 && "Character not currently supported.");
3808  break;
3809  }
3810  }
3811 }
3812 
3813 void VmaJsonWriter::ContinueString(uint32_t n)
3814 {
3815  VMA_ASSERT(m_InsideString);
3816  m_SB.AddNumber(n);
3817 }
3818 
3819 void VmaJsonWriter::ContinueString(uint64_t n)
3820 {
3821  VMA_ASSERT(m_InsideString);
3822  m_SB.AddNumber(n);
3823 }
3824 
3825 void VmaJsonWriter::EndString(const char* pStr)
3826 {
3827  VMA_ASSERT(m_InsideString);
3828  if(pStr != VMA_NULL && pStr[0] != '\0')
3829  {
3830  ContinueString(pStr);
3831  }
3832  m_SB.Add('"');
3833  m_InsideString = false;
3834 }
3835 
3836 void VmaJsonWriter::WriteNumber(uint32_t n)
3837 {
3838  VMA_ASSERT(!m_InsideString);
3839  BeginValue(false);
3840  m_SB.AddNumber(n);
3841 }
3842 
3843 void VmaJsonWriter::WriteNumber(uint64_t n)
3844 {
3845  VMA_ASSERT(!m_InsideString);
3846  BeginValue(false);
3847  m_SB.AddNumber(n);
3848 }
3849 
3850 void VmaJsonWriter::WriteBool(bool b)
3851 {
3852  VMA_ASSERT(!m_InsideString);
3853  BeginValue(false);
3854  m_SB.Add(b ? "true" : "false");
3855 }
3856 
3857 void VmaJsonWriter::WriteNull()
3858 {
3859  VMA_ASSERT(!m_InsideString);
3860  BeginValue(false);
3861  m_SB.Add("null");
3862 }
3863 
3864 void VmaJsonWriter::BeginValue(bool isString)
3865 {
3866  if(!m_Stack.empty())
3867  {
3868  StackItem& currItem = m_Stack.back();
3869  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3870  currItem.valueCount % 2 == 0)
3871  {
3872  VMA_ASSERT(isString);
3873  }
3874 
3875  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3876  currItem.valueCount % 2 != 0)
3877  {
3878  m_SB.Add(": ");
3879  }
3880  else if(currItem.valueCount > 0)
3881  {
3882  m_SB.Add(", ");
3883  WriteIndent();
3884  }
3885  else
3886  {
3887  WriteIndent();
3888  }
3889  ++currItem.valueCount;
3890  }
3891 }
3892 
3893 void VmaJsonWriter::WriteIndent(bool oneLess)
3894 {
3895  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3896  {
3897  m_SB.AddNewLine();
3898 
3899  size_t count = m_Stack.size();
3900  if(count > 0 && oneLess)
3901  {
3902  --count;
3903  }
3904  for(size_t i = 0; i < count; ++i)
3905  {
3906  m_SB.Add(INDENT);
3907  }
3908  }
3909 }
3910 
3911 #endif // #if VMA_STATS_STRING_ENABLED
3912 
3913 ////////////////////////////////////////////////////////////////////////////////
3914 
3915 VkDeviceSize VmaAllocation_T::GetOffset() const
3916 {
3917  switch(m_Type)
3918  {
3919  case ALLOCATION_TYPE_BLOCK:
3920  return m_BlockAllocation.m_Offset;
3921  case ALLOCATION_TYPE_OWN:
3922  return 0;
3923  default:
3924  VMA_ASSERT(0);
3925  return 0;
3926  }
3927 }
3928 
3929 VkDeviceMemory VmaAllocation_T::GetMemory() const
3930 {
3931  switch(m_Type)
3932  {
3933  case ALLOCATION_TYPE_BLOCK:
3934  return m_BlockAllocation.m_Block->m_hMemory;
3935  case ALLOCATION_TYPE_OWN:
3936  return m_OwnAllocation.m_hMemory;
3937  default:
3938  VMA_ASSERT(0);
3939  return VK_NULL_HANDLE;
3940  }
3941 }
3942 
3943 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3944 {
3945  switch(m_Type)
3946  {
3947  case ALLOCATION_TYPE_BLOCK:
3948  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3949  case ALLOCATION_TYPE_OWN:
3950  return m_OwnAllocation.m_MemoryTypeIndex;
3951  default:
3952  VMA_ASSERT(0);
3953  return UINT32_MAX;
3954  }
3955 }
3956 
3957 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3958 {
3959  switch(m_Type)
3960  {
3961  case ALLOCATION_TYPE_BLOCK:
3962  return m_BlockAllocation.m_Block->m_BlockVectorType;
3963  case ALLOCATION_TYPE_OWN:
3964  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3965  default:
3966  VMA_ASSERT(0);
3967  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3968  }
3969 }
3970 
3971 void* VmaAllocation_T::GetMappedData() const
3972 {
3973  switch(m_Type)
3974  {
3975  case ALLOCATION_TYPE_BLOCK:
3976  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3977  {
3978  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3979  }
3980  else
3981  {
3982  return VMA_NULL;
3983  }
3984  break;
3985  case ALLOCATION_TYPE_OWN:
3986  return m_OwnAllocation.m_pMappedData;
3987  default:
3988  VMA_ASSERT(0);
3989  return VMA_NULL;
3990  }
3991 }
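// Illustrative: for a block persistently mapped at address P, an allocation at
// offset 4096 inside it sees GetMappedData() == (char*)P + 4096, while an own
// allocation returns its private mapping directly (or VMA_NULL if not mapped).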
3992 
3993 bool VmaAllocation_T::CanBecomeLost() const
3994 {
3995  switch(m_Type)
3996  {
3997  case ALLOCATION_TYPE_BLOCK:
3998  return m_BlockAllocation.m_CanBecomeLost;
3999  case ALLOCATION_TYPE_OWN:
4000  return false;
4001  default:
4002  VMA_ASSERT(0);
4003  return false;
4004  }
4005 }
4006 
4007 VmaPool VmaAllocation_T::GetPool() const
4008 {
4009  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4010  return m_BlockAllocation.m_hPool;
4011 }
4012 
4013 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4014 {
4015  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4016  if(m_OwnAllocation.m_PersistentMap)
4017  {
4018  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4019  hAllocator->m_hDevice,
4020  m_OwnAllocation.m_hMemory,
4021  0,
4022  VK_WHOLE_SIZE,
4023  0,
4024  &m_OwnAllocation.m_pMappedData);
4025  }
4026  return VK_SUCCESS;
4027 }
4028 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4029 {
4030  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4031  if(m_OwnAllocation.m_pMappedData)
4032  {
4033  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
4034  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
4035  m_OwnAllocation.m_pMappedData = VMA_NULL;
4036  }
4037 }
4038 
4039 
4040 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4041 {
4042  VMA_ASSERT(CanBecomeLost());
4043 
4044  /*
4045  Warning: This is a carefully designed algorithm.
4046  Do not modify unless you really know what you're doing :)
4047  */
4048  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4049  for(;;)
4050  {
4051  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4052  {
4053  VMA_ASSERT(0);
4054  return false;
4055  }
4056  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4057  {
4058  return false;
4059  }
4060  else // Last use time earlier than current time.
4061  {
4062  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4063  {
4064  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4065  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4066  return true;
4067  }
4068  }
4069  }
4070 }
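// Worked example (illustrative): with frameInUseCount == 2, an allocation last
// used in frame 10 cannot yet become lost at frames 11 and 12, because
// 10 + 2 >= currentFrameIndex, and becomes lost from frame 13 on, because
// 10 + 2 < 13. The compare-exchange loop retries if another thread changed
// LastUseFrameIndex between the load and the exchange.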
4071 
4072 #if VMA_STATS_STRING_ENABLED
4073 
4074 // Entries correspond to values of enum VmaSuballocationType.
4075 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4076  "FREE",
4077  "UNKNOWN",
4078  "BUFFER",
4079  "IMAGE_UNKNOWN",
4080  "IMAGE_LINEAR",
4081  "IMAGE_OPTIMAL",
4082 };
4083 
4084 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4085 {
4086  json.BeginObject();
4087 
4088  json.WriteString("Blocks");
4089  json.WriteNumber(stat.BlockCount);
4090 
4091  json.WriteString("Allocations");
4092  json.WriteNumber(stat.AllocationCount);
4093 
4094  json.WriteString("UnusedRanges");
4095  json.WriteNumber(stat.UnusedRangeCount);
4096 
4097  json.WriteString("UsedBytes");
4098  json.WriteNumber(stat.UsedBytes);
4099 
4100  json.WriteString("UnusedBytes");
4101  json.WriteNumber(stat.UnusedBytes);
4102 
4103  if(stat.AllocationCount > 1)
4104  {
4105  json.WriteString("AllocationSize");
4106  json.BeginObject(true);
4107  json.WriteString("Min");
4108  json.WriteNumber(stat.AllocationSizeMin);
4109  json.WriteString("Avg");
4110  json.WriteNumber(stat.AllocationSizeAvg);
4111  json.WriteString("Max");
4112  json.WriteNumber(stat.AllocationSizeMax);
4113  json.EndObject();
4114  }
4115 
4116  if(stat.UnusedRangeCount > 1)
4117  {
4118  json.WriteString("UnusedRangeSize");
4119  json.BeginObject(true);
4120  json.WriteString("Min");
4121  json.WriteNumber(stat.UnusedRangeSizeMin);
4122  json.WriteString("Avg");
4123  json.WriteNumber(stat.UnusedRangeSizeAvg);
4124  json.WriteString("Max");
4125  json.WriteNumber(stat.UnusedRangeSizeMax);
4126  json.EndObject();
4127  }
4128 
4129  json.EndObject();
4130 }
4131 
4132 #endif // #if VMA_STATS_STRING_ENABLED
4133 
4134 struct VmaSuballocationItemSizeLess
4135 {
4136  bool operator()(
4137  const VmaSuballocationList::iterator lhs,
4138  const VmaSuballocationList::iterator rhs) const
4139  {
4140  return lhs->size < rhs->size;
4141  }
4142  bool operator()(
4143  const VmaSuballocationList::iterator lhs,
4144  VkDeviceSize rhsSize) const
4145  {
4146  return lhs->size < rhsSize;
4147  }
4148 };
4149 
4150 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
4151  m_MemoryTypeIndex(UINT32_MAX),
4152  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
4153  m_hMemory(VK_NULL_HANDLE),
4154  m_Size(0),
4155  m_PersistentMap(false),
4156  m_pMappedData(VMA_NULL),
4157  m_FreeCount(0),
4158  m_SumFreeSize(0),
4159  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4160  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4161 {
4162 }
4163 
4164 void VmaDeviceMemoryBlock::Init(
4165  uint32_t newMemoryTypeIndex,
4166  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4167  VkDeviceMemory newMemory,
4168  VkDeviceSize newSize,
4169  bool persistentMap,
4170  void* pMappedData)
4171 {
4172  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4173 
4174  m_MemoryTypeIndex = newMemoryTypeIndex;
4175  m_BlockVectorType = newBlockVectorType;
4176  m_hMemory = newMemory;
4177  m_Size = newSize;
4178  m_PersistentMap = persistentMap;
4179  m_pMappedData = pMappedData;
4180  m_FreeCount = 1;
4181  m_SumFreeSize = newSize;
4182 
4183  m_Suballocations.clear();
4184  m_FreeSuballocationsBySize.clear();
4185 
4186  VmaSuballocation suballoc = {};
4187  suballoc.offset = 0;
4188  suballoc.size = newSize;
4189  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4190  suballoc.hAllocation = VK_NULL_HANDLE;
4191 
4192  m_Suballocations.push_back(suballoc);
4193  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4194  --suballocItem;
4195  m_FreeSuballocationsBySize.push_back(suballocItem);
4196 }
4197 
4198 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
4199 {
4200  // This is the most important assert in the entire library.
4201  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
4202  VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
4203 
4204  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
4205  if(m_pMappedData != VMA_NULL)
4206  {
4207  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
4208  m_pMappedData = VMA_NULL;
4209  }
4210 
4211  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
4212  m_hMemory = VK_NULL_HANDLE;
4213 }
4214 
4215 bool VmaDeviceMemoryBlock::Validate() const
4216 {
4217  if((m_hMemory == VK_NULL_HANDLE) ||
4218  (m_Size == 0) ||
4219  m_Suballocations.empty())
4220  {
4221  return false;
4222  }
4223 
4224  // Expected offset of new suballocation as calculated from previous ones.
4225  VkDeviceSize calculatedOffset = 0;
4226  // Expected number of free suballocations as calculated from traversing their list.
4227  uint32_t calculatedFreeCount = 0;
4228  // Expected sum size of free suballocations as calculated from traversing their list.
4229  VkDeviceSize calculatedSumFreeSize = 0;
4230  // Expected number of free suballocations that should be registered in
4231  // m_FreeSuballocationsBySize calculated from traversing their list.
4232  size_t freeSuballocationsToRegister = 0;
4233  // True if previously visited suballocation was free.
4234  bool prevFree = false;
4235 
4236  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4237  suballocItem != m_Suballocations.cend();
4238  ++suballocItem)
4239  {
4240  const VmaSuballocation& subAlloc = *suballocItem;
4241 
4242  // Actual offset of this suballocation doesn't match expected one.
4243  if(subAlloc.offset != calculatedOffset)
4244  {
4245  return false;
4246  }
4247 
4248  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4249  // Two adjacent free suballocations are invalid. They should be merged.
4250  if(prevFree && currFree)
4251  {
4252  return false;
4253  }
4254  prevFree = currFree;
4255 
4256  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4257  {
4258  return false;
4259  }
4260 
4261  if(currFree)
4262  {
4263  calculatedSumFreeSize += subAlloc.size;
4264  ++calculatedFreeCount;
4265  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4266  {
4267  ++freeSuballocationsToRegister;
4268  }
4269  }
4270 
4271  calculatedOffset += subAlloc.size;
4272  }
4273 
4274  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4275  // match expected one.
4276  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4277  {
4278  return false;
4279  }
4280 
4281  VkDeviceSize lastSize = 0;
4282  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4283  {
4284  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4285 
4286  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4287  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4288  {
4289  return false;
4290  }
4291  // They must be sorted by size ascending.
4292  if(suballocItem->size < lastSize)
4293  {
4294  return false;
4295  }
4296 
4297  lastSize = suballocItem->size;
4298  }
4299 
4300  // Check if totals match calculated values.
4301  return
4302  (calculatedOffset == m_Size) &&
4303  (calculatedSumFreeSize == m_SumFreeSize) &&
4304  (calculatedFreeCount == m_FreeCount);
4305 }
4306 
4307 /*
4308 How many suitable free suballocations to analyze before choosing best one.
4309 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4310  be chosen.
4311 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4312  suballocations will be analyzed and the best one will be chosen.
4313 - Any other value is also acceptable.
4314 */
4315 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
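// Illustrative: with registered free sizes {64, 128, 512} and allocSize == 100,
// VMA_BEST_FIT binary-searches to the first size not less than 100 and tries
// 128 first (then 512 if 128 fails the alignment/granularity checks); with
// VMA_BEST_FIT disabled, the scan starts from 512, the biggest one (worst-fit).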
4316 
4317 bool VmaDeviceMemoryBlock::CreateAllocationRequest(
4318  uint32_t currentFrameIndex,
4319  uint32_t frameInUseCount,
4320  VkDeviceSize bufferImageGranularity,
4321  VkDeviceSize allocSize,
4322  VkDeviceSize allocAlignment,
4323  VmaSuballocationType allocType,
4324  bool canMakeOtherLost,
4325  VmaAllocationRequest* pAllocationRequest)
4326 {
4327  VMA_ASSERT(allocSize > 0);
4328  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4329  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4330  VMA_HEAVY_ASSERT(Validate());
4331 
4332  // There is not enough total free space in this block to fulfill the request: Early return.
4333  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4334  {
4335  return false;
4336  }
4337 
4338  // New algorithm, efficiently searching freeSuballocationsBySize.
4339  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4340  if(freeSuballocCount > 0)
4341  {
4342  if(VMA_BEST_FIT)
4343  {
4344  // Find first free suballocation with size not less than allocSize.
4345  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4346  m_FreeSuballocationsBySize.data(),
4347  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4348  allocSize,
4349  VmaSuballocationItemSizeLess());
4350  size_t index = it - m_FreeSuballocationsBySize.data();
4351  for(; index < freeSuballocCount; ++index)
4352  {
4353  if(CheckAllocation(
4354  currentFrameIndex,
4355  frameInUseCount,
4356  bufferImageGranularity,
4357  allocSize,
4358  allocAlignment,
4359  allocType,
4360  m_FreeSuballocationsBySize[index],
4361  false, // canMakeOtherLost
4362  &pAllocationRequest->offset,
4363  &pAllocationRequest->itemsToMakeLostCount,
4364  &pAllocationRequest->sumFreeSize,
4365  &pAllocationRequest->sumItemSize))
4366  {
4367  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4368  return true;
4369  }
4370  }
4371  }
4372  else
4373  {
4374  // Search starting from biggest suballocations.
4375  for(size_t index = freeSuballocCount; index--; )
4376  {
4377  if(CheckAllocation(
4378  currentFrameIndex,
4379  frameInUseCount,
4380  bufferImageGranularity,
4381  allocSize,
4382  allocAlignment,
4383  allocType,
4384  m_FreeSuballocationsBySize[index],
4385  false, // canMakeOtherLost
4386  &pAllocationRequest->offset,
4387  &pAllocationRequest->itemsToMakeLostCount,
4388  &pAllocationRequest->sumFreeSize,
4389  &pAllocationRequest->sumItemSize))
4390  {
4391  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4392  return true;
4393  }
4394  }
4395  }
4396  }
4397 
4398  if(canMakeOtherLost)
4399  {
4400  // Brute-force algorithm. TODO: Come up with something better.
4401 
4402  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4403  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4404 
4405  VmaAllocationRequest tmpAllocRequest = {};
4406  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4407  suballocIt != m_Suballocations.end();
4408  ++suballocIt)
4409  {
4410  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4411  suballocIt->hAllocation->CanBecomeLost())
4412  {
4413  if(CheckAllocation(
4414  currentFrameIndex,
4415  frameInUseCount,
4416  bufferImageGranularity,
4417  allocSize,
4418  allocAlignment,
4419  allocType,
4420  suballocIt,
4421  canMakeOtherLost,
4422  &tmpAllocRequest.offset,
4423  &tmpAllocRequest.itemsToMakeLostCount,
4424  &tmpAllocRequest.sumFreeSize,
4425  &tmpAllocRequest.sumItemSize))
4426  {
4427  tmpAllocRequest.item = suballocIt;
4428 
4429  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4430  {
4431  *pAllocationRequest = tmpAllocRequest;
4432  }
4433  }
4434  }
4435  }
4436 
4437  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4438  {
4439  return true;
4440  }
4441  }
4442 
4443  return false;
4444 }
4445 
4446 bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
4447 {
4448  while(pAllocationRequest->itemsToMakeLostCount > 0)
4449  {
4450  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4451  {
4452  ++pAllocationRequest->item;
4453  }
4454  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4455  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4456  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4457  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4458  {
4459  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4460  --pAllocationRequest->itemsToMakeLostCount;
4461  }
4462  else
4463  {
4464  return false;
4465  }
4466  }
4467 
4468  VMA_HEAVY_ASSERT(Validate());
4469  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4470  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4471 
4472  return true;
4473 }
4474 
4475 uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4476 {
4477  uint32_t lostAllocationCount = 0;
4478  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4479  it != m_Suballocations.end();
4480  ++it)
4481  {
4482  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4483  it->hAllocation->CanBecomeLost() &&
4484  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4485  {
4486  it = FreeSuballocation(it);
4487  ++lostAllocationCount;
4488  }
4489  }
4490  return lostAllocationCount;
4491 }
4492 
4493 bool VmaDeviceMemoryBlock::CheckAllocation(
4494  uint32_t currentFrameIndex,
4495  uint32_t frameInUseCount,
4496  VkDeviceSize bufferImageGranularity,
4497  VkDeviceSize allocSize,
4498  VkDeviceSize allocAlignment,
4499  VmaSuballocationType allocType,
4500  VmaSuballocationList::const_iterator suballocItem,
4501  bool canMakeOtherLost,
4502  VkDeviceSize* pOffset,
4503  size_t* itemsToMakeLostCount,
4504  VkDeviceSize* pSumFreeSize,
4505  VkDeviceSize* pSumItemSize) const
4506 {
4507  VMA_ASSERT(allocSize > 0);
4508  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4509  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4510  VMA_ASSERT(pOffset != VMA_NULL);
4511 
4512  *itemsToMakeLostCount = 0;
4513  *pSumFreeSize = 0;
4514  *pSumItemSize = 0;
4515 
4516  if(canMakeOtherLost)
4517  {
4518  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4519  {
4520  *pSumFreeSize = suballocItem->size;
4521  }
4522  else
4523  {
4524  if(suballocItem->hAllocation->CanBecomeLost() &&
4525  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4526  {
4527  ++*itemsToMakeLostCount;
4528  *pSumItemSize = suballocItem->size;
4529  }
4530  else
4531  {
4532  return false;
4533  }
4534  }
4535 
4536  // Remaining size is too small for this request: Early return.
4537  if(m_Size - suballocItem->offset < allocSize)
4538  {
4539  return false;
4540  }
4541 
4542  // Start from offset equal to beginning of this suballocation.
4543  *pOffset = suballocItem->offset;
4544 
4545  // Apply VMA_DEBUG_MARGIN at the beginning.
4546  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4547  {
4548  *pOffset += VMA_DEBUG_MARGIN;
4549  }
4550 
4551  // Apply alignment.
4552  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4553  *pOffset = VmaAlignUp(*pOffset, alignment);
4554 
4555  // Check previous suballocations for BufferImageGranularity conflicts.
4556  // Make bigger alignment if necessary.
4557  if(bufferImageGranularity > 1)
4558  {
4559  bool bufferImageGranularityConflict = false;
4560  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4561  while(prevSuballocItem != m_Suballocations.cbegin())
4562  {
4563  --prevSuballocItem;
4564  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4565  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4566  {
4567  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4568  {
4569  bufferImageGranularityConflict = true;
4570  break;
4571  }
4572  }
4573  else
4574  // Already on previous page.
4575  break;
4576  }
4577  if(bufferImageGranularityConflict)
4578  {
4579  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4580  }
4581  }
4582 
4583  // Now that we have final *pOffset, check if we are past suballocItem.
4584  // If yes, return false - this function should be called for another suballocItem as starting point.
4585  if(*pOffset >= suballocItem->offset + suballocItem->size)
4586  {
4587  return false;
4588  }
4589 
4590  // Calculate padding at the beginning based on current offset.
4591  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4592 
4593  // Calculate required margin at the end if this is not last suballocation.
4594  VmaSuballocationList::const_iterator next = suballocItem;
4595  ++next;
4596  const VkDeviceSize requiredEndMargin =
4597  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4598 
4599  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4600  // Another early return check.
4601  if(suballocItem->offset + totalSize > m_Size)
4602  {
4603  return false;
4604  }
4605 
4606  // Advance lastSuballocItem until desired size is reached.
4607  // Update itemsToMakeLostCount.
4608  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4609  if(totalSize > suballocItem->size)
4610  {
4611  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4612  while(remainingSize > 0)
4613  {
4614  ++lastSuballocItem;
4615  if(lastSuballocItem == m_Suballocations.cend())
4616  {
4617  return false;
4618  }
4619  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4620  {
4621  *pSumFreeSize += lastSuballocItem->size;
4622  }
4623  else
4624  {
4625  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4626  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4627  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4628  {
4629  ++*itemsToMakeLostCount;
4630  *pSumItemSize += lastSuballocItem->size;
4631  }
4632  else
4633  {
4634  return false;
4635  }
4636  }
4637  remainingSize = (lastSuballocItem->size < remainingSize) ?
4638  remainingSize - lastSuballocItem->size : 0;
4639  }
4640  }
4641 
4642  // Check next suballocations for BufferImageGranularity conflicts.
4643  // If conflict exists, we must mark more allocations lost or fail.
4644  if(bufferImageGranularity > 1)
4645  {
4646  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4647  ++nextSuballocItem;
4648  while(nextSuballocItem != m_Suballocations.cend())
4649  {
4650  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4651  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4652  {
4653  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4654  {
4655  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4656  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4657  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4658  {
4659  ++*itemsToMakeLostCount;
4660  }
4661  else
4662  {
4663  return false;
4664  }
4665  }
4666  }
4667  else
4668  {
4669  // Already on next page.
4670  break;
4671  }
4672  ++nextSuballocItem;
4673  }
4674  }
4675  }
4676  else
4677  {
4678  const VmaSuballocation& suballoc = *suballocItem;
4679  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4680 
4681  *pSumFreeSize = suballoc.size;
4682 
4683  // Size of this suballocation is too small for this request: Early return.
4684  if(suballoc.size < allocSize)
4685  {
4686  return false;
4687  }
4688 
4689  // Start from offset equal to beginning of this suballocation.
4690  *pOffset = suballoc.offset;
4691 
4692  // Apply VMA_DEBUG_MARGIN at the beginning.
4693  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4694  {
4695  *pOffset += VMA_DEBUG_MARGIN;
4696  }
4697 
4698  // Apply alignment.
4699  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4700  *pOffset = VmaAlignUp(*pOffset, alignment);
4701 
4702  // Check previous suballocations for BufferImageGranularity conflicts.
4703  // Make bigger alignment if necessary.
4704  if(bufferImageGranularity > 1)
4705  {
4706  bool bufferImageGranularityConflict = false;
4707  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4708  while(prevSuballocItem != m_Suballocations.cbegin())
4709  {
4710  --prevSuballocItem;
4711  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4712  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4713  {
4714  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4715  {
4716  bufferImageGranularityConflict = true;
4717  break;
4718  }
4719  }
4720  else
4721  // Already on previous page.
4722  break;
4723  }
4724  if(bufferImageGranularityConflict)
4725  {
4726  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4727  }
4728  }
4729 
4730  // Calculate padding at the beginning based on current offset.
4731  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4732 
4733  // Calculate required margin at the end if this is not last suballocation.
4734  VmaSuballocationList::const_iterator next = suballocItem;
4735  ++next;
4736  const VkDeviceSize requiredEndMargin =
4737  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4738 
4739  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4740  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4741  {
4742  return false;
4743  }
4744 
4745  // Check next suballocations for BufferImageGranularity conflicts.
4746  // If conflict exists, allocation cannot be made here.
4747  if(bufferImageGranularity > 1)
4748  {
4749  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4750  ++nextSuballocItem;
4751  while(nextSuballocItem != m_Suballocations.cend())
4752  {
4753  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4754  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4755  {
4756  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4757  {
4758  return false;
4759  }
4760  }
4761  else
4762  {
4763  // Already on next page.
4764  break;
4765  }
4766  ++nextSuballocItem;
4767  }
4768  }
4769  }
4770 
4771  // All tests passed: Success. pOffset is already filled.
4772  return true;
4773 }
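// Worked example (illustrative, ignoring granularity conflicts): checking a
// FREE suballocation { offset = 1000, size = 1048 } for allocSize == 256,
// allocAlignment == 256, VMA_DEBUG_MARGIN == 0:
//   *pOffset = VmaAlignUp(1000, 256) == 1024, so paddingBegin == 24,
// and 24 + 256 + 0 <= 1048, so the check succeeds.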
4774 
4775 bool VmaDeviceMemoryBlock::IsEmpty() const
4776 {
4777  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4778 }
4779 
4780 void VmaDeviceMemoryBlock::Alloc(
4781  const VmaAllocationRequest& request,
4782  VmaSuballocationType type,
4783  VkDeviceSize allocSize,
4784  VmaAllocation hAllocation)
4785 {
4786  VMA_ASSERT(request.item != m_Suballocations.end());
4787  VmaSuballocation& suballoc = *request.item;
4788  // Given suballocation is a free block.
4789  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4790  // Given offset is inside this suballocation.
4791  VMA_ASSERT(request.offset >= suballoc.offset);
4792  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4793  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4794  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4795 
4796  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4797  // it to become used.
4798  UnregisterFreeSuballocation(request.item);
4799 
4800  suballoc.offset = request.offset;
4801  suballoc.size = allocSize;
4802  suballoc.type = type;
4803  suballoc.hAllocation = hAllocation;
4804 
4805  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4806  if(paddingEnd)
4807  {
4808  VmaSuballocation paddingSuballoc = {};
4809  paddingSuballoc.offset = request.offset + allocSize;
4810  paddingSuballoc.size = paddingEnd;
4811  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4812  VmaSuballocationList::iterator next = request.item;
4813  ++next;
4814  const VmaSuballocationList::iterator paddingEndItem =
4815  m_Suballocations.insert(next, paddingSuballoc);
4816  RegisterFreeSuballocation(paddingEndItem);
4817  }
4818 
4819  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4820  if(paddingBegin)
4821  {
4822  VmaSuballocation paddingSuballoc = {};
4823  paddingSuballoc.offset = request.offset - paddingBegin;
4824  paddingSuballoc.size = paddingBegin;
4825  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4826  const VmaSuballocationList::iterator paddingBeginItem =
4827  m_Suballocations.insert(request.item, paddingSuballoc);
4828  RegisterFreeSuballocation(paddingBeginItem);
4829  }
4830 
4831  // Update totals.
4832  m_FreeCount = m_FreeCount - 1;
4833  if(paddingBegin > 0)
4834  {
4835  ++m_FreeCount;
4836  }
4837  if(paddingEnd > 0)
4838  {
4839  ++m_FreeCount;
4840  }
4841  m_SumFreeSize -= allocSize;
4842 }
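
// Worked example of the split performed by Alloc above (numbers are
// illustrative): a free suballocation covering [100, 400) (offset 100,
// size 300) receives a request of allocSize 150 at aligned offset 128:
//   paddingBegin = 128 - 100 = 28        -> new free suballocation [100, 128)
//   used range   = [128, 278)            -> the returned allocation
//   paddingEnd   = 300 - 28 - 150 = 122  -> new free suballocation [278, 400)
// Net effect on m_FreeCount: -1 (consumed) + 2 (created) = +1.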
4843 
4844 VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
4845 {
4846  // Change this suballocation to be marked as free.
4847  VmaSuballocation& suballoc = *suballocItem;
4848  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4849  suballoc.hAllocation = VK_NULL_HANDLE;
4850 
4851  // Update totals.
4852  ++m_FreeCount;
4853  m_SumFreeSize += suballoc.size;
4854 
4855  // Merge with previous and/or next suballocation if it's also free.
4856  bool mergeWithNext = false;
4857  bool mergeWithPrev = false;
4858 
4859  VmaSuballocationList::iterator nextItem = suballocItem;
4860  ++nextItem;
4861  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
4862  {
4863  mergeWithNext = true;
4864  }
4865 
4866  VmaSuballocationList::iterator prevItem = suballocItem;
4867  if(suballocItem != m_Suballocations.begin())
4868  {
4869  --prevItem;
4870  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4871  {
4872  mergeWithPrev = true;
4873  }
4874  }
4875 
4876  if(mergeWithNext)
4877  {
4878  UnregisterFreeSuballocation(nextItem);
4879  MergeFreeWithNext(suballocItem);
4880  }
4881 
4882  if(mergeWithPrev)
4883  {
4884  UnregisterFreeSuballocation(prevItem);
4885  MergeFreeWithNext(prevItem);
4886  RegisterFreeSuballocation(prevItem);
4887  return prevItem;
4888  }
4889  else
4890  {
4891  RegisterFreeSuballocation(suballocItem);
4892  return suballocItem;
4893  }
4894 }
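
// The two-sided coalescing above, shown in isolation on a plain std::list
// (a simplified sketch with illustrative types; the size-sorted registry
// bookkeeping of the real code is omitted):
#include <cstddef>
#include <iterator>
#include <list>

struct ExampleRange { size_t offset; size_t size; bool free; };

// Assumes the caller already marked *it as free, mirroring FreeSuballocation.
static std::list<ExampleRange>::iterator ExampleCoalesce(
    std::list<ExampleRange>& ranges, std::list<ExampleRange>::iterator it)
{
    // Absorb the next range if it is free.
    std::list<ExampleRange>::iterator next = std::next(it);
    if(next != ranges.end() && next->free)
    {
        it->size += next->size;
        ranges.erase(next);
    }
    // Get absorbed by the previous range if it is free.
    if(it != ranges.begin())
    {
        std::list<ExampleRange>::iterator prev = std::prev(it);
        if(prev->free)
        {
            prev->size += it->size;
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}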
4895 
4896 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4897 {
4898  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4899  suballocItem != m_Suballocations.end();
4900  ++suballocItem)
4901  {
4902  VmaSuballocation& suballoc = *suballocItem;
4903  if(suballoc.hAllocation == allocation)
4904  {
4905  FreeSuballocation(suballocItem);
4906  VMA_HEAVY_ASSERT(Validate());
4907  return;
4908  }
4909  }
4910  VMA_ASSERT(0 && "Not found!");
4911 }
4912 
4913 #if VMA_STATS_STRING_ENABLED
4914 
4915 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4916 {
4917  json.BeginObject();
4918 
4919  json.WriteString("TotalBytes");
4920  json.WriteNumber(m_Size);
4921 
4922  json.WriteString("UnusedBytes");
4923  json.WriteNumber(m_SumFreeSize);
4924 
4925  json.WriteString("Allocations");
4926  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4927 
4928  json.WriteString("UnusedRanges");
4929  json.WriteNumber(m_FreeCount);
4930 
4931  json.WriteString("Suballocations");
4932  json.BeginArray();
4933  size_t i = 0;
4934  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4935  suballocItem != m_Suballocations.cend();
4936  ++suballocItem, ++i)
4937  {
4938  json.BeginObject(true);
4939 
4940  json.WriteString("Type");
4941  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4942 
4943  json.WriteString("Size");
4944  json.WriteNumber(suballocItem->size);
4945 
4946  json.WriteString("Offset");
4947  json.WriteNumber(suballocItem->offset);
4948 
4949  json.EndObject();
4950  }
4951  json.EndArray();
4952 
4953  json.EndObject();
4954 }
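
// PrintDetailedMap above emits JSON of roughly this shape (values and type
// name strings are illustrative):
//
// {
//   "TotalBytes": 268435456,
//   "UnusedBytes": 268304384,
//   "Allocations": 2,
//   "UnusedRanges": 1,
//   "Suballocations": [
//     { "Type": "BUFFER", "Size": 65536, "Offset": 0 },
//     { "Type": "BUFFER", "Size": 65536, "Offset": 65536 },
//     { "Type": "FREE", "Size": 268304384, "Offset": 131072 }
//   ]
// }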
4955 
4956 #endif // #if VMA_STATS_STRING_ENABLED
4957 
4958 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4959 {
4960  VMA_ASSERT(item != m_Suballocations.end());
4961  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4962 
4963  VmaSuballocationList::iterator nextItem = item;
4964  ++nextItem;
4965  VMA_ASSERT(nextItem != m_Suballocations.end());
4966  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4967 
4968  item->size += nextItem->size;
4969  --m_FreeCount;
4970  m_Suballocations.erase(nextItem);
4971 }
4972 
4973 void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
4974 {
4975  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4976  VMA_ASSERT(item->size > 0);
4977 
4978  // You may want to enable this validation at the beginning or at the end of
4979  // this function, depending on what you want to check.
4980  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4981 
4982  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4983  {
4984  if(m_FreeSuballocationsBySize.empty())
4985  {
4986  m_FreeSuballocationsBySize.push_back(item);
4987  }
4988  else
4989  {
4990  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
4991  }
4992  }
4993 
4994  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4995 }
4996 
4997 
4998 void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
4999 {
5000  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5001  VMA_ASSERT(item->size > 0);
5002 
5003  // You may want to enable this validation at the beginning or at the end of
5004  // this function, depending on what you want to check.
5005  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5006 
5007  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5008  {
5009  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5010  m_FreeSuballocationsBySize.data(),
5011  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5012  item,
5013  VmaSuballocationItemSizeLess());
5014  for(size_t index = it - m_FreeSuballocationsBySize.data();
5015  index < m_FreeSuballocationsBySize.size();
5016  ++index)
5017  {
5018  if(m_FreeSuballocationsBySize[index] == item)
5019  {
5020  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5021  return;
5022  }
5023  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5024  }
5025  VMA_ASSERT(0 && "Not found.");
5026  }
5027 
5028  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5029 }
5030 
5031 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
5032 {
5033  VkDeviceSize lastSize = 0;
5034  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5035  {
5036  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5037 
5038  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5039  {
5040  VMA_ASSERT(0);
5041  return false;
5042  }
5043  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5044  {
5045  VMA_ASSERT(0);
5046  return false;
5047  }
5048  if(it->size < lastSize)
5049  {
5050  VMA_ASSERT(0);
5051  return false;
5052  }
5053 
5054  lastSize = it->size;
5055  }
5056  return true;
5057 }
5058 
5059 static void InitStatInfo(VmaStatInfo& outInfo)
5060 {
5061  memset(&outInfo, 0, sizeof(outInfo));
5062  outInfo.AllocationSizeMin = UINT64_MAX;
5063  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5064 }
5065 
5066 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
5067 {
5068  outInfo.BlockCount = 1;
5069 
5070  const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
5071  outInfo.AllocationCount = rangeCount - block.m_FreeCount;
5072  outInfo.UnusedRangeCount = block.m_FreeCount;
5073 
5074  outInfo.UnusedBytes = block.m_SumFreeSize;
5075  outInfo.UsedBytes = block.m_Size - outInfo.UnusedBytes;
5076 
5077  outInfo.AllocationSizeMin = UINT64_MAX;
5078  outInfo.AllocationSizeMax = 0;
5079  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5080  outInfo.UnusedRangeSizeMax = 0;
5081 
5082  for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
5083  suballocItem != block.m_Suballocations.cend();
5084  ++suballocItem)
5085  {
5086  const VmaSuballocation& suballoc = *suballocItem;
5087  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5088  {
5089  outInfo.AllocationSizeMin = VMA_MIN(outInfo.AllocationSizeMin, suballoc.size);
5090  outInfo.AllocationSizeMax = VMA_MAX(outInfo.AllocationSizeMax, suballoc.size);
5091  }
5092  else
5093  {
5094  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
5095  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
5096  }
5097  }
5098 }
5099 
5100 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5101 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5102 {
5103  inoutInfo.BlockCount += srcInfo.BlockCount;
5104  inoutInfo.AllocationCount += srcInfo.AllocationCount;
5105  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
5106  inoutInfo.UsedBytes += srcInfo.UsedBytes;
5107  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
5108  inoutInfo.AllocationSizeMin = VMA_MIN(inoutInfo.AllocationSizeMin, srcInfo.AllocationSizeMin);
5109  inoutInfo.AllocationSizeMax = VMA_MAX(inoutInfo.AllocationSizeMax, srcInfo.AllocationSizeMax);
5110  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
5111  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
5112 }
5113 
5114 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5115 {
5116  inoutInfo.AllocationSizeAvg = (inoutInfo.AllocationCount > 0) ?
5117  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.AllocationCount) : 0;
5118  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
5119  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
5120 }
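
// VmaRoundDiv above is integer division rounded to nearest. A minimal sketch
// of such a helper (the library defines its own version elsewhere in this file):
template<typename T>
static inline T ExampleRoundDiv(T x, T y)
{
    // Adding y/2 to the dividend rounds the quotient to the nearest integer.
    return (x + (y / static_cast<T>(2))) / y;
}
// E.g. ExampleRoundDiv<VkDeviceSize>(10, 4) == 3, whereas plain 10 / 4 == 2.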
5121 
5122 VmaPool_T::VmaPool_T(
5123  VmaAllocator hAllocator,
5124  const VmaPoolCreateInfo& createInfo) :
5125  m_BlockVector(
5126  hAllocator,
5127  createInfo.memoryTypeIndex,
5128  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5129  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5130  createInfo.blockSize,
5131  createInfo.minBlockCount,
5132  createInfo.maxBlockCount,
5133  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5134  createInfo.frameInUseCount,
5135  true) // isCustomPool
5136 {
5137 }
5138 
5139 VmaPool_T::~VmaPool_T()
5140 {
5141 }
5142 
5143 #if VMA_STATS_STRING_ENABLED
5144 
5145 #endif // #if VMA_STATS_STRING_ENABLED
5146 
5147 VmaBlockVector::VmaBlockVector(
5148  VmaAllocator hAllocator,
5149  uint32_t memoryTypeIndex,
5150  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5151  VkDeviceSize preferredBlockSize,
5152  size_t minBlockCount,
5153  size_t maxBlockCount,
5154  VkDeviceSize bufferImageGranularity,
5155  uint32_t frameInUseCount,
5156  bool isCustomPool) :
5157  m_hAllocator(hAllocator),
5158  m_MemoryTypeIndex(memoryTypeIndex),
5159  m_BlockVectorType(blockVectorType),
5160  m_PreferredBlockSize(preferredBlockSize),
5161  m_MinBlockCount(minBlockCount),
5162  m_MaxBlockCount(maxBlockCount),
5163  m_BufferImageGranularity(bufferImageGranularity),
5164  m_FrameInUseCount(frameInUseCount),
5165  m_IsCustomPool(isCustomPool),
5166  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5167  m_HasEmptyBlock(false),
5168  m_pDefragmentator(VMA_NULL)
5169 {
5170 }
5171 
5172 VmaBlockVector::~VmaBlockVector()
5173 {
5174  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5175 
5176  for(size_t i = m_Blocks.size(); i--; )
5177  {
5178  m_Blocks[i]->Destroy(m_hAllocator);
5179  vma_delete(m_hAllocator, m_Blocks[i]);
5180  }
5181 }
5182 
5183 VkResult VmaBlockVector::CreateMinBlocks()
5184 {
5185  for(size_t i = 0; i < m_MinBlockCount; ++i)
5186  {
5187  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5188  if(res != VK_SUCCESS)
5189  {
5190  return res;
5191  }
5192  }
5193  return VK_SUCCESS;
5194 }
5195 
5196 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5197 {
5198  pStats->size = 0;
5199  pStats->unusedSize = 0;
5200  pStats->allocationCount = 0;
5201  pStats->unusedRangeCount = 0;
5202 
5203  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5204 
5205  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5206  {
5207  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5208  VMA_ASSERT(pBlock);
5209  VMA_HEAVY_ASSERT(pBlock->Validate());
5210 
5211  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5212 
5213  pStats->size += pBlock->m_Size;
5214  pStats->unusedSize += pBlock->m_SumFreeSize;
5215  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5216  pStats->unusedRangeCount += pBlock->m_FreeCount;
5217  }
5218 }
5219 
5220 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5221 
5222 VkResult VmaBlockVector::Allocate(
5223  VmaPool hCurrentPool,
5224  uint32_t currentFrameIndex,
5225  const VkMemoryRequirements& vkMemReq,
5226  const VmaAllocationCreateInfo& createInfo,
5227  VmaSuballocationType suballocType,
5228  VmaAllocation* pAllocation)
5229 {
5230  // Validate flags.
5231  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5232  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5233  {
5234  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5235  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5236  }
5237 
5238  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5239 
5240  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5241  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5242  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5243  {
5244  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5245  VMA_ASSERT(pCurrBlock);
5246  VmaAllocationRequest currRequest = {};
5247  if(pCurrBlock->CreateAllocationRequest(
5248  currentFrameIndex,
5249  m_FrameInUseCount,
5250  m_BufferImageGranularity,
5251  vkMemReq.size,
5252  vkMemReq.alignment,
5253  suballocType,
5254  false, // canMakeOtherLost
5255  &currRequest))
5256  {
5257  // Allocate from pCurrBlock.
5258  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5259 
5260  // We no longer have an empty block.
5261  if(pCurrBlock->IsEmpty())
5262  {
5263  m_HasEmptyBlock = false;
5264  }
5265 
5266  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5267  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5268  (*pAllocation)->InitBlockAllocation(
5269  hCurrentPool,
5270  pCurrBlock,
5271  currRequest.offset,
5272  vkMemReq.alignment,
5273  vkMemReq.size,
5274  suballocType,
5275  createInfo.pUserData,
5276  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5277  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5278  VMA_DEBUG_LOG("  Returned from existing block #%u", (uint32_t)blockIndex);
5279  return VK_SUCCESS;
5280  }
5281  }
5282 
5283  const bool canCreateNewBlock =
5284  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5285  (m_Blocks.size() < m_MaxBlockCount);
5286 
5287  // 2. Try to create new block.
5288  if(canCreateNewBlock)
5289  {
5290  // 2.1. Start with full preferredBlockSize.
5291  VkDeviceSize blockSize = m_PreferredBlockSize;
5292  size_t newBlockIndex = 0;
5293  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5294  // Allocating blocks of other sizes is allowed only in default pools.
5295  // In custom pools block size is fixed.
5296  if(res < 0 && m_IsCustomPool == false)
5297  {
5298  // 2.2. Try half the size.
5299  blockSize /= 2;
5300  if(blockSize >= vkMemReq.size)
5301  {
5302  res = CreateBlock(blockSize, &newBlockIndex);
5303  if(res < 0)
5304  {
5305  // 2.3. Try quarter the size.
5306  blockSize /= 2;
5307  if(blockSize >= vkMemReq.size)
5308  {
5309  res = CreateBlock(blockSize, &newBlockIndex);
5310  }
5311  }
5312  }
5313  }
5314  if(res == VK_SUCCESS)
5315  {
5316  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5317  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5318 
5319  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5320  VmaAllocationRequest allocRequest = {};
5321  allocRequest.item = pBlock->m_Suballocations.begin();
5322  allocRequest.offset = 0;
5323  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5324  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5325  (*pAllocation)->InitBlockAllocation(
5326  hCurrentPool,
5327  pBlock,
5328  allocRequest.offset,
5329  vkMemReq.alignment,
5330  vkMemReq.size,
5331  suballocType,
5332  createInfo.pUserData,
5333  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5334  VMA_HEAVY_ASSERT(pBlock->Validate());
5335  VMA_DEBUG_LOG("  Created new block Size=%llu", blockSize);
5336 
5337  return VK_SUCCESS;
5338  }
5339  }
5340 
5341  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5342 
5343  // 3. Try to allocate from existing blocks with making other allocations lost.
5344  if(canMakeOtherLost)
5345  {
5346  uint32_t tryIndex = 0;
5347  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5348  {
5349  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5350  VmaAllocationRequest bestRequest = {};
5351  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5352 
5353  // 1. Search existing allocations.
5354  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5355  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5356  {
5357  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5358  VMA_ASSERT(pCurrBlock);
5359  VmaAllocationRequest currRequest = {};
5360  if(pCurrBlock->CreateAllocationRequest(
5361  currentFrameIndex,
5362  m_FrameInUseCount,
5363  m_BufferImageGranularity,
5364  vkMemReq.size,
5365  vkMemReq.alignment,
5366  suballocType,
5367  canMakeOtherLost,
5368  &currRequest))
5369  {
5370  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5371  if(pBestRequestBlock == VMA_NULL ||
5372  currRequestCost < bestRequestCost)
5373  {
5374  pBestRequestBlock = pCurrBlock;
5375  bestRequest = currRequest;
5376  bestRequestCost = currRequestCost;
5377 
5378  if(bestRequestCost == 0)
5379  {
5380  break;
5381  }
5382  }
5383  }
5384  }
5385 
5386  if(pBestRequestBlock != VMA_NULL)
5387  {
5388  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5389  currentFrameIndex,
5390  m_FrameInUseCount,
5391  &bestRequest))
5392  {
5393  // We no longer have an empty block.
5394  if(pBestRequestBlock->IsEmpty())
5395  {
5396  m_HasEmptyBlock = false;
5397  }
5398  // Allocate from pBestRequestBlock.
5399  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5400  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5401  (*pAllocation)->InitBlockAllocation(
5402  hCurrentPool,
5403  pBestRequestBlock,
5404  bestRequest.offset,
5405  vkMemReq.alignment,
5406  vkMemReq.size,
5407  suballocType,
5408  createInfo.pUserData,
5409  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5410  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5411  VMA_DEBUG_LOG("  Returned from existing block");
5412  return VK_SUCCESS;
5413  }
5414  // else: Some allocations must have been touched while we are here. Next try.
5415  }
5416  else
5417  {
5418  // Could not find place in any of the blocks - break outer loop.
5419  break;
5420  }
5421  }
5422  /* Maximum number of tries exceeded - a very unlikely event when many other
5423  threads are simultaneously touching allocations, making it impossible to make
5424  them lost at the same time as we try to allocate. */
5425  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5426  {
5427  return VK_ERROR_TOO_MANY_OBJECTS;
5428  }
5429  }
5430 
5431  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5432 }
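
// The new-block path above halves the preferred size at most twice before
// giving up. The same policy in isolation (a sketch, not the library's API;
// tryCreate stands in for CreateBlock and the checks are simplified):
static VkDeviceSize ExampleChooseBlockSize(
    VkDeviceSize preferredSize,
    VkDeviceSize requiredSize,
    bool (*tryCreate)(VkDeviceSize size))
{
    VkDeviceSize size = preferredSize;
    for(int attempt = 0; attempt < 3; ++attempt) // Full, 1/2, 1/4 of preferred.
    {
        if(size < requiredSize)
        {
            break; // A smaller block could not fit the request anyway.
        }
        if(tryCreate(size))
        {
            return size;
        }
        size /= 2;
    }
    return 0; // All attempts failed.
}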
5433 
5434 void VmaBlockVector::Free(
5435  VmaAllocation hAllocation)
5436 {
5437  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5438 
5439  // Scope for lock.
5440  {
5441  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5442 
5443  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5444 
5445  pBlock->Free(hAllocation);
5446  VMA_HEAVY_ASSERT(pBlock->Validate());
5447 
5448  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5449 
5450  // pBlock became empty after this deallocation.
5451  if(pBlock->IsEmpty())
5452  {
5453  // We already have an empty block - we don't want two, so delete this one.
5454  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5455  {
5456  pBlockToDelete = pBlock;
5457  Remove(pBlock);
5458  }
5459  // We now have our first empty block.
5460  else
5461  {
5462  m_HasEmptyBlock = true;
5463  }
5464  }
5465  // Must be done after the free, because sorting may change the order of blocks.
5466  IncrementallySortBlocks();
5467  }
5468 
5469  // Destruction of an empty block. Deferred until this point, outside of the
5470  // mutex lock, for performance reasons.
5471  if(pBlockToDelete != VMA_NULL)
5472  {
5473  VMA_DEBUG_LOG("  Deleted empty block");
5474  pBlockToDelete->Destroy(m_hAllocator);
5475  vma_delete(m_hAllocator, pBlockToDelete);
5476  }
5477 }
5478 
5479 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5480 {
5481  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5482  {
5483  if(m_Blocks[blockIndex] == pBlock)
5484  {
5485  VmaVectorRemove(m_Blocks, blockIndex);
5486  return;
5487  }
5488  }
5489  VMA_ASSERT(0);
5490 }
5491 
5492 void VmaBlockVector::IncrementallySortBlocks()
5493 {
5494  // Bubble sort only until first swap: at most one swap per call amortizes the cost of keeping m_Blocks sorted by ascending free size across many calls.
5495  for(size_t i = 1; i < m_Blocks.size(); ++i)
5496  {
5497  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5498  {
5499  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5500  return;
5501  }
5502  }
5503 }
5504 
5505 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5506 {
5507  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5508  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5509  allocInfo.allocationSize = blockSize;
5510  VkDeviceMemory mem = VK_NULL_HANDLE;
5511  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5512  if(res < 0)
5513  {
5514  return res;
5515  }
5516 
5517  // New VkDeviceMemory successfully created.
5518 
5519  // Map memory if needed.
5520  void* pMappedData = VMA_NULL;
5521  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5522  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5523  {
5524  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5525  m_hAllocator->m_hDevice,
5526  mem,
5527  0,
5528  VK_WHOLE_SIZE,
5529  0,
5530  &pMappedData);
5531  if(res < 0)
5532  {
5533  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5534  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5535  return res;
5536  }
5537  }
5538 
5539  // Create a new block object for it.
5540  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5541  pBlock->Init(
5542  m_MemoryTypeIndex,
5543  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5544  mem,
5545  allocInfo.allocationSize,
5546  persistentMap,
5547  pMappedData);
5548 
5549  m_Blocks.push_back(pBlock);
5550  if(pNewBlockIndex != VMA_NULL)
5551  {
5552  *pNewBlockIndex = m_Blocks.size() - 1;
5553  }
5554 
5555  return VK_SUCCESS;
5556 }
5557 
5558 #if VMA_STATS_STRING_ENABLED
5559 
5560 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5561 {
5562  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5563 
5564  json.BeginObject();
5565 
5566  if(m_IsCustomPool)
5567  {
5568  json.WriteString("MemoryTypeIndex");
5569  json.WriteNumber(m_MemoryTypeIndex);
5570 
5571  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5572  {
5573  json.WriteString("Mapped");
5574  json.WriteBool(true);
5575  }
5576 
5577  json.WriteString("BlockSize");
5578  json.WriteNumber(m_PreferredBlockSize);
5579 
5580  json.WriteString("BlockCount");
5581  json.BeginObject(true);
5582  if(m_MinBlockCount > 0)
5583  {
5584  json.WriteString("Min");
5585  json.WriteNumber(m_MinBlockCount);
5586  }
5587  if(m_MaxBlockCount < SIZE_MAX)
5588  {
5589  json.WriteString("Max");
5590  json.WriteNumber(m_MaxBlockCount);
5591  }
5592  json.WriteString("Cur");
5593  json.WriteNumber(m_Blocks.size());
5594  json.EndObject();
5595 
5596  if(m_FrameInUseCount > 0)
5597  {
5598  json.WriteString("FrameInUseCount");
5599  json.WriteNumber(m_FrameInUseCount);
5600  }
5601  }
5602  else
5603  {
5604  json.WriteString("PreferredBlockSize");
5605  json.WriteNumber(m_PreferredBlockSize);
5606  }
5607 
5608  json.WriteString("Blocks");
5609  json.BeginArray();
5610  for(size_t i = 0; i < m_Blocks.size(); ++i)
5611  {
5612  m_Blocks[i]->PrintDetailedMap(json);
5613  }
5614  json.EndArray();
5615 
5616  json.EndObject();
5617 }
5618 
5619 #endif // #if VMA_STATS_STRING_ENABLED
5620 
5621 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5622 {
5623  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5624 
5625  for(size_t i = m_Blocks.size(); i--; )
5626  {
5627  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5628  if(pBlock->m_pMappedData != VMA_NULL)
5629  {
5630  VMA_ASSERT(pBlock->m_PersistentMap);
5631  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5632  pBlock->m_pMappedData = VMA_NULL;
5633  }
5634  }
5635 }
5636 
5637 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5638 {
5639  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5640 
5641  VkResult finalResult = VK_SUCCESS;
5642  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5643  {
5644  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5645  if(pBlock->m_PersistentMap)
5646  {
5647  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5648  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5649  m_hAllocator->m_hDevice,
5650  pBlock->m_hMemory,
5651  0,
5652  VK_WHOLE_SIZE,
5653  0,
5654  &pBlock->m_pMappedData);
5655  if(localResult != VK_SUCCESS)
5656  {
5657  finalResult = localResult;
5658  }
5659  }
5660  }
5661  return finalResult;
5662 }
5663 
5664 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5665  VmaAllocator hAllocator,
5666  uint32_t currentFrameIndex)
5667 {
5668  if(m_pDefragmentator == VMA_NULL)
5669  {
5670  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5671  hAllocator,
5672  this,
5673  currentFrameIndex);
5674  }
5675 
5676  return m_pDefragmentator;
5677 }
5678 
5679 VkResult VmaBlockVector::Defragment(
5680  VmaDefragmentationStats* pDefragmentationStats,
5681  VkDeviceSize& maxBytesToMove,
5682  uint32_t& maxAllocationsToMove)
5683 {
5684  if(m_pDefragmentator == VMA_NULL)
5685  {
5686  return VK_SUCCESS;
5687  }
5688 
5689  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5690 
5691  // Defragment.
5692  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5693 
5694  // Accumulate statistics.
5695  if(pDefragmentationStats != VMA_NULL)
5696  {
5697  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5698  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5699  pDefragmentationStats->bytesMoved += bytesMoved;
5700  pDefragmentationStats->allocationsMoved += allocationsMoved;
5701  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5702  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5703  maxBytesToMove -= bytesMoved;
5704  maxAllocationsToMove -= allocationsMoved;
5705  }
5706 
5707  // Free empty blocks.
5708  m_HasEmptyBlock = false;
5709  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5710  {
5711  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5712  if(pBlock->IsEmpty())
5713  {
5714  if(m_Blocks.size() > m_MinBlockCount)
5715  {
5716  if(pDefragmentationStats != VMA_NULL)
5717  {
5718  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5719  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5720  }
5721 
5722  VmaVectorRemove(m_Blocks, blockIndex);
5723  pBlock->Destroy(m_hAllocator);
5724  vma_delete(m_hAllocator, pBlock);
5725  }
5726  else
5727  {
5728  m_HasEmptyBlock = true;
5729  }
5730  }
5731  }
5732 
5733  return result;
5734 }
5735 
5736 void VmaBlockVector::DestroyDefragmentator()
5737 {
5738  if(m_pDefragmentator != VMA_NULL)
5739  {
5740  vma_delete(m_hAllocator, m_pDefragmentator);
5741  m_pDefragmentator = VMA_NULL;
5742  }
5743 }
5744 
5745 void VmaBlockVector::MakePoolAllocationsLost(
5746  uint32_t currentFrameIndex,
5747  size_t* pLostAllocationCount)
5748 {
5749  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5750 
5751  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5752  {
5753  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5754  VMA_ASSERT(pBlock);
5755  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5756  }
5757 }
5758 
5759 void VmaBlockVector::AddStats(VmaStats* pStats)
5760 {
5761  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5762  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5763 
5764  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5765 
5766  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5767  {
5768  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5769  VMA_ASSERT(pBlock);
5770  VMA_HEAVY_ASSERT(pBlock->Validate());
5771  VmaStatInfo allocationStatInfo;
5772  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5773  VmaAddStatInfo(pStats->total, allocationStatInfo);
5774  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5775  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5776  }
5777 }
5778 
5779 ////////////////////////////////////////////////////////////////////////////////
5780 // VmaDefragmentator members definition
5781 
5782 VmaDefragmentator::VmaDefragmentator(
5783  VmaAllocator hAllocator,
5784  VmaBlockVector* pBlockVector,
5785  uint32_t currentFrameIndex) :
5786  m_hAllocator(hAllocator),
5787  m_pBlockVector(pBlockVector),
5788  m_CurrentFrameIndex(currentFrameIndex),
5789  m_BytesMoved(0),
5790  m_AllocationsMoved(0),
5791  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5792  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5793 {
5794 }
5795 
5796 VmaDefragmentator::~VmaDefragmentator()
5797 {
5798  for(size_t i = m_Blocks.size(); i--; )
5799  {
5800  vma_delete(m_hAllocator, m_Blocks[i]);
5801  }
5802 }
5803 
5804 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5805 {
5806  AllocationInfo allocInfo;
5807  allocInfo.m_hAllocation = hAlloc;
5808  allocInfo.m_pChanged = pChanged;
5809  m_Allocations.push_back(allocInfo);
5810 }
5811 
5812 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5813 {
5814  // It has already been mapped for defragmentation.
5815  if(m_pMappedDataForDefragmentation)
5816  {
5817  *ppMappedData = m_pMappedDataForDefragmentation;
5818  return VK_SUCCESS;
5819  }
5820 
5821  // It is persistently mapped.
5822  if(m_pBlock->m_PersistentMap)
5823  {
5824  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5825  *ppMappedData = m_pBlock->m_pMappedData;
5826  return VK_SUCCESS;
5827  }
5828 
5829  // Map on first usage.
5830  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5831  hAllocator->m_hDevice,
5832  m_pBlock->m_hMemory,
5833  0,
5834  VK_WHOLE_SIZE,
5835  0,
5836  &m_pMappedDataForDefragmentation);
5837  *ppMappedData = m_pMappedDataForDefragmentation;
5838  return res;
5839 }
5840 
5841 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5842 {
5843  if(m_pMappedDataForDefragmentation != VMA_NULL)
5844  {
5845  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5846  }
5847 }
5848 
5849 VkResult VmaDefragmentator::DefragmentRound(
5850  VkDeviceSize maxBytesToMove,
5851  uint32_t maxAllocationsToMove)
5852 {
5853  if(m_Blocks.empty())
5854  {
5855  return VK_SUCCESS;
5856  }
5857 
5858  size_t srcBlockIndex = m_Blocks.size() - 1;
5859  size_t srcAllocIndex = SIZE_MAX;
5860  for(;;)
5861  {
5862  // 1. Find next allocation to move.
5863  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5864  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5865  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5866  {
5867  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5868  {
5869  // Finished: no more allocations to process.
5870  if(srcBlockIndex == 0)
5871  {
5872  return VK_SUCCESS;
5873  }
5874  else
5875  {
5876  --srcBlockIndex;
5877  srcAllocIndex = SIZE_MAX;
5878  }
5879  }
5880  else
5881  {
5882  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5883  }
5884  }
5885 
5886  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5887  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5888 
5889  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5890  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5891  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5892  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5893 
5894  // 2. Try to find new place for this allocation in preceding or current block.
5895  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5896  {
5897  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5898  VmaAllocationRequest dstAllocRequest;
5899  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
5900  m_CurrentFrameIndex,
5901  m_pBlockVector->GetFrameInUseCount(),
5902  m_pBlockVector->GetBufferImageGranularity(),
5903  size,
5904  alignment,
5905  suballocType,
5906  false, // canMakeOtherLost
5907  &dstAllocRequest) &&
5908  MoveMakesSense(
5909  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5910  {
5911  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5912 
5913  // Reached limit on number of allocations or bytes to move.
5914  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5915  (m_BytesMoved + size > maxBytesToMove))
5916  {
5917  return VK_INCOMPLETE;
5918  }
5919 
5920  void* pDstMappedData = VMA_NULL;
5921  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
5922  if(res != VK_SUCCESS)
5923  {
5924  return res;
5925  }
5926 
5927  void* pSrcMappedData = VMA_NULL;
5928  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
5929  if(res != VK_SUCCESS)
5930  {
5931  return res;
5932  }
5933 
5934  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
5935  memcpy(
5936  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
5937  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
5938  static_cast<size_t>(size));
5939 
5940  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
5941  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
5942 
5943  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
5944 
5945  if(allocInfo.m_pChanged != VMA_NULL)
5946  {
5947  *allocInfo.m_pChanged = VK_TRUE;
5948  }
5949 
5950  ++m_AllocationsMoved;
5951  m_BytesMoved += size;
5952 
5953  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
5954 
5955  break;
5956  }
5957  }
5958 
5959  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
5960 
5961  if(srcAllocIndex > 0)
5962  {
5963  --srcAllocIndex;
5964  }
5965  else
5966  {
5967  if(srcBlockIndex > 0)
5968  {
5969  --srcBlockIndex;
5970  srcAllocIndex = SIZE_MAX;
5971  }
5972  else
5973  {
5974  return VK_SUCCESS;
5975  }
5976  }
5977  }
5978 }
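
// The round above scans blocks from last (most "source") to first (most
// "destination") and, within each block, from the largest allocation to the
// smallest, so large allocations get first choice of free space in earlier
// blocks; whatever cannot be placed stays behind for the next round.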
5979 
5980 VkResult VmaDefragmentator::Defragment(
5981  VkDeviceSize maxBytesToMove,
5982  uint32_t maxAllocationsToMove)
5983 {
5984  if(m_Allocations.empty())
5985  {
5986  return VK_SUCCESS;
5987  }
5988 
5989  // Create block info for each block.
5990  const size_t blockCount = m_pBlockVector->m_Blocks.size();
5991  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5992  {
5993  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
5994  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
5995  m_Blocks.push_back(pBlockInfo);
5996  }
5997 
5998  // Sort them by m_pBlock pointer value.
5999  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6000 
6001  // Move allocation infos from m_Allocations into the m_Allocations of the matching m_Blocks entry.
6002  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6003  {
6004  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6005  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
6006  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6007  {
6008  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6009  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6010  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6011  {
6012  (*it)->m_Allocations.push_back(allocInfo);
6013  }
6014  else
6015  {
6016  VMA_ASSERT(0);
6017  }
6018  }
6019  }
6020  m_Allocations.clear();
6021 
6022  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6023  {
6024  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6025  pBlockInfo->CalcHasNonMovableAllocations();
6026  pBlockInfo->SortAllocationsBySizeDescecnding();
6027  }
6028 
6029  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6030  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6031 
6032  // Execute defragmentation rounds (the main part).
6033  VkResult result = VK_SUCCESS;
6034  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6035  {
6036  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6037  }
6038 
6039  // Unmap blocks that were mapped for defragmentation.
6040  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6041  {
6042  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6043  }
6044 
6045  return result;
6046 }
6047 
6048 bool VmaDefragmentator::MoveMakesSense(
6049  size_t dstBlockIndex, VkDeviceSize dstOffset,
6050  size_t srcBlockIndex, VkDeviceSize srcOffset)
6051 {
6052  if(dstBlockIndex < srcBlockIndex)
6053  {
6054  return true;
6055  }
6056  if(dstBlockIndex > srcBlockIndex)
6057  {
6058  return false;
6059  }
6060  if(dstOffset < srcOffset)
6061  {
6062  return true;
6063  }
6064  return false;
6065 }
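
// Decision table for MoveMakesSense - a move must strictly compact data
// toward earlier blocks and lower offsets:
//   dstBlockIndex < srcBlockIndex         -> move (earlier block)
//   dstBlockIndex > srcBlockIndex         -> don't move
//   same block, dstOffset < srcOffset     -> move (lower offset)
//   same block, dstOffset >= srcOffset    -> don't move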
6066 
6067 ////////////////////////////////////////////////////////////////////////////////
6068 // VmaAllocator_T
6069 
6070 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6071  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6072  m_PhysicalDevice(pCreateInfo->physicalDevice),
6073  m_hDevice(pCreateInfo->device),
6074  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6075  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6076  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6077  m_UnmapPersistentlyMappedMemoryCounter(0),
6078  m_PreferredLargeHeapBlockSize(0),
6079  m_PreferredSmallHeapBlockSize(0),
6080  m_CurrentFrameIndex(0),
6081  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6082 {
6083  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6084 
6085  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6086  memset(&m_MemProps, 0, sizeof(m_MemProps));
6087  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6088 
6089  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6090  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
6091 
6092  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6093  {
6094  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6095  }
6096 
6097  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6098  {
6099  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6100  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6101  }
6102 
6103  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6104 
6105  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6106  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6107 
6108  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6109  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6110  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6111  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6112 
6113  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6114  {
6115  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6116  {
6117  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6118  if(limit != VK_WHOLE_SIZE)
6119  {
6120  m_HeapSizeLimit[heapIndex] = limit;
6121  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6122  {
6123  m_MemProps.memoryHeaps[heapIndex].size = limit;
6124  }
6125  }
6126  }
6127  }
6128 
6129  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6130  {
6131  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6132 
6133  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6134  {
6135  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6136  this,
6137  memTypeIndex,
6138  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6139  preferredBlockSize,
6140  0,
6141  SIZE_MAX,
6142  GetBufferImageGranularity(),
6143  pCreateInfo->frameInUseCount,
6144  false); // isCustomPool
6145  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6146  // because minBlockCount is 0.
6147  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6148  }
6149  }
6150 }
6151 
6152 VmaAllocator_T::~VmaAllocator_T()
6153 {
6154  VMA_ASSERT(m_Pools.empty());
6155 
6156  for(size_t i = GetMemoryTypeCount(); i--; )
6157  {
6158  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6159  {
6160  vma_delete(this, m_pOwnAllocations[i][j]);
6161  vma_delete(this, m_pBlockVectors[i][j]);
6162  }
6163  }
6164 }
6165 
6166 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6167 {
6168 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6169  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6170  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6171  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6172  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6173  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6174  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6175  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6176  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6177  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6178  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6179  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6180  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6181  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6182  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6183 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6184 
6185  if(pVulkanFunctions != VMA_NULL)
6186  {
6187  m_VulkanFunctions = *pVulkanFunctions;
6188  }
6189 
6190  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6191  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6192  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6193  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6194  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6195  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6196  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6197  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6198  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6199  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6200  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6201  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6202  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6203  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6204  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6205  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6206 }
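
// Typical usage when VMA_STATIC_VULKAN_FUNCTIONS is 0: fill VmaVulkanFunctions
// with statically linked entry points and pass it at allocator creation. A
// sketch; physicalDevice and device are assumed to exist in the caller's scope:
static VkResult ExampleCreateAllocatorWithFunctions(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = &vkAllocateMemory;
    funcs.vkFreeMemory = &vkFreeMemory;
    funcs.vkMapMemory = &vkMapMemory;
    funcs.vkUnmapMemory = &vkUnmapMemory;
    funcs.vkBindBufferMemory = &vkBindBufferMemory;
    funcs.vkBindImageMemory = &vkBindImageMemory;
    funcs.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    funcs.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    funcs.vkCreateBuffer = &vkCreateBuffer;
    funcs.vkDestroyBuffer = &vkDestroyBuffer;
    funcs.vkCreateImage = &vkCreateImage;
    funcs.vkDestroyImage = &vkDestroyImage;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &funcs;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}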
6207 
6208 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6209 {
6210  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6211  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6212  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6213  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6214 }
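
// Example: assuming VMA_SMALL_HEAP_MAX_SIZE of 512 MiB (defined earlier in
// this file), a 256 MiB heap gets m_PreferredSmallHeapBlockSize while an
// 8 GiB heap gets m_PreferredLargeHeapBlockSize.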
6215 
6216 VkResult VmaAllocator_T::AllocateMemoryOfType(
6217  const VkMemoryRequirements& vkMemReq,
6218  const VmaAllocationCreateInfo& createInfo,
6219  uint32_t memTypeIndex,
6220  VmaSuballocationType suballocType,
6221  VmaAllocation* pAllocation)
6222 {
6223  VMA_ASSERT(pAllocation != VMA_NULL);
6224  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6225 
6226  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6227  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6228  VMA_ASSERT(blockVector);
6229 
6230  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6231  // Heuristic: allocate own memory if the requested size is greater than half of the preferred block size.
6232  const bool ownMemory =
6233  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
6234  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
6235  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6236  vkMemReq.size > preferredBlockSize / 2);
6237 
6238  if(ownMemory)
6239  {
6240  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6241  {
6242  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6243  }
6244  else
6245  {
6246  return AllocateOwnMemory(
6247  vkMemReq.size,
6248  suballocType,
6249  memTypeIndex,
6250  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6251  createInfo.pUserData,
6252  pAllocation);
6253  }
6254  }
6255  else
6256  {
6257  VkResult res = blockVector->Allocate(
6258  VK_NULL_HANDLE, // hCurrentPool
6259  m_CurrentFrameIndex.load(),
6260  vkMemReq,
6261  createInfo,
6262  suballocType,
6263  pAllocation);
6264  if(res == VK_SUCCESS)
6265  {
6266  return res;
6267  }
6268 
6269  // Fall back to own memory.
6270  res = AllocateOwnMemory(
6271  vkMemReq.size,
6272  suballocType,
6273  memTypeIndex,
6274  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6275  createInfo.pUserData,
6276  pAllocation);
6277  if(res == VK_SUCCESS)
6278  {
6279  // Succeeded: AllocateOwnMemory function already filled *pAllocation, nothing more to do here.
6280  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6281  return VK_SUCCESS;
6282  }
6283  else
6284  {
6285  // Everything failed: Return error code.
6286  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6287  return res;
6288  }
6289  }
6290 }
6291 
6292 VkResult VmaAllocator_T::AllocateOwnMemory(
6293  VkDeviceSize size,
6294  VmaSuballocationType suballocType,
6295  uint32_t memTypeIndex,
6296  bool map,
6297  void* pUserData,
6298  VmaAllocation* pAllocation)
6299 {
6300  VMA_ASSERT(pAllocation);
6301 
6302  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6303  allocInfo.memoryTypeIndex = memTypeIndex;
6304  allocInfo.allocationSize = size;
6305 
6306  // Allocate VkDeviceMemory.
6307  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6308  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6309  if(res < 0)
6310  {
6311  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6312  return res;
6313  }
6314 
6315  void* pMappedData = VMA_NULL;
6316  if(map)
6317  {
6318  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6319  {
6320  res = (*m_VulkanFunctions.vkMapMemory)(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6321  if(res < 0)
6322  {
6323  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6324  FreeVulkanMemory(memTypeIndex, size, hMemory);
6325  return res;
6326  }
6327  }
6328  }
6329 
6330  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6331  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6332 
6333  // Register it in m_pOwnAllocations.
6334  {
6335  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6336  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6337  VMA_ASSERT(pOwnAllocations);
6338  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6339  }
6340 
6341  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6342 
6343  return VK_SUCCESS;
6344 }
6345 
6346 VkResult VmaAllocator_T::AllocateMemory(
6347  const VkMemoryRequirements& vkMemReq,
6348  const VmaAllocationCreateInfo& createInfo,
6349  VmaSuballocationType suballocType,
6350  VmaAllocation* pAllocation)
6351 {
6352  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6353  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6354  {
6355  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6356  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6357  }
6358  if((createInfo.pool != VK_NULL_HANDLE) &&
6359  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6360  {
6361  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6362  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6363  }
6364 
6365  if(createInfo.pool != VK_NULL_HANDLE)
6366  {
6367  return createInfo.pool->m_BlockVector.Allocate(
6368  createInfo.pool,
6369  m_CurrentFrameIndex.load(),
6370  vkMemReq,
6371  createInfo,
6372  suballocType,
6373  pAllocation);
6374  }
6375  else
6376  {
6377  // Bit mask of Vulkan memory types acceptable for this allocation.
6378  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6379  uint32_t memTypeIndex = UINT32_MAX;
6380  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6381  if(res == VK_SUCCESS)
6382  {
6383  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6384  // Succeeded on first try.
6385  if(res == VK_SUCCESS)
6386  {
6387  return res;
6388  }
6389  // Allocation from this memory type failed. Try other compatible memory types.
6390  else
6391  {
6392  for(;;)
6393  {
6394  // Remove old memTypeIndex from list of possibilities.
6395  memoryTypeBits &= ~(1u << memTypeIndex);
6396  // Find alternative memTypeIndex.
6397  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6398  if(res == VK_SUCCESS)
6399  {
6400  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6401  // Allocation from this alternative memory type succeeded.
6402  if(res == VK_SUCCESS)
6403  {
6404  return res;
6405  }
6406  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6407  }
6408  // No other matching memory type index could be found.
6409  else
6410  {
6411  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6412  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6413  }
6414  }
6415  }
6416  }
6417  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6418  else
6419  return res;
6420  }
6421 }
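
// The fallback loop above retires one candidate bit per failed attempt. The
// core bit manipulation in isolation (a sketch - unlike vmaFindMemoryTypeIndex,
// which ranks candidates by how well they match the requested flags, this
// simply returns the lowest remaining bit):
static int ExampleNextMemoryType(uint32_t& memoryTypeBits, int failedIndex)
{
    if(failedIndex >= 0)
    {
        memoryTypeBits &= ~(1u << failedIndex); // Exclude the type that failed.
    }
    for(int i = 0; i < 32; ++i)
    {
        if((memoryTypeBits & (1u << i)) != 0)
        {
            return i; // Lowest remaining candidate.
        }
    }
    return -1; // No acceptable memory type left.
}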
6422 
6423 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6424 {
6425  VMA_ASSERT(allocation);
6426 
6427  if(allocation->CanBecomeLost() == false ||
6428  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6429  {
6430  switch(allocation->GetType())
6431  {
6432  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6433  {
6434  VmaBlockVector* pBlockVector = VMA_NULL;
6435  VmaPool hPool = allocation->GetPool();
6436  if(hPool != VK_NULL_HANDLE)
6437  {
6438  pBlockVector = &hPool->m_BlockVector;
6439  }
6440  else
6441  {
6442  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6443  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6444  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6445  }
6446  pBlockVector->Free(allocation);
6447  }
6448  break;
6449  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6450  FreeOwnMemory(allocation);
6451  break;
6452  default:
6453  VMA_ASSERT(0);
6454  }
6455  }
6456 
6457  vma_delete(this, allocation);
6458 }
6459 
6460 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6461 {
6462  // Initialize.
6463  InitStatInfo(pStats->total);
6464  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6465  InitStatInfo(pStats->memoryType[i]);
6466  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6467  InitStatInfo(pStats->memoryHeap[i]);
6468 
6469  // Process default pools.
6470  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6471  {
6472  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6473  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6474  {
6475  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6476  VMA_ASSERT(pBlockVector);
6477  pBlockVector->AddStats(pStats);
6478  }
6479  }
6480 
6481  // Process custom pools.
6482  {
6483  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6484  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6485  {
6486  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6487  }
6488  }
6489 
6490  // Process own allocations.
6491  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6492  {
6493  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6494  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6495  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6496  {
6497  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6498  VMA_ASSERT(pOwnAllocVector);
6499  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6500  {
6501  VmaStatInfo allocationStatInfo;
6502  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6503  VmaAddStatInfo(pStats->total, allocationStatInfo);
6504  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6505  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6506  }
6507  }
6508  }
6509 
6510  // Postprocess.
6511  VmaPostprocessCalcStatInfo(pStats->total);
6512  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6513  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6514  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6515  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6516 }
6517 
6518 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002
6519 
6520 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6521 {
6522  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6523  {
6524  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6525  {
6526  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6527  {
6528  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6529  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6530  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6531  {
6532  // Process OwnAllocations.
6533  {
6534  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6535  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6536  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6537  {
6538  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6539  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
6540  }
6541  }
6542 
6543  // Process normal Allocations.
6544  {
6545  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6546  pBlockVector->UnmapPersistentlyMappedMemory();
6547  }
6548  }
6549  }
6550 
6551  // Process custom pools.
6552  {
6553  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6554  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6555  {
6556  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6557  }
6558  }
6559  }
6560  }
6561 }
6562 
6563 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6564 {
6565  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6566  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6567  {
6568  VkResult finalResult = VK_SUCCESS;
6569  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6570  {
6571  // Process custom pools.
6572  {
6573  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6574  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6575  {
6576  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6577  }
6578  }
6579 
6580  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6581  {
6582  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6583  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6584  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6585  {
6586  // Process OwnAllocations.
6587  {
6588  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6589  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6590  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6591  {
6592  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6593  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
6594  }
6595  }
6596 
6597  // Process normal Allocations.
6598  {
6599  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6600  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6601  if(localResult != VK_SUCCESS)
6602  {
6603  finalResult = localResult;
6604  }
6605  }
6606  }
6607  }
6608  }
6609  return finalResult;
6610  }
6611  else
6612  return VK_SUCCESS;
6613 }
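
// Usage sketch (hypothetical application code; function names are placeholders):
// the two methods above back vmaUnmapPersistentlyMappedMemory() /
// vmaMapPersistentlyMappedMemory(). An application releases all persistent host
// mappings while it does not need CPU access (e.g. while inactive) and restores
// them afterwards. Calls can nest; only the outermost pair takes effect.
void OnApplicationDeactivated(VmaAllocator allocator)
{
    vmaUnmapPersistentlyMappedMemory(allocator);
}
void OnApplicationActivated(VmaAllocator allocator)
{
    VkResult res = vmaMapPersistentlyMappedMemory(allocator);
    (void)res; // A real application would handle a mapping failure here.
}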
6614 
6615 VkResult VmaAllocator_T::Defragment(
6616  VmaAllocation* pAllocations,
6617  size_t allocationCount,
6618  VkBool32* pAllocationsChanged,
6619  const VmaDefragmentationInfo* pDefragmentationInfo,
6620  VmaDefragmentationStats* pDefragmentationStats)
6621 {
6622  if(pAllocationsChanged != VMA_NULL)
6623  {
6624  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
6625  }
6626  if(pDefragmentationStats != VMA_NULL)
6627  {
6628  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6629  }
6630 
6631  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6632  {
6633  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6634  return VK_ERROR_MEMORY_MAP_FAILED;
6635  }
6636 
6637  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6638 
6639  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6640 
6641  const size_t poolCount = m_Pools.size();
6642 
6643  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6644  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6645  {
6646  VmaAllocation hAlloc = pAllocations[allocIndex];
6647  VMA_ASSERT(hAlloc);
6648  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6649  // OwnAlloc cannot be defragmented.
6650  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6651  // Only HOST_VISIBLE memory types can be defragmented.
6652  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6653  // Lost allocation cannot be defragmented.
6654  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6655  {
6656  VmaBlockVector* pAllocBlockVector = nullptr;
6657 
6658  const VmaPool hAllocPool = hAlloc->GetPool();
6659  // This allocation belongs to a custom pool.
6660  if(hAllocPool != VK_NULL_HANDLE)
6661  {
6662  pAllocBlockVector = &hAllocPool->GetBlockVector();
6663  }
6664  // This allocation belongs to the general pool.
6665  else
6666  {
6667  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6668  }
6669 
6670  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6671 
6672  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6673  &pAllocationsChanged[allocIndex] : VMA_NULL;
6674  pDefragmentator->AddAllocation(hAlloc, pChanged);
6675  }
6676  }
6677 
6678  VkResult result = VK_SUCCESS;
6679 
6680  // ======== Main processing.
6681 
6682  VkDeviceSize maxBytesToMove = UINT64_MAX; // No limit unless pDefragmentationInfo says otherwise.
6683  uint32_t maxAllocationsToMove = UINT32_MAX;
6684  if(pDefragmentationInfo != VMA_NULL)
6685  {
6686  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6687  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6688  }
6689 
6690  // Process standard memory.
6691  for(uint32_t memTypeIndex = 0;
6692  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6693  ++memTypeIndex)
6694  {
6695  // Only HOST_VISIBLE memory types can be defragmented.
6696  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6697  {
6698  for(uint32_t blockVectorType = 0;
6699  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6700  ++blockVectorType)
6701  {
6702  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6703  pDefragmentationStats,
6704  maxBytesToMove,
6705  maxAllocationsToMove);
6706  }
6707  }
6708  }
6709 
6710  // Process custom pools.
6711  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6712  {
6713  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6714  pDefragmentationStats,
6715  maxBytesToMove,
6716  maxAllocationsToMove);
6717  }
6718 
6719  // ======== Destroy defragmentators.
6720 
6721  // Process custom pools.
6722  for(size_t poolIndex = poolCount; poolIndex--; )
6723  {
6724  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6725  }
6726 
6727  // Process standard memory.
6728  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6729  {
6730  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6731  {
6732  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6733  {
6734  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6735  }
6736  }
6737  }
6738 
6739  return result;
6740 }
6741 
6742 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6743 {
6744  if(hAllocation->CanBecomeLost())
6745  {
6746  /*
6747  Warning: This is a carefully designed algorithm.
6748  Do not modify unless you really know what you're doing :)
6749  */
6750  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6751  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6752  for(;;)
6753  {
6754  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6755  {
6756  pAllocationInfo->memoryType = UINT32_MAX;
6757  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6758  pAllocationInfo->offset = 0;
6759  pAllocationInfo->size = hAllocation->GetSize();
6760  pAllocationInfo->pMappedData = VMA_NULL;
6761  pAllocationInfo->pUserData = hAllocation->GetUserData();
6762  return;
6763  }
6764  else if(localLastUseFrameIndex == localCurrFrameIndex)
6765  {
6766  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6767  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6768  pAllocationInfo->offset = hAllocation->GetOffset();
6769  pAllocationInfo->size = hAllocation->GetSize();
6770  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6771  pAllocationInfo->pUserData = hAllocation->GetUserData();
6772  return;
6773  }
6774  else // Last use time earlier than current time.
6775  {
6776  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6777  {
6778  localLastUseFrameIndex = localCurrFrameIndex;
6779  }
6780  }
6781  }
6782  }
6783  // For allocations that cannot become lost we can read the fields directly, without the atomic compare-exchange on LastUseFrameIndex.
6784  else
6785  {
6786  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6787  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6788  pAllocationInfo->offset = hAllocation->GetOffset();
6789  pAllocationInfo->size = hAllocation->GetSize();
6790  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6791  pAllocationInfo->pUserData = hAllocation->GetUserData();
6792  }
6793 }
6794 
6795 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6796 {
6797  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6798 
6799  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6800 
6801  if(newCreateInfo.maxBlockCount == 0)
6802  {
6803  newCreateInfo.maxBlockCount = SIZE_MAX;
6804  }
6805  if(newCreateInfo.blockSize == 0)
6806  {
6807  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6808  }
6809 
6810  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6811 
6812  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6813  if(res != VK_SUCCESS)
6814  {
6815  vma_delete(this, *pPool);
6816  *pPool = VMA_NULL;
6817  return res;
6818  }
6819 
6820  // Add to m_Pools.
6821  {
6822  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6823  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6824  }
6825 
6826  return VK_SUCCESS;
6827 }
6828 
6829 void VmaAllocator_T::DestroyPool(VmaPool pool)
6830 {
6831  // Remove from m_Pools.
6832  {
6833  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6834  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6835  VMA_ASSERT(success && "Pool not found in Allocator.");
6836  }
6837 
6838  vma_delete(this, pool);
6839 }
6840 
6841 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6842 {
6843  pool->m_BlockVector.GetPoolStats(pPoolStats);
6844 }
6845 
6846 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6847 {
6848  m_CurrentFrameIndex.store(frameIndex);
6849 }
6850 
6851 void VmaAllocator_T::MakePoolAllocationsLost(
6852  VmaPool hPool,
6853  size_t* pLostAllocationCount)
6854 {
6855  hPool->m_BlockVector.MakePoolAllocationsLost(
6856  m_CurrentFrameIndex.load(),
6857  pLostAllocationCount);
6858 }
6859 
6860 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6861 {
6862  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6863  (*pAllocation)->InitLost();
6864 }
6865 
6866 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6867 {
6868  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6869 
6870  VkResult res;
6871  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6872  {
6873  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6874  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6875  {
6876  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6877  if(res == VK_SUCCESS)
6878  {
6879  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6880  }
6881  }
6882  else
6883  {
6884  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6885  }
6886  }
6887  else
6888  {
6889  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6890  }
6891 
6892  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6893  {
6894  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6895  }
6896 
6897  return res;
6898 }
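
// Usage sketch (hypothetical application code; values are placeholders): the
// heap size tracking above only activates for heaps given a limit through
// VmaAllocatorCreateInfo::pHeapSizeLimit. The limits are copied at allocator
// creation, so the array may live on the stack.
VkResult CreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice,
    VkDevice device,
    VmaAllocator* pAllocator)
{
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit for this heap.
    }
    heapSizeLimit[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.pHeapSizeLimit = heapSizeLimit;
    return vmaCreateAllocator(&createInfo, pAllocator);
}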
6899 
6900 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6901 {
6902  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6903  {
6904  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6905  }
6906 
6907  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
6908 
6909  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6910  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6911  {
6912  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6913  m_HeapSizeLimit[heapIndex] += size;
6914  }
6915 }
6916 
6917 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6918 {
6919  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6920 
6921  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6922  {
6923  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6924  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6925  VMA_ASSERT(pOwnAllocations);
6926  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6927  VMA_ASSERT(success);
6928  }
6929 
6930  VkDeviceMemory hMemory = allocation->GetMemory();
6931 
6932  if(allocation->GetMappedData() != VMA_NULL)
6933  {
6934  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
6935  }
6936 
6937  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
6938 
6939  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6940 }
6941 
6942 #if VMA_STATS_STRING_ENABLED
6943 
6944 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
6945 {
6946  bool ownAllocationsStarted = false;
6947  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6948  {
6949  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6950  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6951  {
6952  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6953  VMA_ASSERT(pOwnAllocVector);
6954  if(pOwnAllocVector->empty() == false)
6955  {
6956  if(ownAllocationsStarted == false)
6957  {
6958  ownAllocationsStarted = true;
6959  json.WriteString("OwnAllocations");
6960  json.BeginObject();
6961  }
6962 
6963  json.BeginString("Type ");
6964  json.ContinueString(memTypeIndex);
6965  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6966  {
6967  json.ContinueString(" Mapped");
6968  }
6969  json.EndString();
6970 
6971  json.BeginArray();
6972 
6973  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
6974  {
6975  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
6976  json.BeginObject(true);
6977 
6978  json.WriteString("Size");
6979  json.WriteNumber(hAlloc->GetSize());
6980 
6981  json.WriteString("Type");
6982  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
6983 
6984  json.EndObject();
6985  }
6986 
6987  json.EndArray();
6988  }
6989  }
6990  }
6991  if(ownAllocationsStarted)
6992  {
6993  json.EndObject();
6994  }
6995 
6996  {
6997  bool allocationsStarted = false;
6998  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6999  {
7000  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7001  {
7002  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7003  {
7004  if(allocationsStarted == false)
7005  {
7006  allocationsStarted = true;
7007  json.WriteString("DefaultPools");
7008  json.BeginObject();
7009  }
7010 
7011  json.BeginString("Type ");
7012  json.ContinueString(memTypeIndex);
7013  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7014  {
7015  json.ContinueString(" Mapped");
7016  }
7017  json.EndString();
7018 
7019  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7020  }
7021  }
7022  }
7023  if(allocationsStarted)
7024  {
7025  json.EndObject();
7026  }
7027  }
7028 
7029  {
7030  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7031  const size_t poolCount = m_Pools.size();
7032  if(poolCount > 0)
7033  {
7034  json.WriteString("Pools");
7035  json.BeginArray();
7036  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7037  {
7038  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7039  }
7040  json.EndArray();
7041  }
7042  }
7043 }
7044 
7045 #endif // #if VMA_STATS_STRING_ENABLED
7046 
7047 static VkResult AllocateMemoryForImage(
7048  VmaAllocator allocator,
7049  VkImage image,
7050  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7051  VmaSuballocationType suballocType,
7052  VmaAllocation* pAllocation)
7053 {
7054  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7055 
7056  VkMemoryRequirements vkMemReq = {};
7057  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
7058 
7059  return allocator->AllocateMemory(
7060  vkMemReq,
7061  *pAllocationCreateInfo,
7062  suballocType,
7063  pAllocation);
7064 }
7065 
7066 ////////////////////////////////////////////////////////////////////////////////
7067 // Public interface
7068 
7069 VkResult vmaCreateAllocator(
7070  const VmaAllocatorCreateInfo* pCreateInfo,
7071  VmaAllocator* pAllocator)
7072 {
7073  VMA_ASSERT(pCreateInfo && pAllocator);
7074  VMA_DEBUG_LOG("vmaCreateAllocator");
7075  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7076  return VK_SUCCESS;
7077 }
7078 
7079 void vmaDestroyAllocator(
7080  VmaAllocator allocator)
7081 {
7082  if(allocator != VK_NULL_HANDLE)
7083  {
7084  VMA_DEBUG_LOG("vmaDestroyAllocator");
7085  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7086  vma_delete(&allocationCallbacks, allocator);
7087  }
7088 }
7089 
7090 void vmaGetPhysicalDeviceProperties(
7091  VmaAllocator allocator,
7092  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7093 {
7094  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7095  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7096 }
7097 
7098 void vmaGetMemoryProperties(
7099  VmaAllocator allocator,
7100  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7101 {
7102  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7103  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7104 }
7105 
7106 void vmaGetMemoryTypeProperties(
7107  VmaAllocator allocator,
7108  uint32_t memoryTypeIndex,
7109  VkMemoryPropertyFlags* pFlags)
7110 {
7111  VMA_ASSERT(allocator && pFlags);
7112  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7113  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7114 }
7115 
7116 void vmaSetCurrentFrameIndex(
7117  VmaAllocator allocator,
7118  uint32_t frameIndex)
7119 {
7120  VMA_ASSERT(allocator);
7121  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7122 
7123  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7124 
7125  allocator->SetCurrentFrameIndex(frameIndex);
7126 }
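
// Usage sketch (hypothetical application code): the frame index drives the
// lost-allocation machinery, so it is typically advanced once per frame,
// before any allocation or vmaGetAllocationInfo() calls.
void BeginFrame(VmaAllocator allocator)
{
    static uint32_t frameIndex = 0;
    ++frameIndex; // Must never reach VMA_FRAME_INDEX_LOST (UINT32_MAX).
    vmaSetCurrentFrameIndex(allocator, frameIndex);
}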
7127 
7128 void vmaCalculateStats(
7129  VmaAllocator allocator,
7130  VmaStats* pStats)
7131 {
7132  VMA_ASSERT(allocator && pStats);
7133  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7134  allocator->CalculateStats(pStats);
7135 }
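
// Usage sketch (hypothetical application code): reading the aggregate
// statistics filled in by CalculateStats() above.
VkDeviceSize QueryTotalUsedBytes(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total aggregates everything; per-type and per-heap breakdowns
    // are available in stats.memoryType[] and stats.memoryHeap[].
    return stats.total.UsedBytes;
}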
7136 
7137 #if VMA_STATS_STRING_ENABLED
7138 
7139 void vmaBuildStatsString(
7140  VmaAllocator allocator,
7141  char** ppStatsString,
7142  VkBool32 detailedMap)
7143 {
7144  VMA_ASSERT(allocator && ppStatsString);
7145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7146 
7147  VmaStringBuilder sb(allocator);
7148  {
7149  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7150  json.BeginObject();
7151 
7152  VmaStats stats;
7153  allocator->CalculateStats(&stats);
7154 
7155  json.WriteString("Total");
7156  VmaPrintStatInfo(json, stats.total);
7157 
7158  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7159  {
7160  json.BeginString("Heap ");
7161  json.ContinueString(heapIndex);
7162  json.EndString();
7163  json.BeginObject();
7164 
7165  json.WriteString("Size");
7166  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7167 
7168  json.WriteString("Flags");
7169  json.BeginArray(true);
7170  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7171  {
7172  json.WriteString("DEVICE_LOCAL");
7173  }
7174  json.EndArray();
7175 
7176  if(stats.memoryHeap[heapIndex].BlockCount > 0)
7177  {
7178  json.WriteString("Stats");
7179  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7180  }
7181 
7182  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7183  {
7184  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7185  {
7186  json.BeginString("Type ");
7187  json.ContinueString(typeIndex);
7188  json.EndString();
7189 
7190  json.BeginObject();
7191 
7192  json.WriteString("Flags");
7193  json.BeginArray(true);
7194  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7195  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7196  {
7197  json.WriteString("DEVICE_LOCAL");
7198  }
7199  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7200  {
7201  json.WriteString("HOST_VISIBLE");
7202  }
7203  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7204  {
7205  json.WriteString("HOST_COHERENT");
7206  }
7207  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7208  {
7209  json.WriteString("HOST_CACHED");
7210  }
7211  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7212  {
7213  json.WriteString("LAZILY_ALLOCATED");
7214  }
7215  json.EndArray();
7216 
7217  if(stats.memoryType[typeIndex].BlockCount > 0)
7218  {
7219  json.WriteString("Stats");
7220  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7221  }
7222 
7223  json.EndObject();
7224  }
7225  }
7226 
7227  json.EndObject();
7228  }
7229  if(detailedMap == VK_TRUE)
7230  {
7231  allocator->PrintDetailedMap(json);
7232  }
7233 
7234  json.EndObject();
7235  }
7236 
7237  const size_t len = sb.GetLength();
7238  char* const pChars = vma_new_array(allocator, char, len + 1);
7239  if(len > 0)
7240  {
7241  memcpy(pChars, sb.GetData(), len);
7242  }
7243  pChars[len] = '\0';
7244  *ppStatsString = pChars;
7245 }
7246 
7247 void vmaFreeStatsString(
7248  VmaAllocator allocator,
7249  char* pStatsString)
7250 {
7251  if(pStatsString != VMA_NULL)
7252  {
7253  VMA_ASSERT(allocator);
7254  size_t len = strlen(pStatsString);
7255  vma_delete_array(allocator, pStatsString, len + 1);
7256  }
7257 }
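
// Usage sketch (hypothetical application code; assumes <cstdio> for printf):
// dumping the JSON document built by vmaBuildStatsString(). The returned
// string must always be released with vmaFreeStatsString().
void DumpStatsJson(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
}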
7258 
7259 #endif // #if VMA_STATS_STRING_ENABLED
7260 
7261 /** This function is not protected by any mutex because it just reads immutable data.
7262 */
7263 VkResult vmaFindMemoryTypeIndex(
7264  VmaAllocator allocator,
7265  uint32_t memoryTypeBits,
7266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7267  uint32_t* pMemoryTypeIndex)
7268 {
7269  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7270  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7271  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7272 
7273  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7274  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7275  if(preferredFlags == 0)
7276  {
7277  preferredFlags = requiredFlags;
7278  }
7279  // preferredFlags, if not 0, must be a superset of requiredFlags.
7280  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7281 
7282  // Convert usage to requiredFlags and preferredFlags.
7283  switch(pAllocationCreateInfo->usage)
7284  {
7285  case VMA_MEMORY_USAGE_UNKNOWN:
7286  break;
7287  case VMA_MEMORY_USAGE_GPU_ONLY:
7288  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7289  break;
7290  case VMA_MEMORY_USAGE_CPU_ONLY:
7291  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7292  break;
7293  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7294  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7295  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7296  break;
7297  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7298  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7299  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7300  break;
7301  default:
7302  break;
7303  }
7304 
7305  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7306  {
7307  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7308  }
7309 
7310  *pMemoryTypeIndex = UINT32_MAX;
7311  uint32_t minCost = UINT32_MAX;
7312  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7313  memTypeIndex < allocator->GetMemoryTypeCount();
7314  ++memTypeIndex, memTypeBit <<= 1)
7315  {
7316  // This memory type is acceptable according to memoryTypeBits bitmask.
7317  if((memTypeBit & memoryTypeBits) != 0)
7318  {
7319  const VkMemoryPropertyFlags currFlags =
7320  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7321  // This memory type contains requiredFlags.
7322  if((requiredFlags & ~currFlags) == 0)
7323  {
7324  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7325  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7326  // Remember memory type with lowest cost.
7327  if(currCost < minCost)
7328  {
7329  *pMemoryTypeIndex = memTypeIndex;
7330  if(currCost == 0)
7331  {
7332  return VK_SUCCESS;
7333  }
7334  minCost = currCost;
7335  }
7336  }
7337  }
7338  }
7339  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7340 }
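
// Usage sketch (hypothetical application code): picking a memory type for a
// host-visible staging buffer. Passing UINT32_MAX as memoryTypeBits means
// "no restriction"; a real application would usually pass
// VkMemoryRequirements::memoryTypeBits of a concrete buffer or image.
VkResult FindStagingMemoryType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}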
7341 
7342 VkResult vmaCreatePool(
7343  VmaAllocator allocator,
7344  const VmaPoolCreateInfo* pCreateInfo,
7345  VmaPool* pPool)
7346 {
7347  VMA_ASSERT(allocator && pCreateInfo && pPool);
7348 
7349  VMA_DEBUG_LOG("vmaCreatePool");
7350 
7351  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7352 
7353  return allocator->CreatePool(pCreateInfo, pPool);
7354 }
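
// Usage sketch (hypothetical application code; sizes are placeholders):
// creating a custom pool in a memory type found with vmaFindMemoryTypeIndex().
VkResult CreateExamplePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block.
    poolCreateInfo.minBlockCount = 1; // Keep at least one block allocated.
    poolCreateInfo.maxBlockCount = 0; // 0 = unlimited (see CreatePool() above).
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}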
7355 
7356 void vmaDestroyPool(
7357  VmaAllocator allocator,
7358  VmaPool pool)
7359 {
7360  VMA_ASSERT(allocator && pool);
7361 
7362  VMA_DEBUG_LOG("vmaDestroyPool");
7363 
7364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7365 
7366  allocator->DestroyPool(pool);
7367 }
7368 
7369 void vmaGetPoolStats(
7370  VmaAllocator allocator,
7371  VmaPool pool,
7372  VmaPoolStats* pPoolStats)
7373 {
7374  VMA_ASSERT(allocator && pool && pPoolStats);
7375 
7376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7377 
7378  allocator->GetPoolStats(pool, pPoolStats);
7379 }
7380 
7381 void vmaMakePoolAllocationsLost(
7382  VmaAllocator allocator,
7383  VmaPool pool,
7384  size_t* pLostAllocationCount)
7385 {
7386  VMA_ASSERT(allocator && pool);
7387 
7388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7389 
7390  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7391 }
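
// Usage sketch (hypothetical application code): reclaiming memory from a pool
// that uses lost allocations, typically once per frame right after
// vmaSetCurrentFrameIndex().
void ReclaimPool(VmaAllocator allocator, VmaPool pool)
{
    size_t lostCount = 0;
    vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
    // lostCount is the number of allocations that were just marked as lost.
}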
7392 
7393 VkResult vmaAllocateMemory(
7394  VmaAllocator allocator,
7395  const VkMemoryRequirements* pVkMemoryRequirements,
7396  const VmaAllocationCreateInfo* pCreateInfo,
7397  VmaAllocation* pAllocation,
7398  VmaAllocationInfo* pAllocationInfo)
7399 {
7400  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7401 
7402  VMA_DEBUG_LOG("vmaAllocateMemory");
7403 
7404  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7405 
7406  VkResult result = allocator->AllocateMemory(
7407  *pVkMemoryRequirements,
7408  *pCreateInfo,
7409  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7410  pAllocation);
7411 
7412  if(pAllocationInfo && result == VK_SUCCESS)
7413  {
7414  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7415  }
7416 
7417  return result;
7418 }
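
// Usage sketch (hypothetical application code): the lower-level path - query
// the requirements yourself, allocate with vmaAllocateMemory(), then bind.
// This mirrors what vmaCreateBuffer() below does in one call.
VkResult AllocAndBindBufferMemory(
    VmaAllocator allocator,
    VkDevice device,
    VkBuffer buffer,
    VmaAllocation* pAllocation)
{
    VkMemoryRequirements memReq = {};
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, pAllocation, &allocInfo);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    // The allocation may be a suballocation, hence the explicit offset.
    return vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);
}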
7419 
7420 VkResult vmaAllocateMemoryForBuffer(
7421  VmaAllocator allocator,
7422  VkBuffer buffer,
7423  const VmaAllocationCreateInfo* pCreateInfo,
7424  VmaAllocation* pAllocation,
7425  VmaAllocationInfo* pAllocationInfo)
7426 {
7427  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7428 
7429  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7430 
7431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7432 
7433  VkMemoryRequirements vkMemReq = {};
7434  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
7435 
7436  VkResult result = allocator->AllocateMemory(
7437  vkMemReq,
7438  *pCreateInfo,
7439  VMA_SUBALLOCATION_TYPE_BUFFER,
7440  pAllocation);
7441 
7442  if(pAllocationInfo && result == VK_SUCCESS)
7443  {
7444  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7445  }
7446 
7447  return result;
7448 }
7449 
7450 VkResult vmaAllocateMemoryForImage(
7451  VmaAllocator allocator,
7452  VkImage image,
7453  const VmaAllocationCreateInfo* pCreateInfo,
7454  VmaAllocation* pAllocation,
7455  VmaAllocationInfo* pAllocationInfo)
7456 {
7457  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7458 
7459  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7460 
7461  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7462 
7463  VkResult result = AllocateMemoryForImage(
7464  allocator,
7465  image,
7466  pCreateInfo,
7467  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7468  pAllocation);
7469 
7470  if(pAllocationInfo && result == VK_SUCCESS)
7471  {
7472  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7473  }
7474 
7475  return result;
7476 }
7477 
7478 void vmaFreeMemory(
7479  VmaAllocator allocator,
7480  VmaAllocation allocation)
7481 {
7482  VMA_ASSERT(allocator && allocation);
7483 
7484  VMA_DEBUG_LOG("vmaFreeMemory");
7485 
7486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7487 
7488  allocator->FreeMemory(allocation);
7489 }
7490 
7491 void vmaGetAllocationInfo(
7492  VmaAllocator allocator,
7493  VmaAllocation allocation,
7494  VmaAllocationInfo* pAllocationInfo)
7495 {
7496  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7497 
7498  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7499 
7500  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7501 }
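
// Usage sketch (hypothetical application code): for allocations that can
// become lost, GetAllocationInfo() above reports deviceMemory == VK_NULL_HANDLE
// once the allocation is lost. Note that querying a live allocation also marks
// it as used in the current frame.
bool IsAllocationLost(VmaAllocator allocator, VmaAllocation allocation)
{
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, allocation, &info);
    return info.deviceMemory == VK_NULL_HANDLE;
}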
7502 
7503 void vmaSetAllocationUserData(
7504  VmaAllocator allocator,
7505  VmaAllocation allocation,
7506  void* pUserData)
7507 {
7508  VMA_ASSERT(allocator && allocation);
7509 
7510  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7511 
7512  allocation->SetUserData(pUserData);
7513 }
7514 
7515 void vmaCreateLostAllocation(
7516  VmaAllocator allocator,
7517  VmaAllocation* pAllocation)
7518 {
7519  VMA_ASSERT(allocator && pAllocation);
7520 
7521  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7522 
7523  allocator->CreateLostAllocation(pAllocation);
7524 }
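
// Usage sketch (hypothetical application code): a lost allocation can serve as
// a permanent placeholder handle that behaves like an allocation that has
// already been lost.
VmaAllocation CreateLostPlaceholder(VmaAllocator allocator)
{
    VmaAllocation alloc = VK_NULL_HANDLE;
    vmaCreateLostAllocation(allocator, &alloc);
    return alloc; // Destroy with vmaFreeMemory() like any other allocation.
}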
7525 
7526 VkResult vmaMapMemory(
7527  VmaAllocator allocator,
7528  VmaAllocation allocation,
7529  void** ppData)
7530 {
7531  VMA_ASSERT(allocator && allocation && ppData);
7532 
7533  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7534 
7535  return (*allocator->GetVulkanFunctions().vkMapMemory)(allocator->m_hDevice, allocation->GetMemory(),
7536  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7537 }
7538 
7539 void vmaUnmapMemory(
7540  VmaAllocator allocator,
7541  VmaAllocation allocation)
7542 {
7543  VMA_ASSERT(allocator && allocation);
7544 
7545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7546 
7547  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
7548 }
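
// Usage sketch (hypothetical application code; assumes <cstring> for memcpy):
// uploading data through the map/unmap pair above. The allocation must come
// from a HOST_VISIBLE memory type; with a non-HOST_COHERENT type a flush
// would additionally be required.
VkResult UploadToAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    const void* pSrcData,
    size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, pSrcData, size);
    vmaUnmapMemory(allocator, allocation);
    return VK_SUCCESS;
}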
7549 
7550 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7551 {
7552  VMA_ASSERT(allocator);
7553 
7554  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7555 
7556  allocator->UnmapPersistentlyMappedMemory();
7557 }
7558 
7559 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7560 {
7561  VMA_ASSERT(allocator);
7562 
7563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7564 
7565  return allocator->MapPersistentlyMappedMemory();
7566 }
7567 
7568 VkResult vmaDefragment(
7569  VmaAllocator allocator,
7570  VmaAllocation* pAllocations,
7571  size_t allocationCount,
7572  VkBool32* pAllocationsChanged,
7573  const VmaDefragmentationInfo *pDefragmentationInfo,
7574  VmaDefragmentationStats* pDefragmentationStats)
7575 {
7576  VMA_ASSERT(allocator && pAllocations);
7577 
7578  VMA_DEBUG_LOG("vmaDefragment");
7579 
7580  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7581 
7582  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7583 }
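
// Usage sketch (hypothetical application code; budgets are placeholders):
// a bounded defragmentation pass. Entries set to VK_TRUE in
// pAllocationsChanged afterwards have a new deviceMemory/offset, so buffers
// and images bound to them must be recreated and rebound by the application.
VkResult RunDefragmentation(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged, // Array of allocationCount elements.
    size_t allocationCount)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
    defragInfo.maxAllocationsToMove = 128;

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, pAllocations, allocationCount,
        pAllocationsChanged, &defragInfo, &stats);
    // stats.bytesMoved and stats.allocationsMoved describe the work done.
    return res;
}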
7584 
7585 VkResult vmaCreateBuffer(
7586  VmaAllocator allocator,
7587  const VkBufferCreateInfo* pBufferCreateInfo,
7588  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7589  VkBuffer* pBuffer,
7590  VmaAllocation* pAllocation,
7591  VmaAllocationInfo* pAllocationInfo)
7592 {
7593  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7594 
7595  VMA_DEBUG_LOG("vmaCreateBuffer");
7596 
7597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7598 
7599  *pBuffer = VK_NULL_HANDLE;
7600  *pAllocation = VK_NULL_HANDLE;
7601 
7602  // 1. Create VkBuffer.
7603  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7604  allocator->m_hDevice,
7605  pBufferCreateInfo,
7606  allocator->GetAllocationCallbacks(),
7607  pBuffer);
7608  if(res >= 0)
7609  {
7610  // 2. vkGetBufferMemoryRequirements.
7611  VkMemoryRequirements vkMemReq = {};
7612  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
7613 
7614  // 3. Allocate memory using allocator.
7615  res = allocator->AllocateMemory(
7616  vkMemReq,
7617  *pAllocationCreateInfo,
7618  VMA_SUBALLOCATION_TYPE_BUFFER,
7619  pAllocation);
7620  if(res >= 0)
7621  {
7622  // 4. Bind buffer with memory.
7623  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7624  allocator->m_hDevice,
7625  *pBuffer,
7626  (*pAllocation)->GetMemory(),
7627  (*pAllocation)->GetOffset());
7628  if(res >= 0)
7629  {
7630  // All steps succeeded.
7631  if(pAllocationInfo != VMA_NULL)
7632  {
7633  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7634  }
7635  return VK_SUCCESS;
7636  }
7637  allocator->FreeMemory(*pAllocation);
7638  *pAllocation = VK_NULL_HANDLE;
7639  return res;
7640  }
7641  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7642  *pBuffer = VK_NULL_HANDLE;
7643  return res;
7644  }
7645  return res;
7646 }
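
// Usage sketch (hypothetical application code): the typical one-call path.
// VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT keeps the memory mapped, so
// VmaAllocationInfo::pMappedData can be written directly every frame.
VkResult CreateMappedUniformBuffer(
    VmaAllocator allocator,
    VkDeviceSize size,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    void** ppMappedData)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;

    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, &allocInfo);
    if(res == VK_SUCCESS)
    {
        *ppMappedData = allocInfo.pMappedData;
    }
    return res;
}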
7647 
7648 void vmaDestroyBuffer(
7649  VmaAllocator allocator,
7650  VkBuffer buffer,
7651  VmaAllocation allocation)
7652 {
7653  if(buffer != VK_NULL_HANDLE)
7654  {
7655  VMA_ASSERT(allocator);
7656 
7657  VMA_DEBUG_LOG("vmaDestroyBuffer");
7658 
7659  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7660 
7661  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7662 
7663  allocator->FreeMemory(allocation);
7664  }
7665 }
7666 
7667 VkResult vmaCreateImage(
7668  VmaAllocator allocator,
7669  const VkImageCreateInfo* pImageCreateInfo,
7670  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7671  VkImage* pImage,
7672  VmaAllocation* pAllocation,
7673  VmaAllocationInfo* pAllocationInfo)
7674 {
7675  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7676 
7677  VMA_DEBUG_LOG("vmaCreateImage");
7678 
7679  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7680 
7681  *pImage = VK_NULL_HANDLE;
7682  *pAllocation = VK_NULL_HANDLE;
7683 
7684  // 1. Create VkImage.
7685  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
7686  allocator->m_hDevice,
7687  pImageCreateInfo,
7688  allocator->GetAllocationCallbacks(),
7689  pImage);
7690  if(res >= 0)
7691  {
7692  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7693  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7694  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7695 
7696  // 2. Allocate memory using allocator.
7697  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7698  if(res >= 0)
7699  {
7700  // 3. Bind image with memory.
7701  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
7702  allocator->m_hDevice,
7703  *pImage,
7704  (*pAllocation)->GetMemory(),
7705  (*pAllocation)->GetOffset());
7706  if(res >= 0)
7707  {
7708  // All steps succeeded.
7709  if(pAllocationInfo != VMA_NULL)
7710  {
7711  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7712  }
7713  return VK_SUCCESS;
7714  }
7715  allocator->FreeMemory(*pAllocation);
7716  *pAllocation = VK_NULL_HANDLE;
7717  return res;
7718  }
7719  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7720  *pImage = VK_NULL_HANDLE;
7721  return res;
7722  }
7723  return res;
7724 }
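
// Usage sketch (hypothetical application code): creating a sampled 2D texture
// with device-local memory in one call. OPTIMAL tiling selects the
// IMAGE_OPTIMAL suballocation type above.
VkResult CreateTexture2D(
    VmaAllocator allocator,
    uint32_t width,
    uint32_t height,
    VkImage* pImage,
    VmaAllocation* pAllocation)
{
    VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imageCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageCreateInfo.extent = { width, height, 1 };
    imageCreateInfo.mipLevels = 1;
    imageCreateInfo.arrayLayers = 1;
    imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imageCreateInfo, &allocCreateInfo,
        pImage, pAllocation, VMA_NULL); // Allocation info not needed here.
}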
7725 
7726 void vmaDestroyImage(
7727  VmaAllocator allocator,
7728  VkImage image,
7729  VmaAllocation allocation)
7730 {
7731  if(image != VK_NULL_HANDLE)
7732  {
7733  VMA_ASSERT(allocator);
7734 
7735  VMA_DEBUG_LOG("vmaDestroyImage");
7736 
7737  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7738 
7739  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7740 
7741  allocator->FreeMemory(allocation);
7742  }
7743 }
7744 
7745 #endif // #ifdef VMA_IMPLEMENTATION