Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorFlagBits {
    VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorFlagBits;

typedef VkFlags VmaAllocatorFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
} VmaVulkanFunctions;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;
    VkDeviceSize preferredSmallHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);
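
/*
A minimal usage sketch (illustrative, not part of the library): creating an
allocator from an existing VkPhysicalDevice and VkDevice. All other members of
VmaAllocatorCreateInfo are assumed left at their zero defaults here.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ...use the allocator...
    vmaDestroyAllocator(allocator);
*/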

void vmaDestroyAllocator(
    VmaAllocator allocator);

void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t BlockCount;
    uint32_t AllocationCount;
    uint32_t UnusedRangeCount;
    VkDeviceSize UsedBytes;
    VkDeviceSize UnusedBytes;
    VkDeviceSize AllocationSizeMin, AllocationSizeAvg, AllocationSizeMax;
    VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED
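
/*
Illustrative sketch: dumping allocator statistics as a string. The string is
allocated by the library and must be released with vmaFreeStatsString.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ...write statsString to a file or log...
    vmaFreeStatsString(allocator, statsString);
*/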

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,

    VMA_MEMORY_USAGE_GPU_ONLY = 1,

    VMA_MEMORY_USAGE_CPU_ONLY = 2,

    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,

    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    void* pUserData;
    VmaPool pool;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

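/*
Illustrative sketch: finding the memory type index for a host-side staging
allocation. memoryTypeBits would normally come from the VkMemoryRequirements
of a concrete buffer or image; memReq here is a hypothetical variable.

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
*/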

typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

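/*
Illustrative sketch: creating a custom pool for one memory type. The pool is
later passed via VmaAllocationCreateInfo::pool so allocations come from its
blocks instead of the default ones. memTypeIndex is assumed to come from
vmaFindMemoryTypeIndex.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128 * 1024 * 1024;
    poolInfo.minBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
*/
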
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

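/*
Illustrative sketch (assumed behavior of this version): detecting that an
allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT has been
lost. A lost allocation reports deviceMemory == VK_NULL_HANDLE from
vmaGetAllocationInfo and must be freed and recreated by the application.

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    if(allocInfo.deviceMemory == VK_NULL_HANDLE)
    {
        // Allocation is lost: free it and recreate the resource.
    }
*/
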
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

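/*
Illustrative sketch: uploading data through a temporary mapping. Valid for
allocations from host-visible memory; allocations created with
VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT instead stay mapped via
VmaAllocationInfo::pMappedData. srcData/srcDataSize are hypothetical.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
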
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

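/*
Illustrative sketch of a defragmentation pass: persistently mapped memory is
unmapped first because defragmentation may move allocations to different
blocks, then mapped again afterwards. Passing null as pDefragmentationInfo is
assumed to mean default limits. allocations/allocationCount/allocationsChanged
are hypothetical arrays owned by the application.

    vmaUnmapPersistentlyMappedMemory(allocator);

    VmaDefragmentationStats stats;
    VkResult res = vmaDefragment(allocator, allocations, allocationCount,
        allocationsChanged, nullptr, &stats);

    vmaMapPersistentlyMappedMemory(allocator);
    // Allocations flagged in allocationsChanged must have their buffers or
    // images recreated and rebound at the new offsets.
*/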

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

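/*
Illustrative sketch: the typical one-call path that creates a buffer,
allocates memory for it, and binds the two together.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
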
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan
functions via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own
implementation of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of
std::atomic<uint32_t>:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    // 1 = use best-fit strategy when searching free suballocations; 0 = first-fit.
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
    // 1 = every allocation gets its own dedicated VkDeviceMemory block (for debugging).
    #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Margin of free space enforced after every suballocation, in bytes.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // 1 = synchronize entry to all library functions with one global mutex (for debugging).
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value of VkPhysicalDeviceLimits::bufferImageGranularity to enforce.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Heaps of this size or smaller are considered "small" and use smaller default blocks.
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns the number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

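// Worked example (illustrative): with pageSize = 0x10000 (64 KiB), a resource
// spanning [0x0F000, 0x0F7FF] and one starting at 0x0F800 both fall on page
// 0x00000, so the function returns true; if the second resource instead starts
// at 0x10000 it begins a new page and the function returns false.
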
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other is an optimal image. If a type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

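/*
Illustrative sketch: locating the insertion point for a value in a sorted
array using VmaBinaryFindFirstNotLess with an ordinary less-than comparison.

    VkDeviceSize sizes[] = { 16, 64, 256, 1024 };
    VkDeviceSize* it = VmaBinaryFindFirstNotLess(
        sizes, sizes + 4, (VkDeviceSize)128,
        [](VkDeviceSize lhs, VkDeviceSize rhs) { return lhs < rhs; });
    // it now points at 256: the first element not less than 128.
*/
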
////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

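/*
Illustrative sketch: VmaStlAllocator routes STL-style container allocations
through VkAllocationCallbacks, so internal collections honor the callbacks the
user passed in VmaAllocatorCreateInfo. The same pattern works with std::vector:

    const VkAllocationCallbacks* pCallbacks = ...; // may be null
    std::vector< uint32_t, VmaStlAllocator<uint32_t> > v(
        (VmaStlAllocator<uint32_t>(pCallbacks)));
    v.push_back(42); // allocated via pCallbacks or VMA_SYSTEM_ALIGNED_MALLOC
*/
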
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with an interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

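/*
Illustrative sketch: VmaPoolAllocator hands out fixed-size slots from growing
blocks, replacing per-object heap allocations on hot paths. Free slots inside
a block form a singly-linked list threaded through the Item union. Node is a
hypothetical element type.

    struct Node { uint32_t payload; };
    VmaPoolAllocator<Node> pool(pAllocationCallbacks, 128);
    Node* n = pool.Alloc(); // note: Node's constructor is NOT called
    n->payload = 7;
    pool.Free(n);           // returns the slot to the block's free list
*/
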
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computation: m_ItemAllocator's destructor frees all blocks anyway.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_OWN,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitOwnAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_OWN;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_OwnAllocation.m_hMemory = hMemory;
        m_OwnAllocation.m_PersistentMap = persistentMap;
        m_OwnAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        outInfo.BlockCount = 1;
        outInfo.AllocationCount = 1;
        outInfo.UnusedRangeCount = 0;
        outInfo.UsedBytes = m_Size;
        outInfo.UnusedBytes = 0;
        outInfo.AllocationSizeMin = outInfo.AllocationSizeMax = m_Size;
        outInfo.UnusedRangeSizeMin = UINT64_MAX;
        outInfo.UnusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct OwnAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        OwnAllocation m_OwnAllocation;
    };
};
2965 
2966 /*
2967 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
2968 allocated memory block or free.
2969 */
2970 struct VmaSuballocation
2971 {
2972  VkDeviceSize offset;
2973  VkDeviceSize size;
2974  VmaAllocation hAllocation;
2975  VmaSuballocationType type;
2976 };
2977 
2978 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2979 
2980 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes (1 MiB).
2981 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
2982 
2983 /*
2984 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
2985 
2986 If canMakeOtherLost was false:
2987 - item points to a FREE suballocation.
2988 - itemsToMakeLostCount is 0.
2989 
2990 If canMakeOtherLost was true:
2991 - item points to first of sequence of suballocations, which are either FREE,
2992  or point to VmaAllocations that can become lost.
2993 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
2994  the requested allocation to succeed.
2995 */
2996 struct VmaAllocationRequest
2997 {
2998  VkDeviceSize offset;
2999  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3000  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3001  VmaSuballocationList::iterator item;
3002  size_t itemsToMakeLostCount;
3003 
3004  VkDeviceSize CalcCost() const
3005  {
3006  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3007  }
3008 };
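
// Worked example of the cost model: making 2 allocations lost with
// sumItemSize = 3 MiB costs 3 MiB + 2 * VMA_LOST_ALLOCATION_COST = 5 MiB,
// while making a single 3.5 MiB allocation lost costs 3.5 MiB + 1 MiB = 4.5 MiB,
// so CreateAllocationRequest() below prefers the latter spot.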
3009 
3010 /*
3011 Represents a single block of device memory (VkDeviceMemory) together with all the
3012 data about its regions (aka suballocations, VmaAllocation), both assigned and free.
3013 
3014 Thread-safety: This class must be externally synchronized.
3015 */
3016 class VmaDeviceMemoryBlock
3017 {
3018 public:
3019  uint32_t m_MemoryTypeIndex;
3020  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3021  VkDeviceMemory m_hMemory;
3022  VkDeviceSize m_Size;
3023  bool m_PersistentMap;
3024  void* m_pMappedData;
3025  uint32_t m_FreeCount;
3026  VkDeviceSize m_SumFreeSize;
3027  VmaSuballocationList m_Suballocations;
3028  // Suballocations that are free and have size greater than certain threshold.
3029  // Sorted by size, ascending.
3030  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3031 
3032  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3033 
3034  ~VmaDeviceMemoryBlock()
3035  {
3036  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3037  }
3038 
3039  // Always call after construction.
3040  void Init(
3041  uint32_t newMemoryTypeIndex,
3042  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3043  VkDeviceMemory newMemory,
3044  VkDeviceSize newSize,
3045  bool persistentMap,
3046  void* pMappedData);
3047  // Always call before destruction.
3048  void Destroy(VmaAllocator allocator);
3049 
3050  // Validates all data structures inside this object. If not valid, returns false.
3051  bool Validate() const;
3052 
3053  // Tries to find a place for suballocation with given parameters inside this block.
3054  // If succeeded, fills pAllocationRequest and returns true.
3055  // If failed, returns false.
3056  bool CreateAllocationRequest(
3057  uint32_t currentFrameIndex,
3058  uint32_t frameInUseCount,
3059  VkDeviceSize bufferImageGranularity,
3060  VkDeviceSize allocSize,
3061  VkDeviceSize allocAlignment,
3062  VmaSuballocationType allocType,
3063  bool canMakeOtherLost,
3064  VmaAllocationRequest* pAllocationRequest);
3065 
3066  bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);
3067 
3068  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3069 
3070  // Returns true if this block is empty - contains only a single free suballocation.
3071  bool IsEmpty() const;
3072 
3073  // Makes actual allocation based on request. Request must already be checked
3074  // and valid.
3075  void Alloc(
3076  const VmaAllocationRequest& request,
3077  VmaSuballocationType type,
3078  VkDeviceSize allocSize,
3079  VmaAllocation hAllocation);
3080 
3081  // Frees suballocation assigned to given memory region.
3082  void Free(const VmaAllocation allocation);
3083 
3084 #if VMA_STATS_STRING_ENABLED
3085  void PrintDetailedMap(class VmaJsonWriter& json) const;
3086 #endif
3087 
3088 private:
3089  // Checks if requested suballocation with given parameters can be placed at given suballocItem.
3090  // If yes, fills pOffset and returns true. If no, returns false.
3091  bool CheckAllocation(
3092  uint32_t currentFrameIndex,
3093  uint32_t frameInUseCount,
3094  VkDeviceSize bufferImageGranularity,
3095  VkDeviceSize allocSize,
3096  VkDeviceSize allocAlignment,
3097  VmaSuballocationType allocType,
3098  VmaSuballocationList::const_iterator suballocItem,
3099  bool canMakeOtherLost,
3100  VkDeviceSize* pOffset,
3101  size_t* itemsToMakeLostCount,
3102  VkDeviceSize* pSumFreeSize,
3103  VkDeviceSize* pSumItemSize) const;
3104 
3105  // Given a free suballocation, merges it with the following one, which must also be free.
3106  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3107  // Releases given suballocation, making it free.
3108  // Merges it with adjacent free suballocations if applicable.
3109  // Returns iterator to new free suballocation at this place.
3110  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3111  // Given a free suballocation, inserts it into the sorted list
3112  // m_FreeSuballocationsBySize if it is large enough to qualify.
3113  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3114  // Given a free suballocation, removes it from the sorted list
3115  // m_FreeSuballocationsBySize if it is registered there.
3116  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3117 
3118  bool ValidateFreeSuballocationList() const;
3119 };
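
/*
Editor's sketch of the intended call sequence on a block (hypothetical values,
not part of the original file):

    VmaDeviceMemoryBlock block(hAllocator);
    block.Init(memTypeIndex, VMA_BLOCK_VECTOR_TYPE_UNMAPPED, hMemory, size, false, VMA_NULL);
    VmaAllocationRequest request;
    if(block.CreateAllocationRequest(frameIndex, frameInUseCount, granularity,
        allocSize, allocAlignment, VMA_SUBALLOCATION_TYPE_BUFFER, false, &request))
    {
        block.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, allocSize, hAllocation);
    }
    // ...
    block.Free(hAllocation);
    block.Destroy(hAllocator);
*/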
3120 
3121 struct VmaPointerLess
3122 {
3123  bool operator()(const void* lhs, const void* rhs) const
3124  {
3125  return lhs < rhs;
3126  }
3127 };
3128 
3129 class VmaDefragmentator;
3130 
3131 /*
3132 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3133 Vulkan memory type.
3134 
3135 Synchronized internally with a mutex.
3136 */
3137 struct VmaBlockVector
3138 {
3139  VmaBlockVector(
3140  VmaAllocator hAllocator,
3141  uint32_t memoryTypeIndex,
3142  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3143  VkDeviceSize preferredBlockSize,
3144  size_t minBlockCount,
3145  size_t maxBlockCount,
3146  VkDeviceSize bufferImageGranularity,
3147  uint32_t frameInUseCount,
3148  bool isCustomPool);
3149  ~VmaBlockVector();
3150 
3151  VkResult CreateMinBlocks();
3152 
3153  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3154  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3155  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3156  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3157  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3158 
3159  void GetPoolStats(VmaPoolStats* pStats);
3160 
3161  bool IsEmpty() const { return m_Blocks.empty(); }
3162 
3163  VkResult Allocate(
3164  VmaPool hCurrentPool,
3165  uint32_t currentFrameIndex,
3166  const VkMemoryRequirements& vkMemReq,
3167  const VmaAllocationCreateInfo& createInfo,
3168  VmaSuballocationType suballocType,
3169  VmaAllocation* pAllocation);
3170 
3171  void Free(
3172  VmaAllocation hAllocation);
3173 
3174  // Adds statistics of this BlockVector to pStats.
3175  void AddStats(VmaStats* pStats);
3176 
3177 #if VMA_STATS_STRING_ENABLED
3178  void PrintDetailedMap(class VmaJsonWriter& json);
3179 #endif
3180 
3181  void UnmapPersistentlyMappedMemory();
3182  VkResult MapPersistentlyMappedMemory();
3183 
3184  void MakePoolAllocationsLost(
3185  uint32_t currentFrameIndex,
3186  size_t* pLostAllocationCount);
3187 
3188  VmaDefragmentator* EnsureDefragmentator(
3189  VmaAllocator hAllocator,
3190  uint32_t currentFrameIndex);
3191 
3192  VkResult Defragment(
3193  VmaDefragmentationStats* pDefragmentationStats,
3194  VkDeviceSize& maxBytesToMove,
3195  uint32_t& maxAllocationsToMove);
3196 
3197  void DestroyDefragmentator();
3198 
3199 private:
3200  friend class VmaDefragmentator;
3201 
3202  const VmaAllocator m_hAllocator;
3203  const uint32_t m_MemoryTypeIndex;
3204  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3205  const VkDeviceSize m_PreferredBlockSize;
3206  const size_t m_MinBlockCount;
3207  const size_t m_MaxBlockCount;
3208  const VkDeviceSize m_BufferImageGranularity;
3209  const uint32_t m_FrameInUseCount;
3210  const bool m_IsCustomPool;
3211  VMA_MUTEX m_Mutex;
3212  // Incrementally sorted by sumFreeSize, ascending.
3213  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3214  /* There can be at most one block that is completely empty - a
3215  hysteresis to avoid the pessimistic case of alternating creation and destruction
3216  of a VkDeviceMemory. */
3217  bool m_HasEmptyBlock;
3218  VmaDefragmentator* m_pDefragmentator;
3219 
3220  // Finds and removes given block from vector.
3221  void Remove(VmaDeviceMemoryBlock* pBlock);
3222 
3223  // Performs single step in sorting m_Blocks. They may not be fully sorted
3224  // after this call.
3225  void IncrementallySortBlocks();
3226 
3227  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3228 };
3229 
3230 struct VmaPool_T
3231 {
3232 public:
3233  VmaBlockVector m_BlockVector;
3234 
3235  // Takes ownership.
3236  VmaPool_T(
3237  VmaAllocator hAllocator,
3238  const VmaPoolCreateInfo& createInfo);
3239  ~VmaPool_T();
3240 
3241  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3242 
3243 #if VMA_STATS_STRING_ENABLED
3244  //void PrintDetailedMap(class VmaStringBuilder& sb);
3245 #endif
3246 };
3247 
3248 class VmaDefragmentator
3249 {
3250  const VmaAllocator m_hAllocator;
3251  VmaBlockVector* const m_pBlockVector;
3252  uint32_t m_CurrentFrameIndex;
3253  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3254  VkDeviceSize m_BytesMoved;
3255  uint32_t m_AllocationsMoved;
3256 
3257  struct AllocationInfo
3258  {
3259  VmaAllocation m_hAllocation;
3260  VkBool32* m_pChanged;
3261 
3262  AllocationInfo() :
3263  m_hAllocation(VK_NULL_HANDLE),
3264  m_pChanged(VMA_NULL)
3265  {
3266  }
3267  };
3268 
3269  struct AllocationInfoSizeGreater
3270  {
3271  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3272  {
3273  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3274  }
3275  };
3276 
3277  // Used between AddAllocation and Defragment.
3278  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3279 
3280  struct BlockInfo
3281  {
3282  VmaDeviceMemoryBlock* m_pBlock;
3283  bool m_HasNonMovableAllocations;
3284  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3285 
3286  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3287  m_pBlock(VMA_NULL),
3288  m_HasNonMovableAllocations(true),
3289  m_Allocations(pAllocationCallbacks),
3290  m_pMappedDataForDefragmentation(VMA_NULL)
3291  {
3292  }
3293 
3294  void CalcHasNonMovableAllocations()
3295  {
3296  const size_t blockAllocCount =
3297  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3298  const size_t defragmentAllocCount = m_Allocations.size();
3299  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3300  }
3301 
3302  void SortAllocationsBySizeDescecnding()
3303  {
3304  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3305  }
3306 
3307  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3308  void Unmap(VmaAllocator hAllocator);
3309 
3310  private:
3311  // Not null if mapped for defragmentation only, not persistently mapped.
3312  void* m_pMappedDataForDefragmentation;
3313  };
3314 
3315  struct BlockPointerLess
3316  {
3317  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3318  {
3319  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3320  }
3321  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3322  {
3323  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3324  }
3325  };
3326 
3327  // 1. Blocks with some non-movable allocations go first.
3328  // 2. Blocks with smaller sumFreeSize go first.
3329  struct BlockInfoCompareMoveDestination
3330  {
3331  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3332  {
3333  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3334  {
3335  return true;
3336  }
3337  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3338  {
3339  return false;
3340  }
3341  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3342  {
3343  return true;
3344  }
3345  return false;
3346  }
3347  };
3348 
3349  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3350  BlockInfoVector m_Blocks;
3351 
3352  VkResult DefragmentRound(
3353  VkDeviceSize maxBytesToMove,
3354  uint32_t maxAllocationsToMove);
3355 
3356  static bool MoveMakesSense(
3357  size_t dstBlockIndex, VkDeviceSize dstOffset,
3358  size_t srcBlockIndex, VkDeviceSize srcOffset);
3359 
3360 public:
3361  VmaDefragmentator(
3362  VmaAllocator hAllocator,
3363  VmaBlockVector* pBlockVector,
3364  uint32_t currentFrameIndex);
3365 
3366  ~VmaDefragmentator();
3367 
3368  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3369  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3370 
3371  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3372 
3373  VkResult Defragment(
3374  VkDeviceSize maxBytesToMove,
3375  uint32_t maxAllocationsToMove);
3376 };
3377 
3378 // Main allocator object.
3379 struct VmaAllocator_T
3380 {
3381  bool m_UseMutex;
3382  VkDevice m_hDevice;
3383  bool m_AllocationCallbacksSpecified;
3384  VkAllocationCallbacks m_AllocationCallbacks;
3385  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3386  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3387  // Counter to allow nested calls to these functions.
3388  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3389 
3390  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3391  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3392  VMA_MUTEX m_HeapSizeLimitMutex;
3393 
3394  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3395  VkPhysicalDeviceMemoryProperties m_MemProps;
3396 
3397  // Default pools.
3398  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3399 
3400  // Each vector is sorted by memory (handle value).
3401  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3402  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3403  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3404 
3405  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3406  ~VmaAllocator_T();
3407 
3408  const VkAllocationCallbacks* GetAllocationCallbacks() const
3409  {
3410  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3411  }
3412  const VmaVulkanFunctions& GetVulkanFunctions() const
3413  {
3414  return m_VulkanFunctions;
3415  }
3416 
3417  VkDeviceSize GetBufferImageGranularity() const
3418  {
3419  return VMA_MAX(
3420  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3421  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3422  }
3423 
3424  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3425  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3426 
3427  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3428  {
3429  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3430  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3431  }
3432 
3433  // Main allocation function.
3434  VkResult AllocateMemory(
3435  const VkMemoryRequirements& vkMemReq,
3436  const VmaAllocationCreateInfo& createInfo,
3437  VmaSuballocationType suballocType,
3438  VmaAllocation* pAllocation);
3439 
3440  // Main deallocation function.
3441  void FreeMemory(const VmaAllocation allocation);
3442 
3443  void CalculateStats(VmaStats* pStats);
3444 
3445 #if VMA_STATS_STRING_ENABLED
3446  void PrintDetailedMap(class VmaJsonWriter& json);
3447 #endif
3448 
3449  void UnmapPersistentlyMappedMemory();
3450  VkResult MapPersistentlyMappedMemory();
3451 
3452  VkResult Defragment(
3453  VmaAllocation* pAllocations,
3454  size_t allocationCount,
3455  VkBool32* pAllocationsChanged,
3456  const VmaDefragmentationInfo* pDefragmentationInfo,
3457  VmaDefragmentationStats* pDefragmentationStats);
3458 
3459  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3460 
3461  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3462  void DestroyPool(VmaPool pool);
3463  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3464 
3465  void SetCurrentFrameIndex(uint32_t frameIndex);
3466 
3467  void MakePoolAllocationsLost(
3468  VmaPool hPool,
3469  size_t* pLostAllocationCount);
3470 
3471  void CreateLostAllocation(VmaAllocation* pAllocation);
3472 
3473  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3474  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3475 
3476 private:
3477  VkDeviceSize m_PreferredLargeHeapBlockSize;
3478  VkDeviceSize m_PreferredSmallHeapBlockSize;
3479 
3480  VkPhysicalDevice m_PhysicalDevice;
3481  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3482 
3483  VMA_MUTEX m_PoolsMutex;
3484  // Protected by m_PoolsMutex. Sorted by pointer value.
3485  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3486 
3487  VmaVulkanFunctions m_VulkanFunctions;
3488 
3489  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3490 
3491  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3492 
3493  VkResult AllocateMemoryOfType(
3494  const VkMemoryRequirements& vkMemReq,
3495  const VmaAllocationCreateInfo& createInfo,
3496  uint32_t memTypeIndex,
3497  VmaSuballocationType suballocType,
3498  VmaAllocation* pAllocation);
3499 
3500  // Allocates and registers new VkDeviceMemory specifically for single allocation.
3501  VkResult AllocateOwnMemory(
3502  VkDeviceSize size,
3503  VmaSuballocationType suballocType,
3504  uint32_t memTypeIndex,
3505  bool map,
3506  void* pUserData,
3507  VmaAllocation* pAllocation);
3508 
3509  // Frees allocation created as own memory - with its own, dedicated VkDeviceMemory.
3510  void FreeOwnMemory(VmaAllocation allocation);
3511 };
3512 
3513 ////////////////////////////////////////////////////////////////////////////////
3514 // Memory allocation #2 after VmaAllocator_T definition
3515 
3516 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3517 {
3518  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3519 }
3520 
3521 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3522 {
3523  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3524 }
3525 
3526 template<typename T>
3527 static T* VmaAllocate(VmaAllocator hAllocator)
3528 {
3529  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3530 }
3531 
3532 template<typename T>
3533 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3534 {
3535  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3536 }
3537 
3538 template<typename T>
3539 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3540 {
3541  if(ptr != VMA_NULL)
3542  {
3543  ptr->~T();
3544  VmaFree(hAllocator, ptr);
3545  }
3546 }
3547 
3548 template<typename T>
3549 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3550 {
3551  if(ptr != VMA_NULL)
3552  {
3553  for(size_t i = count; i--; )
3554  ptr[i].~T();
3555  VmaFree(hAllocator, ptr);
3556  }
3557 }
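
/*
Editor's note on how these host-memory helpers pair up (T is any hypothetical
type): VmaAllocate()/VmaAllocateArray() return raw storage only, so the caller
must construct objects in place, while vma_delete()/vma_delete_array() both
destroy and free:

    T* p = VmaAllocate<T>(hAllocator); // raw storage, no constructor run
    new(p) T();                        // placement-new to construct
    vma_delete(hAllocator, p);         // runs ~T() and frees the storage
*/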
3558 
3559 ////////////////////////////////////////////////////////////////////////////////
3560 // VmaStringBuilder
3561 
3562 #if VMA_STATS_STRING_ENABLED
3563 
3564 class VmaStringBuilder
3565 {
3566 public:
3567  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3568  size_t GetLength() const { return m_Data.size(); }
3569  const char* GetData() const { return m_Data.data(); }
3570 
3571  void Add(char ch) { m_Data.push_back(ch); }
3572  void Add(const char* pStr);
3573  void AddNewLine() { Add('\n'); }
3574  void AddNumber(uint32_t num);
3575  void AddNumber(uint64_t num);
3576  void AddPointer(const void* ptr);
3577 
3578 private:
3579  VmaVector< char, VmaStlAllocator<char> > m_Data;
3580 };
3581 
3582 void VmaStringBuilder::Add(const char* pStr)
3583 {
3584  const size_t strLen = strlen(pStr);
3585  if(strLen > 0)
3586  {
3587  const size_t oldCount = m_Data.size();
3588  m_Data.resize(oldCount + strLen);
3589  memcpy(m_Data.data() + oldCount, pStr, strLen);
3590  }
3591 }
3592 
3593 void VmaStringBuilder::AddNumber(uint32_t num)
3594 {
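  // 10 decimal digits of uint32_t max (4294967295) plus terminating null.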
3595  char buf[11];
3596  VmaUint32ToStr(buf, sizeof(buf), num);
3597  Add(buf);
3598 }
3599 
3600 void VmaStringBuilder::AddNumber(uint64_t num)
3601 {
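  // 20 decimal digits of uint64_t max (18446744073709551615) plus terminating null.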
3602  char buf[21];
3603  VmaUint64ToStr(buf, sizeof(buf), num);
3604  Add(buf);
3605 }
3606 
3607 void VmaStringBuilder::AddPointer(const void* ptr)
3608 {
3609  char buf[21];
3610  VmaPtrToStr(buf, sizeof(buf), ptr);
3611  Add(buf);
3612 }
3613 
3614 #endif // #if VMA_STATS_STRING_ENABLED
3615 
3616 ////////////////////////////////////////////////////////////////////////////////
3617 // VmaJsonWriter
3618 
3619 #if VMA_STATS_STRING_ENABLED
3620 
3621 class VmaJsonWriter
3622 {
3623 public:
3624  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3625  ~VmaJsonWriter();
3626 
3627  void BeginObject(bool singleLine = false);
3628  void EndObject();
3629 
3630  void BeginArray(bool singleLine = false);
3631  void EndArray();
3632 
3633  void WriteString(const char* pStr);
3634  void BeginString(const char* pStr = VMA_NULL);
3635  void ContinueString(const char* pStr);
3636  void ContinueString(uint32_t n);
3637  void ContinueString(uint64_t n);
3638  void EndString(const char* pStr = VMA_NULL);
3639 
3640  void WriteNumber(uint32_t n);
3641  void WriteNumber(uint64_t n);
3642  void WriteBool(bool b);
3643  void WriteNull();
3644 
3645 private:
3646  static const char* const INDENT;
3647 
3648  enum COLLECTION_TYPE
3649  {
3650  COLLECTION_TYPE_OBJECT,
3651  COLLECTION_TYPE_ARRAY,
3652  };
3653  struct StackItem
3654  {
3655  COLLECTION_TYPE type;
3656  uint32_t valueCount;
3657  bool singleLineMode;
3658  };
3659 
3660  VmaStringBuilder& m_SB;
3661  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3662  bool m_InsideString;
3663 
3664  void BeginValue(bool isString);
3665  void WriteIndent(bool oneLess = false);
3666 };
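
/*
Editor's sketch - example usage of VmaJsonWriter producing
{"Name": "Block", "Sizes": [16, 32]} (sb is an existing VmaStringBuilder):

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject(true); // single-line mode
    json.WriteString("Name");  json.WriteString("Block");
    json.WriteString("Sizes"); json.BeginArray(true);
    json.WriteNumber(16u); json.WriteNumber(32u);
    json.EndArray();
    json.EndObject();

Inside an object, values alternate between key (which must be a string) and
value - BeginValue() below asserts this.
*/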
3667 
3668 const char* const VmaJsonWriter::INDENT = "  ";
3669 
3670 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3671  m_SB(sb),
3672  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3673  m_InsideString(false)
3674 {
3675 }
3676 
3677 VmaJsonWriter::~VmaJsonWriter()
3678 {
3679  VMA_ASSERT(!m_InsideString);
3680  VMA_ASSERT(m_Stack.empty());
3681 }
3682 
3683 void VmaJsonWriter::BeginObject(bool singleLine)
3684 {
3685  VMA_ASSERT(!m_InsideString);
3686 
3687  BeginValue(false);
3688  m_SB.Add('{');
3689 
3690  StackItem item;
3691  item.type = COLLECTION_TYPE_OBJECT;
3692  item.valueCount = 0;
3693  item.singleLineMode = singleLine;
3694  m_Stack.push_back(item);
3695 }
3696 
3697 void VmaJsonWriter::EndObject()
3698 {
3699  VMA_ASSERT(!m_InsideString);
3700 
3701  WriteIndent(true);
3702  m_SB.Add('}');
3703 
3704  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3705  m_Stack.pop_back();
3706 }
3707 
3708 void VmaJsonWriter::BeginArray(bool singleLine)
3709 {
3710  VMA_ASSERT(!m_InsideString);
3711 
3712  BeginValue(false);
3713  m_SB.Add('[');
3714 
3715  StackItem item;
3716  item.type = COLLECTION_TYPE_ARRAY;
3717  item.valueCount = 0;
3718  item.singleLineMode = singleLine;
3719  m_Stack.push_back(item);
3720 }
3721 
3722 void VmaJsonWriter::EndArray()
3723 {
3724  VMA_ASSERT(!m_InsideString);
3725 
3726  WriteIndent(true);
3727  m_SB.Add(']');
3728 
3729  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3730  m_Stack.pop_back();
3731 }
3732 
3733 void VmaJsonWriter::WriteString(const char* pStr)
3734 {
3735  BeginString(pStr);
3736  EndString();
3737 }
3738 
3739 void VmaJsonWriter::BeginString(const char* pStr)
3740 {
3741  VMA_ASSERT(!m_InsideString);
3742 
3743  BeginValue(true);
3744  m_SB.Add('"');
3745  m_InsideString = true;
3746  if(pStr != VMA_NULL && pStr[0] != '\0')
3747  {
3748  ContinueString(pStr);
3749  }
3750 }
3751 
3752 void VmaJsonWriter::ContinueString(const char* pStr)
3753 {
3754  VMA_ASSERT(m_InsideString);
3755 
3756  const size_t strLen = strlen(pStr);
3757  for(size_t i = 0; i < strLen; ++i)
3758  {
3759  char ch = pStr[i];
3760  if(ch == '\\')
3761  {
3762  m_SB.Add("\\\\");
3763  }
3764  else if(ch == '"')
3765  {
3766  m_SB.Add("\\\"");
3767  }
3768  else if(ch >= 32)
3769  {
3770  m_SB.Add(ch);
3771  }
3772  else switch(ch)
3773  {
3774  case '\n':
3775  m_SB.Add("\\n");
3776  break;
3777  case '\r':
3778  m_SB.Add("\\r");
3779  break;
3780  case '\t':
3781  m_SB.Add("\\t");
3782  break;
3783  default:
3784  VMA_ASSERT(0 && "Character not currently supported.");
3785  break;
3786  }
3787  }
3788 }
3789 
3790 void VmaJsonWriter::ContinueString(uint32_t n)
3791 {
3792  VMA_ASSERT(m_InsideString);
3793  m_SB.AddNumber(n);
3794 }
3795 
3796 void VmaJsonWriter::ContinueString(uint64_t n)
3797 {
3798  VMA_ASSERT(m_InsideString);
3799  m_SB.AddNumber(n);
3800 }
3801 
3802 void VmaJsonWriter::EndString(const char* pStr)
3803 {
3804  VMA_ASSERT(m_InsideString);
3805  if(pStr != VMA_NULL && pStr[0] != '\0')
3806  {
3807  ContinueString(pStr);
3808  }
3809  m_SB.Add('"');
3810  m_InsideString = false;
3811 }
3812 
3813 void VmaJsonWriter::WriteNumber(uint32_t n)
3814 {
3815  VMA_ASSERT(!m_InsideString);
3816  BeginValue(false);
3817  m_SB.AddNumber(n);
3818 }
3819 
3820 void VmaJsonWriter::WriteNumber(uint64_t n)
3821 {
3822  VMA_ASSERT(!m_InsideString);
3823  BeginValue(false);
3824  m_SB.AddNumber(n);
3825 }
3826 
3827 void VmaJsonWriter::WriteBool(bool b)
3828 {
3829  VMA_ASSERT(!m_InsideString);
3830  BeginValue(false);
3831  m_SB.Add(b ? "true" : "false");
3832 }
3833 
3834 void VmaJsonWriter::WriteNull()
3835 {
3836  VMA_ASSERT(!m_InsideString);
3837  BeginValue(false);
3838  m_SB.Add("null");
3839 }
3840 
3841 void VmaJsonWriter::BeginValue(bool isString)
3842 {
3843  if(!m_Stack.empty())
3844  {
3845  StackItem& currItem = m_Stack.back();
3846  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3847  currItem.valueCount % 2 == 0)
3848  {
3849  VMA_ASSERT(isString);
3850  }
3851 
3852  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3853  currItem.valueCount % 2 != 0)
3854  {
3855  m_SB.Add(": ");
3856  }
3857  else if(currItem.valueCount > 0)
3858  {
3859  m_SB.Add(", ");
3860  WriteIndent();
3861  }
3862  else
3863  {
3864  WriteIndent();
3865  }
3866  ++currItem.valueCount;
3867  }
3868 }
3869 
3870 void VmaJsonWriter::WriteIndent(bool oneLess)
3871 {
3872  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3873  {
3874  m_SB.AddNewLine();
3875 
3876  size_t count = m_Stack.size();
3877  if(count > 0 && oneLess)
3878  {
3879  --count;
3880  }
3881  for(size_t i = 0; i < count; ++i)
3882  {
3883  m_SB.Add(INDENT);
3884  }
3885  }
3886 }
3887 
3888 #endif // #if VMA_STATS_STRING_ENABLED
3889 
3890 ////////////////////////////////////////////////////////////////////////////////
3891 
3892 VkDeviceSize VmaAllocation_T::GetOffset() const
3893 {
3894  switch(m_Type)
3895  {
3896  case ALLOCATION_TYPE_BLOCK:
3897  return m_BlockAllocation.m_Offset;
3898  case ALLOCATION_TYPE_OWN:
3899  return 0;
3900  default:
3901  VMA_ASSERT(0);
3902  return 0;
3903  }
3904 }
3905 
3906 VkDeviceMemory VmaAllocation_T::GetMemory() const
3907 {
3908  switch(m_Type)
3909  {
3910  case ALLOCATION_TYPE_BLOCK:
3911  return m_BlockAllocation.m_Block->m_hMemory;
3912  case ALLOCATION_TYPE_OWN:
3913  return m_OwnAllocation.m_hMemory;
3914  default:
3915  VMA_ASSERT(0);
3916  return VK_NULL_HANDLE;
3917  }
3918 }
3919 
3920 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3921 {
3922  switch(m_Type)
3923  {
3924  case ALLOCATION_TYPE_BLOCK:
3925  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3926  case ALLOCATION_TYPE_OWN:
3927  return m_OwnAllocation.m_MemoryTypeIndex;
3928  default:
3929  VMA_ASSERT(0);
3930  return UINT32_MAX;
3931  }
3932 }
3933 
3934 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3935 {
3936  switch(m_Type)
3937  {
3938  case ALLOCATION_TYPE_BLOCK:
3939  return m_BlockAllocation.m_Block->m_BlockVectorType;
3940  case ALLOCATION_TYPE_OWN:
3941  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3942  default:
3943  VMA_ASSERT(0);
3944  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3945  }
3946 }
3947 
3948 void* VmaAllocation_T::GetMappedData() const
3949 {
3950  switch(m_Type)
3951  {
3952  case ALLOCATION_TYPE_BLOCK:
3953  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3954  {
3955  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3956  }
3957  else
3958  {
3959  return VMA_NULL;
3960  }
3961  break;
3962  case ALLOCATION_TYPE_OWN:
3963  return m_OwnAllocation.m_pMappedData;
3964  default:
3965  VMA_ASSERT(0);
3966  return VMA_NULL;
3967  }
3968 }
3969 
3970 bool VmaAllocation_T::CanBecomeLost() const
3971 {
3972  switch(m_Type)
3973  {
3974  case ALLOCATION_TYPE_BLOCK:
3975  return m_BlockAllocation.m_CanBecomeLost;
3976  case ALLOCATION_TYPE_OWN:
3977  return false;
3978  default:
3979  VMA_ASSERT(0);
3980  return false;
3981  }
3982 }
3983 
3984 VmaPool VmaAllocation_T::GetPool() const
3985 {
3986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3987  return m_BlockAllocation.m_hPool;
3988 }
3989 
3990 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
3991 {
3992  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
3993  if(m_OwnAllocation.m_PersistentMap)
3994  {
3995  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
3996  hAllocator->m_hDevice,
3997  m_OwnAllocation.m_hMemory,
3998  0,
3999  VK_WHOLE_SIZE,
4000  0,
4001  &m_OwnAllocation.m_pMappedData);
4002  }
4003  return VK_SUCCESS;
4004 }
4005 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4006 {
4007  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4008  if(m_OwnAllocation.m_pMappedData)
4009  {
4010  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
4011  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
4012  m_OwnAllocation.m_pMappedData = VMA_NULL;
4013  }
4014 }
4015 
4016 
4017 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4018 {
4019  VMA_ASSERT(CanBecomeLost());
4020 
4021  /*
4022  Warning: This is a carefully designed algorithm.
4023  Do not modify unless you really know what you're doing :)
4024  */
4025  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4026  for(;;)
4027  {
4028  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4029  {
4030  VMA_ASSERT(0);
4031  return false;
4032  }
4033  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4034  {
4035  return false;
4036  }
4037  else // Last use time earlier than current time.
4038  {
4039  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4040  {
4041  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4042  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4043  return true;
4044  }
4045  }
4046  }
4047 }
4048 
4049 #if VMA_STATS_STRING_ENABLED
4050 
4051 // Names correspond to values of enum VmaSuballocationType.
4052 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4053  "FREE",
4054  "UNKNOWN",
4055  "BUFFER",
4056  "IMAGE_UNKNOWN",
4057  "IMAGE_LINEAR",
4058  "IMAGE_OPTIMAL",
4059 };
4060 
4061 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4062 {
4063  json.BeginObject();
4064 
4065  json.WriteString("Blocks");
4066  json.WriteNumber(stat.BlockCount);
4067 
4068  json.WriteString("Allocations");
4069  json.WriteNumber(stat.AllocationCount);
4070 
4071  json.WriteString("UnusedRanges");
4072  json.WriteNumber(stat.UnusedRangeCount);
4073 
4074  json.WriteString("UsedBytes");
4075  json.WriteNumber(stat.UsedBytes);
4076 
4077  json.WriteString("UnusedBytes");
4078  json.WriteNumber(stat.UnusedBytes);
4079 
4080  if(stat.AllocationCount > 1)
4081  {
4082  json.WriteString("AllocationSize");
4083  json.BeginObject(true);
4084  json.WriteString("Min");
4085  json.WriteNumber(stat.AllocationSizeMin);
4086  json.WriteString("Avg");
4087  json.WriteNumber(stat.AllocationSizeAvg);
4088  json.WriteString("Max");
4089  json.WriteNumber(stat.AllocationSizeMax);
4090  json.EndObject();
4091  }
4092 
4093  if(stat.UnusedRangeCount > 1)
4094  {
4095  json.WriteString("UnusedRangeSize");
4096  json.BeginObject(true);
4097  json.WriteString("Min");
4098  json.WriteNumber(stat.UnusedRangeSizeMin);
4099  json.WriteString("Avg");
4100  json.WriteNumber(stat.UnusedRangeSizeAvg);
4101  json.WriteString("Max");
4102  json.WriteNumber(stat.UnusedRangeSizeMax);
4103  json.EndObject();
4104  }
4105 
4106  json.EndObject();
4107 }
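
/*
For a VmaStatInfo with 2 blocks, 3 allocations of sizes 128, 128 and 512 bytes,
and no unused ranges, the function above emits JSON shaped like:

    { "Blocks": 2, "Allocations": 3, "UnusedRanges": 0, "UsedBytes": 768,
      "UnusedBytes": 0, "AllocationSize": { "Min": 128, "Avg": 256, "Max": 512 } }
*/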
4108 
4109 #endif // #if VMA_STATS_STRING_ENABLED
4110 
4111 struct VmaSuballocationItemSizeLess
4112 {
4113  bool operator()(
4114  const VmaSuballocationList::iterator lhs,
4115  const VmaSuballocationList::iterator rhs) const
4116  {
4117  return lhs->size < rhs->size;
4118  }
4119  bool operator()(
4120  const VmaSuballocationList::iterator lhs,
4121  VkDeviceSize rhsSize) const
4122  {
4123  return lhs->size < rhsSize;
4124  }
4125 };
4126 
4127 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
4128  m_MemoryTypeIndex(UINT32_MAX),
4129  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
4130  m_hMemory(VK_NULL_HANDLE),
4131  m_Size(0),
4132  m_PersistentMap(false),
4133  m_pMappedData(VMA_NULL),
4134  m_FreeCount(0),
4135  m_SumFreeSize(0),
4136  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4137  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4138 {
4139 }
4140 
4141 void VmaDeviceMemoryBlock::Init(
4142  uint32_t newMemoryTypeIndex,
4143  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4144  VkDeviceMemory newMemory,
4145  VkDeviceSize newSize,
4146  bool persistentMap,
4147  void* pMappedData)
4148 {
4149  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4150 
4151  m_MemoryTypeIndex = newMemoryTypeIndex;
4152  m_BlockVectorType = newBlockVectorType;
4153  m_hMemory = newMemory;
4154  m_Size = newSize;
4155  m_PersistentMap = persistentMap;
4156  m_pMappedData = pMappedData;
4157  m_FreeCount = 1;
4158  m_SumFreeSize = newSize;
4159 
4160  m_Suballocations.clear();
4161  m_FreeSuballocationsBySize.clear();
4162 
4163  VmaSuballocation suballoc = {};
4164  suballoc.offset = 0;
4165  suballoc.size = newSize;
4166  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4167  suballoc.hAllocation = VK_NULL_HANDLE;
4168 
4169  m_Suballocations.push_back(suballoc);
4170  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4171  --suballocItem;
4172  m_FreeSuballocationsBySize.push_back(suballocItem);
4173 }
4174 
4175 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
4176 {
4177  // This is the most important assert in the entire library.
4178  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
4179  VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
4180 
4181  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
4182  if(m_pMappedData != VMA_NULL)
4183  {
4184  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
4185  m_pMappedData = VMA_NULL;
4186  }
4187 
4188  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
4189  m_hMemory = VK_NULL_HANDLE;
4190 }
4191 
4192 bool VmaDeviceMemoryBlock::Validate() const
4193 {
4194  if((m_hMemory == VK_NULL_HANDLE) ||
4195  (m_Size == 0) ||
4196  m_Suballocations.empty())
4197  {
4198  return false;
4199  }
4200 
4201  // Expected offset of a new suballocation, as calculated from the previous ones.
4202  VkDeviceSize calculatedOffset = 0;
4203  // Expected number of free suballocations as calculated from traversing their list.
4204  uint32_t calculatedFreeCount = 0;
4205  // Expected sum size of free suballocations as calculated from traversing their list.
4206  VkDeviceSize calculatedSumFreeSize = 0;
4207  // Expected number of free suballocations that should be registered in
4208  // m_FreeSuballocationsBySize calculated from traversing their list.
4209  size_t freeSuballocationsToRegister = 0;
4210  // True if previously visited suballocation was free.
4211  bool prevFree = false;
4212 
4213  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4214  suballocItem != m_Suballocations.cend();
4215  ++suballocItem)
4216  {
4217  const VmaSuballocation& subAlloc = *suballocItem;
4218 
4219  // Actual offset of this suballocation doesn't match expected one.
4220  if(subAlloc.offset != calculatedOffset)
4221  {
4222  return false;
4223  }
4224 
4225  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4226  // Two adjacent free suballocations are invalid. They should be merged.
4227  if(prevFree && currFree)
4228  {
4229  return false;
4230  }
4231  prevFree = currFree;
4232 
4233  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4234  {
4235  return false;
4236  }
4237 
4238  if(currFree)
4239  {
4240  calculatedSumFreeSize += subAlloc.size;
4241  ++calculatedFreeCount;
4242  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4243  {
4244  ++freeSuballocationsToRegister;
4245  }
4246  }
4247 
4248  calculatedOffset += subAlloc.size;
4249  }
4250 
4251  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4252  // match expected one.
4253  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4254  {
4255  return false;
4256  }
4257 
4258  VkDeviceSize lastSize = 0;
4259  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4260  {
4261  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4262 
4263  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4264  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4265  {
4266  return false;
4267  }
4268  // They must be sorted by size ascending.
4269  if(suballocItem->size < lastSize)
4270  {
4271  return false;
4272  }
4273 
4274  lastSize = suballocItem->size;
4275  }
4276 
4277  // Check if totals match calculated values.
4278  return
4279  (calculatedOffset == m_Size) &&
4280  (calculatedSumFreeSize == m_SumFreeSize) &&
4281  (calculatedFreeCount == m_FreeCount);
4282 }
4283 
4284 /*
4285 How many suitable free suballocations to analyze before choosing the best one.
4286 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4287  be chosen.
4288 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4289  suballocations will be analyzed and the best one will be chosen.
4290 - Any other value is also acceptable.
4291 */
4292 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
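// For illustration of the VMA_BEST_FIT path below: with free suballocation
// sizes [16, 32, 64, 128] (sorted ascending) and allocSize = 48,
// VmaBinaryFindFirstNotLess() returns the entry for 64, and the loop then
// tries 64 and 128 in turn until CheckAllocation() accepts one.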
4293 
4294 bool VmaDeviceMemoryBlock::CreateAllocationRequest(
4295  uint32_t currentFrameIndex,
4296  uint32_t frameInUseCount,
4297  VkDeviceSize bufferImageGranularity,
4298  VkDeviceSize allocSize,
4299  VkDeviceSize allocAlignment,
4300  VmaSuballocationType allocType,
4301  bool canMakeOtherLost,
4302  VmaAllocationRequest* pAllocationRequest)
4303 {
4304  VMA_ASSERT(allocSize > 0);
4305  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4306  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4307  VMA_HEAVY_ASSERT(Validate());
4308 
4309  // There is not enough total free space in this block to fulfill the request: Early return.
4310  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4311  {
4312  return false;
4313  }
4314 
4315  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4316  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4317  if(freeSuballocCount > 0)
4318  {
4319  if(VMA_BEST_FIT)
4320  {
4321  // Find first free suballocation with size not less than allocSize.
4322  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4323  m_FreeSuballocationsBySize.data(),
4324  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4325  allocSize,
4326  VmaSuballocationItemSizeLess());
4327  size_t index = it - m_FreeSuballocationsBySize.data();
4328  for(; index < freeSuballocCount; ++index)
4329  {
4330  if(CheckAllocation(
4331  currentFrameIndex,
4332  frameInUseCount,
4333  bufferImageGranularity,
4334  allocSize,
4335  allocAlignment,
4336  allocType,
4337  m_FreeSuballocationsBySize[index],
4338  false, // canMakeOtherLost
4339  &pAllocationRequest->offset,
4340  &pAllocationRequest->itemsToMakeLostCount,
4341  &pAllocationRequest->sumFreeSize,
4342  &pAllocationRequest->sumItemSize))
4343  {
4344  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4345  return true;
4346  }
4347  }
4348  }
4349  else
4350  {
4351  // Search starting from the biggest suballocations.
4352  for(size_t index = freeSuballocCount; index--; )
4353  {
4354  if(CheckAllocation(
4355  currentFrameIndex,
4356  frameInUseCount,
4357  bufferImageGranularity,
4358  allocSize,
4359  allocAlignment,
4360  allocType,
4361  m_FreeSuballocationsBySize[index],
4362  false, // canMakeOtherLost
4363  &pAllocationRequest->offset,
4364  &pAllocationRequest->itemsToMakeLostCount,
4365  &pAllocationRequest->sumFreeSize,
4366  &pAllocationRequest->sumItemSize))
4367  {
4368  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4369  return true;
4370  }
4371  }
4372  }
4373  }
4374 
4375  if(canMakeOtherLost)
4376  {
4377  // Brute-force algorithm. TODO: Come up with something better.
4378 
4379  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4380  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4381 
4382  VmaAllocationRequest tmpAllocRequest = {};
4383  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4384  suballocIt != m_Suballocations.end();
4385  ++suballocIt)
4386  {
4387  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4388  suballocIt->hAllocation->CanBecomeLost())
4389  {
4390  if(CheckAllocation(
4391  currentFrameIndex,
4392  frameInUseCount,
4393  bufferImageGranularity,
4394  allocSize,
4395  allocAlignment,
4396  allocType,
4397  suballocIt,
4398  canMakeOtherLost,
4399  &tmpAllocRequest.offset,
4400  &tmpAllocRequest.itemsToMakeLostCount,
4401  &tmpAllocRequest.sumFreeSize,
4402  &tmpAllocRequest.sumItemSize))
4403  {
4404  tmpAllocRequest.item = suballocIt;
4405 
4406  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4407  {
4408  *pAllocationRequest = tmpAllocRequest;
4409  }
4410  }
4411  }
4412  }
4413 
4414  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4415  {
4416  return true;
4417  }
4418  }
4419 
4420  return false;
4421 }
4422 
4423 bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
4424 {
4425  while(pAllocationRequest->itemsToMakeLostCount > 0)
4426  {
4427  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4428  {
4429  ++pAllocationRequest->item;
4430  }
4431  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4432  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4433  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4434  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4435  {
4436  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4437  --pAllocationRequest->itemsToMakeLostCount;
4438  }
4439  else
4440  {
4441  return false;
4442  }
4443  }
4444 
4445  VMA_HEAVY_ASSERT(Validate());
4446  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4447  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4448 
4449  return true;
4450 }
4451 
4452 uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4453 {
4454  uint32_t lostAllocationCount = 0;
4455  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4456  it != m_Suballocations.end();
4457  ++it)
4458  {
4459  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4460  it->hAllocation->CanBecomeLost() &&
4461  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4462  {
4463  it = FreeSuballocation(it);
4464  ++lostAllocationCount;
4465  }
4466  }
4467  return lostAllocationCount;
4468 }
4469 
4470 bool VmaDeviceMemoryBlock::CheckAllocation(
4471  uint32_t currentFrameIndex,
4472  uint32_t frameInUseCount,
4473  VkDeviceSize bufferImageGranularity,
4474  VkDeviceSize allocSize,
4475  VkDeviceSize allocAlignment,
4476  VmaSuballocationType allocType,
4477  VmaSuballocationList::const_iterator suballocItem,
4478  bool canMakeOtherLost,
4479  VkDeviceSize* pOffset,
4480  size_t* itemsToMakeLostCount,
4481  VkDeviceSize* pSumFreeSize,
4482  VkDeviceSize* pSumItemSize) const
4483 {
4484  VMA_ASSERT(allocSize > 0);
4485  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4486  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4487  VMA_ASSERT(pOffset != VMA_NULL);
4488 
4489  *itemsToMakeLostCount = 0;
4490  *pSumFreeSize = 0;
4491  *pSumItemSize = 0;
4492 
4493  if(canMakeOtherLost)
4494  {
4495  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4496  {
4497  *pSumFreeSize = suballocItem->size;
4498  }
4499  else
4500  {
4501  if(suballocItem->hAllocation->CanBecomeLost() &&
4502  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4503  {
4504  ++*itemsToMakeLostCount;
4505  *pSumItemSize = suballocItem->size;
4506  }
4507  else
4508  {
4509  return false;
4510  }
4511  }
4512 
4513  // Remaining size is too small for this request: Early return.
4514  if(m_Size - suballocItem->offset < allocSize)
4515  {
4516  return false;
4517  }
4518 
4519  // Start from offset equal to beginning of this suballocation.
4520  *pOffset = suballocItem->offset;
4521 
4522  // Apply VMA_DEBUG_MARGIN at the beginning.
4523  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4524  {
4525  *pOffset += VMA_DEBUG_MARGIN;
4526  }
4527 
4528  // Apply alignment.
4529  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4530  *pOffset = VmaAlignUp(*pOffset, alignment);
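  // E.g. an offset of 13 with alignment 8 becomes VmaAlignUp(13, 8) == 16.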
4531 
4532  // Check previous suballocations for BufferImageGranularity conflicts.
4533  // Make bigger alignment if necessary.
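  // Example: with bufferImageGranularity = 4096, a linear buffer ending at
  // offset 3999 and an optimal image placed at offset 4064 would land on the
  // same 4 KiB "page" - a conflict, so the offset is aligned up to 4096.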
4534  if(bufferImageGranularity > 1)
4535  {
4536  bool bufferImageGranularityConflict = false;
4537  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4538  while(prevSuballocItem != m_Suballocations.cbegin())
4539  {
4540  --prevSuballocItem;
4541  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4542  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4543  {
4544  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4545  {
4546  bufferImageGranularityConflict = true;
4547  break;
4548  }
4549  }
4550  else
4551  // Already on previous page.
4552  break;
4553  }
4554  if(bufferImageGranularityConflict)
4555  {
4556  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4557  }
4558  }
4559 
4560  // Now that we have final *pOffset, check if we are past suballocItem.
4561  // If yes, return false - this function should be called for another suballocItem as starting point.
4562  if(*pOffset >= suballocItem->offset + suballocItem->size)
4563  {
4564  return false;
4565  }
4566 
4567  // Calculate padding at the beginning based on current offset.
4568  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4569 
4570  // Calculate required margin at the end if this is not last suballocation.
4571  VmaSuballocationList::const_iterator next = suballocItem;
4572  ++next;
4573  const VkDeviceSize requiredEndMargin =
4574  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4575 
4576  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4577  // Another early return check.
4578  if(suballocItem->offset + totalSize > m_Size)
4579  {
4580  return false;
4581  }
4582 
4583  // Advance lastSuballocItem until desired size is reached.
4584  // Update itemsToMakeLostCount.
4585  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4586  if(totalSize > suballocItem->size)
4587  {
4588  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4589  while(remainingSize > 0)
4590  {
4591  ++lastSuballocItem;
4592  if(lastSuballocItem == m_Suballocations.cend())
4593  {
4594  return false;
4595  }
4596  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4597  {
4598  *pSumFreeSize += lastSuballocItem->size;
4599  }
4600  else
4601  {
4602  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4603  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4604  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4605  {
4606  ++*itemsToMakeLostCount;
4607  *pSumItemSize += lastSuballocItem->size;
4608  }
4609  else
4610  {
4611  return false;
4612  }
4613  }
4614  remainingSize = (lastSuballocItem->size < remainingSize) ?
4615  remainingSize - lastSuballocItem->size : 0;
4616  }
4617  }
4618 
4619  // Check next suballocations for BufferImageGranularity conflicts.
4620  // If conflict exists, we must mark more allocations lost or fail.
4621  if(bufferImageGranularity > 1)
4622  {
4623  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4624  ++nextSuballocItem;
4625  while(nextSuballocItem != m_Suballocations.cend())
4626  {
4627  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4628  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4629  {
4630  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4631  {
4632  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4633  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4634  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4635  {
4636  ++*itemsToMakeLostCount;
4637  }
4638  else
4639  {
4640  return false;
4641  }
4642  }
4643  }
4644  else
4645  {
4646  // Already on next page.
4647  break;
4648  }
4649  ++nextSuballocItem;
4650  }
4651  }
4652  }
4653  else
4654  {
4655  const VmaSuballocation& suballoc = *suballocItem;
4656  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4657 
4658  *pSumFreeSize = suballoc.size;
4659 
4660  // Size of this suballocation is too small for this request: Early return.
4661  if(suballoc.size < allocSize)
4662  {
4663  return false;
4664  }
4665 
4666  // Start from offset equal to beginning of this suballocation.
4667  *pOffset = suballoc.offset;
4668 
4669  // Apply VMA_DEBUG_MARGIN at the beginning.
4670  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4671  {
4672  *pOffset += VMA_DEBUG_MARGIN;
4673  }
4674 
4675  // Apply alignment.
4676  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4677  *pOffset = VmaAlignUp(*pOffset, alignment);
4678 
4679  // Check previous suballocations for BufferImageGranularity conflicts.
4680  // Make bigger alignment if necessary.
4681  if(bufferImageGranularity > 1)
4682  {
4683  bool bufferImageGranularityConflict = false;
4684  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4685  while(prevSuballocItem != m_Suballocations.cbegin())
4686  {
4687  --prevSuballocItem;
4688  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4689  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4690  {
4691  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4692  {
4693  bufferImageGranularityConflict = true;
4694  break;
4695  }
4696  }
4697  else
4698  // Already on previous page.
4699  break;
4700  }
4701  if(bufferImageGranularityConflict)
4702  {
4703  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4704  }
4705  }
4706 
4707  // Calculate padding at the beginning based on current offset.
4708  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4709 
4710  // Calculate required margin at the end if this is not last suballocation.
4711  VmaSuballocationList::const_iterator next = suballocItem;
4712  ++next;
4713  const VkDeviceSize requiredEndMargin =
4714  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4715 
4716  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4717  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4718  {
4719  return false;
4720  }
4721 
4722  // Check next suballocations for BufferImageGranularity conflicts.
4723  // If conflict exists, allocation cannot be made here.
4724  if(bufferImageGranularity > 1)
4725  {
4726  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4727  ++nextSuballocItem;
4728  while(nextSuballocItem != m_Suballocations.cend())
4729  {
4730  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4731  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4732  {
4733  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4734  {
4735  return false;
4736  }
4737  }
4738  else
4739  {
4740  // Already on next page.
4741  break;
4742  }
4743  ++nextSuballocItem;
4744  }
4745  }
4746  }
4747 
4748  // All tests passed: Success. pOffset is already filled.
4749  return true;
4750 }
4751 
4752 bool VmaDeviceMemoryBlock::IsEmpty() const
4753 {
4754  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4755 }
4756 
4757 void VmaDeviceMemoryBlock::Alloc(
4758  const VmaAllocationRequest& request,
4759  VmaSuballocationType type,
4760  VkDeviceSize allocSize,
4761  VmaAllocation hAllocation)
4762 {
4763  VMA_ASSERT(request.item != m_Suballocations.end());
4764  VmaSuballocation& suballoc = *request.item;
4765  // Given suballocation is a free block.
4766  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4767  // Given offset is inside this suballocation.
4768  VMA_ASSERT(request.offset >= suballoc.offset);
4769  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4770  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4771  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4772 
4773  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4774  // it to become used.
4775  UnregisterFreeSuballocation(request.item);
4776 
4777  suballoc.offset = request.offset;
4778  suballoc.size = allocSize;
4779  suballoc.type = type;
4780  suballoc.hAllocation = hAllocation;
4781 
4782  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4783  if(paddingEnd)
4784  {
4785  VmaSuballocation paddingSuballoc = {};
4786  paddingSuballoc.offset = request.offset + allocSize;
4787  paddingSuballoc.size = paddingEnd;
4788  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4789  VmaSuballocationList::iterator next = request.item;
4790  ++next;
4791  const VmaSuballocationList::iterator paddingEndItem =
4792  m_Suballocations.insert(next, paddingSuballoc);
4793  RegisterFreeSuballocation(paddingEndItem);
4794  }
4795 
4796  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4797  if(paddingBegin)
4798  {
4799  VmaSuballocation paddingSuballoc = {};
4800  paddingSuballoc.offset = request.offset - paddingBegin;
4801  paddingSuballoc.size = paddingBegin;
4802  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4803  const VmaSuballocationList::iterator paddingBeginItem =
4804  m_Suballocations.insert(request.item, paddingSuballoc);
4805  RegisterFreeSuballocation(paddingBeginItem);
4806  }
4807 
4808  // Update totals.
4809  m_FreeCount = m_FreeCount - 1;
4810  if(paddingBegin > 0)
4811  {
4812  ++m_FreeCount;
4813  }
4814  if(paddingEnd > 0)
4815  {
4816  ++m_FreeCount;
4817  }
4818  m_SumFreeSize -= allocSize;
4819 }
4820 
4821 VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
4822 {
4823  // Change this suballocation to be marked as free.
4824  VmaSuballocation& suballoc = *suballocItem;
4825  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4826  suballoc.hAllocation = VK_NULL_HANDLE;
4827 
4828  // Update totals.
4829  ++m_FreeCount;
4830  m_SumFreeSize += suballoc.size;
4831 
4832  // Merge with previous and/or next suballocation if it's also free.
4833  bool mergeWithNext = false;
4834  bool mergeWithPrev = false;
4835 
4836  VmaSuballocationList::iterator nextItem = suballocItem;
4837  ++nextItem;
4838  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
4839  {
4840  mergeWithNext = true;
4841  }
4842 
4843  VmaSuballocationList::iterator prevItem = suballocItem;
4844  if(suballocItem != m_Suballocations.begin())
4845  {
4846  --prevItem;
4847  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4848  {
4849  mergeWithPrev = true;
4850  }
4851  }
4852 
4853  if(mergeWithNext)
4854  {
4855  UnregisterFreeSuballocation(nextItem);
4856  MergeFreeWithNext(suballocItem);
4857  }
4858 
4859  if(mergeWithPrev)
4860  {
4861  UnregisterFreeSuballocation(prevItem);
4862  MergeFreeWithNext(prevItem);
4863  RegisterFreeSuballocation(prevItem);
4864  return prevItem;
4865  }
4866  else
4867  {
4868  RegisterFreeSuballocation(suballocItem);
4869  return suballocItem;
4870  }
4871 }
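// Merge sketch for FreeSuballocation(): if both neighbors of the freed range
// are free, e.g. [free 0..256) [used 256..768) [free 768..1024), the freed
// item first absorbs its successor via MergeFreeWithNext(), then the
// predecessor absorbs the freed item, leaving a single [free 0..1024) whose
// iterator is re-registered in m_FreeSuballocationsBySize.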
4872 
4873 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4874 {
4875  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4876  suballocItem != m_Suballocations.end();
4877  ++suballocItem)
4878  {
4879  VmaSuballocation& suballoc = *suballocItem;
4880  if(suballoc.hAllocation == allocation)
4881  {
4882  FreeSuballocation(suballocItem);
4883  VMA_HEAVY_ASSERT(Validate());
4884  return;
4885  }
4886  }
4887  VMA_ASSERT(0 && "Not found!");
4888 }
4889 
4890 #if VMA_STATS_STRING_ENABLED
4891 
4892 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4893 {
4894  json.BeginObject();
4895 
4896  json.WriteString("TotalBytes");
4897  json.WriteNumber(m_Size);
4898 
4899  json.WriteString("UnusedBytes");
4900  json.WriteNumber(m_SumFreeSize);
4901 
4902  json.WriteString("Allocations");
4903  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4904 
4905  json.WriteString("UnusedRanges");
4906  json.WriteNumber(m_FreeCount);
4907 
4908  json.WriteString("Suballocations");
4909  json.BeginArray();
4910  size_t i = 0;
4911  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4912  suballocItem != m_Suballocations.cend();
4913  ++suballocItem, ++i)
4914  {
4915  json.BeginObject(true);
4916 
4917  json.WriteString("Type");
4918  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4919 
4920  json.WriteString("Size");
4921  json.WriteNumber(suballocItem->size);
4922 
4923  json.WriteString("Offset");
4924  json.WriteNumber(suballocItem->offset);
4925 
4926  json.EndObject();
4927  }
4928  json.EndArray();
4929 
4930  json.EndObject();
4931 }
4932 
4933 #endif // #if VMA_STATS_STRING_ENABLED
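// Shape of the JSON emitted above (values are illustrative only):
//
//   { "TotalBytes": 268435456, "UnusedBytes": 1048576,
//     "Allocations": 2, "UnusedRanges": 1,
//     "Suballocations": [
//       { "Type": "BUFFER", "Size": 65536, "Offset": 0 },
//       ... ] }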
4934 
4935 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4936 {
4937  VMA_ASSERT(item != m_Suballocations.end());
4938  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4939 
4940  VmaSuballocationList::iterator nextItem = item;
4941  ++nextItem;
4942  VMA_ASSERT(nextItem != m_Suballocations.end());
4943  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4944 
4945  item->size += nextItem->size;
4946  --m_FreeCount;
4947  m_Suballocations.erase(nextItem);
4948 }
4949 
4950 void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
4951 {
4952  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4953  VMA_ASSERT(item->size > 0);
4954 
4955  // You may want to enable this validation at the beginning or at the end of
4956  // this function, depending on what you want to check.
4957  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4958 
4959  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4960  {
4961  if(m_FreeSuballocationsBySize.empty())
4962  {
4963  m_FreeSuballocationsBySize.push_back(item);
4964  }
4965  else
4966  {
4967  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
4968  }
4969  }
4970 
4971  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4972 }
4973 
4974 
4975 void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
4976 {
4977  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4978  VMA_ASSERT(item->size > 0);
4979 
4980  // You may want to enable this validation at the beginning or at the end of
4981  // this function, depending on what you want to check.
4982  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4983 
4984  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4985  {
4986  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4987  m_FreeSuballocationsBySize.data(),
4988  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
4989  item,
4990  VmaSuballocationItemSizeLess());
4991  for(size_t index = it - m_FreeSuballocationsBySize.data();
4992  index < m_FreeSuballocationsBySize.size();
4993  ++index)
4994  {
4995  if(m_FreeSuballocationsBySize[index] == item)
4996  {
4997  VmaVectorRemove(m_FreeSuballocationsBySize, index);
4998  return;
4999  }
5000  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5001  }
5002  VMA_ASSERT(0 && "Not found.");
5003  }
5004 
5005  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5006 }
5007 
5008 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
5009 {
5010  VkDeviceSize lastSize = 0;
5011  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5012  {
5013  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5014 
5015  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5016  {
5017  VMA_ASSERT(0);
5018  return false;
5019  }
5020  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5021  {
5022  VMA_ASSERT(0);
5023  return false;
5024  }
5025  if(it->size < lastSize)
5026  {
5027  VMA_ASSERT(0);
5028  return false;
5029  }
5030 
5031  lastSize = it->size;
5032  }
5033  return true;
5034 }
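// The three checks above spell out the m_FreeSuballocationsBySize invariant:
// every registered item is FREE, is at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes, and the vector is sorted
// by non-decreasing size - which is what the binary search in
// UnregisterFreeSuballocation() depends on.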
5035 
5036 static void InitStatInfo(VmaStatInfo& outInfo)
5037 {
5038  memset(&outInfo, 0, sizeof(outInfo));
5039  outInfo.AllocationSizeMin = UINT64_MAX;
5040  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5041 }
5042 
5043 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
5044 {
5045  outInfo.BlockCount = 1;
5046 
5047  const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
5048  outInfo.AllocationCount = rangeCount - block.m_FreeCount;
5049  outInfo.UnusedRangeCount = block.m_FreeCount;
5050 
5051  outInfo.UnusedBytes = block.m_SumFreeSize;
5052  outInfo.UsedBytes = block.m_Size - outInfo.UnusedBytes;
5053 
5054  outInfo.AllocationSizeMin = UINT64_MAX;
5055  outInfo.AllocationSizeMax = 0;
5056  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5057  outInfo.UnusedRangeSizeMax = 0;
5058 
5059  for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
5060  suballocItem != block.m_Suballocations.cend();
5061  ++suballocItem)
5062  {
5063  const VmaSuballocation& suballoc = *suballocItem;
5064  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5065  {
5066  outInfo.AllocationSizeMin = VMA_MIN(outInfo.AllocationSizeMin, suballoc.size);
5067  outInfo.AllocationSizeMax = VMA_MAX(outInfo.AllocationSizeMax, suballoc.size);
5068  }
5069  else
5070  {
5071  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
5072  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
5073  }
5074  }
5075 }
5076 
5077 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5078 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5079 {
5080  inoutInfo.BlockCount += srcInfo.BlockCount;
5081  inoutInfo.AllocationCount += srcInfo.AllocationCount;
5082  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
5083  inoutInfo.UsedBytes += srcInfo.UsedBytes;
5084  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
5085  inoutInfo.AllocationSizeMin = VMA_MIN(inoutInfo.AllocationSizeMin, srcInfo.AllocationSizeMin);
5086  inoutInfo.AllocationSizeMax = VMA_MAX(inoutInfo.AllocationSizeMax, srcInfo.AllocationSizeMax);
5087  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
5088  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
5089 }
5090 
5091 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5092 {
5093  inoutInfo.AllocationSizeAvg = (inoutInfo.AllocationCount > 0) ?
5094  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.AllocationCount) : 0;
5095  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
5096  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
5097 }
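// A minimal aggregation sketch using the three helpers above; infoA and infoB
// stand for per-block results of CalcAllocationStatInfo():
//
//   VmaStatInfo total;
//   InitStatInfo(total);               // zero counters, mins start at UINT64_MAX
//   VmaAddStatInfo(total, infoA);      // total += infoA
//   VmaAddStatInfo(total, infoB);      // total += infoB
//   VmaPostprocessCalcStatInfo(total); // derive the *Avg fields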
5098 
5099 VmaPool_T::VmaPool_T(
5100  VmaAllocator hAllocator,
5101  const VmaPoolCreateInfo& createInfo) :
5102  m_BlockVector(
5103  hAllocator,
5104  createInfo.memoryTypeIndex,
5105  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5106  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5107  createInfo.blockSize,
5108  createInfo.minBlockCount,
5109  createInfo.maxBlockCount,
5110  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5111  createInfo.frameInUseCount,
5112  true) // isCustomPool
5113 {
5114 }
5115 
5116 VmaPool_T::~VmaPool_T()
5117 {
5118 }
5119 
5120 #if VMA_STATS_STRING_ENABLED
5121 
5122 #endif // #if VMA_STATS_STRING_ENABLED
5123 
5124 VmaBlockVector::VmaBlockVector(
5125  VmaAllocator hAllocator,
5126  uint32_t memoryTypeIndex,
5127  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5128  VkDeviceSize preferredBlockSize,
5129  size_t minBlockCount,
5130  size_t maxBlockCount,
5131  VkDeviceSize bufferImageGranularity,
5132  uint32_t frameInUseCount,
5133  bool isCustomPool) :
5134  m_hAllocator(hAllocator),
5135  m_MemoryTypeIndex(memoryTypeIndex),
5136  m_BlockVectorType(blockVectorType),
5137  m_PreferredBlockSize(preferredBlockSize),
5138  m_MinBlockCount(minBlockCount),
5139  m_MaxBlockCount(maxBlockCount),
5140  m_BufferImageGranularity(bufferImageGranularity),
5141  m_FrameInUseCount(frameInUseCount),
5142  m_IsCustomPool(isCustomPool),
5143  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5144  m_HasEmptyBlock(false),
5145  m_pDefragmentator(VMA_NULL)
5146 {
5147 }
5148 
5149 VmaBlockVector::~VmaBlockVector()
5150 {
5151  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5152 
5153  for(size_t i = m_Blocks.size(); i--; )
5154  {
5155  m_Blocks[i]->Destroy(m_hAllocator);
5156  vma_delete(m_hAllocator, m_Blocks[i]);
5157  }
5158 }
5159 
5160 VkResult VmaBlockVector::CreateMinBlocks()
5161 {
5162  for(size_t i = 0; i < m_MinBlockCount; ++i)
5163  {
5164  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5165  if(res != VK_SUCCESS)
5166  {
5167  return res;
5168  }
5169  }
5170  return VK_SUCCESS;
5171 }
5172 
5173 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5174 {
5175  pStats->size = 0;
5176  pStats->unusedSize = 0;
5177  pStats->allocationCount = 0;
5178  pStats->unusedRangeCount = 0;
5179 
5180  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5181 
5182  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5183  {
5184  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5185  VMA_ASSERT(pBlock);
5186  VMA_HEAVY_ASSERT(pBlock->Validate());
5187 
5188  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5189 
5190  pStats->size += pBlock->m_Size;
5191  pStats->unusedSize += pBlock->m_SumFreeSize;
5192  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5193  pStats->unusedRangeCount += pBlock->m_FreeCount;
5194  }
5195 }
5196 
5197 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5198 
5199 VkResult VmaBlockVector::Allocate(
5200  VmaPool hCurrentPool,
5201  uint32_t currentFrameIndex,
5202  const VkMemoryRequirements& vkMemReq,
5203  const VmaAllocationCreateInfo& createInfo,
5204  VmaSuballocationType suballocType,
5205  VmaAllocation* pAllocation)
5206 {
5207  // Validate flags.
5208  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5209  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5210  {
5211  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5212  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5213  }
5214 
5215  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5216 
5217  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5218  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5219  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5220  {
5221  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5222  VMA_ASSERT(pCurrBlock);
5223  VmaAllocationRequest currRequest = {};
5224  if(pCurrBlock->CreateAllocationRequest(
5225  currentFrameIndex,
5226  m_FrameInUseCount,
5227  m_BufferImageGranularity,
5228  vkMemReq.size,
5229  vkMemReq.alignment,
5230  suballocType,
5231  false, // canMakeOtherLost
5232  &currRequest))
5233  {
5234  // Allocate from pCurrBlock.
5235  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5236 
5237  // We no longer have an empty block.
5238  if(pCurrBlock->IsEmpty())
5239  {
5240  m_HasEmptyBlock = false;
5241  }
5242 
5243  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5244  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5245  (*pAllocation)->InitBlockAllocation(
5246  hCurrentPool,
5247  pCurrBlock,
5248  currRequest.offset,
5249  vkMemReq.alignment,
5250  vkMemReq.size,
5251  suballocType,
5252  createInfo.pUserData,
5253  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5254  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5255  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5256  return VK_SUCCESS;
5257  }
5258  }
5259 
5260  const bool canCreateNewBlock =
5261  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5262  (m_Blocks.size() < m_MaxBlockCount);
5263 
5264  // 2. Try to create new block.
5265  if(canCreateNewBlock)
5266  {
5267  // 2.1. Start with full preferredBlockSize.
5268  VkDeviceSize blockSize = m_PreferredBlockSize;
5269  size_t newBlockIndex = 0;
5270  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5271  // Allocating blocks of other sizes is allowed only in default pools.
5272  // In custom pools block size is fixed.
5273  if(res < 0 && m_IsCustomPool == false)
5274  {
5275  // 2.2. Try half the size.
5276  blockSize /= 2;
5277  if(blockSize >= vkMemReq.size)
5278  {
5279  res = CreateBlock(blockSize, &newBlockIndex);
5280  if(res < 0)
5281  {
5282  // 2.3. Try quarter the size.
5283  blockSize /= 2;
5284  if(blockSize >= vkMemReq.size)
5285  {
5286  res = CreateBlock(blockSize, &newBlockIndex);
5287  }
5288  }
5289  }
5290  }
5291  if(res == VK_SUCCESS)
5292  {
5293  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5294  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5295 
5296  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
5297  VmaAllocationRequest allocRequest = {};
5298  allocRequest.item = pBlock->m_Suballocations.begin();
5299  allocRequest.offset = 0;
5300  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5301  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5302  (*pAllocation)->InitBlockAllocation(
5303  hCurrentPool,
5304  pBlock,
5305  allocRequest.offset,
5306  vkMemReq.alignment,
5307  vkMemReq.size,
5308  suballocType,
5309  createInfo.pUserData,
5310  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5311  VMA_HEAVY_ASSERT(pBlock->Validate());
5312  VMA_DEBUG_LOG("  Created new allocation Size=%llu", blockSize);
5313 
5314  return VK_SUCCESS;
5315  }
5316  }
5317 
5318  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5319 
5320  // 3. Try to allocate from existing blocks with making other allocations lost.
5321  if(canMakeOtherLost)
5322  {
5323  uint32_t tryIndex = 0;
5324  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5325  {
5326  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5327  VmaAllocationRequest bestRequest = {};
5328  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5329 
5330  // 1. Search existing allocations.
5331  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5332  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5333  {
5334  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5335  VMA_ASSERT(pCurrBlock);
5336  VmaAllocationRequest currRequest = {};
5337  if(pCurrBlock->CreateAllocationRequest(
5338  currentFrameIndex,
5339  m_FrameInUseCount,
5340  m_BufferImageGranularity,
5341  vkMemReq.size,
5342  vkMemReq.alignment,
5343  suballocType,
5344  canMakeOtherLost,
5345  &currRequest))
5346  {
5347  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5348  if(pBestRequestBlock == VMA_NULL ||
5349  currRequestCost < bestRequestCost)
5350  {
5351  pBestRequestBlock = pCurrBlock;
5352  bestRequest = currRequest;
5353  bestRequestCost = currRequestCost;
5354 
5355  if(bestRequestCost == 0)
5356  {
5357  break;
5358  }
5359  }
5360  }
5361  }
5362 
5363  if(pBestRequestBlock != VMA_NULL)
5364  {
5365  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5366  currentFrameIndex,
5367  m_FrameInUseCount,
5368  &bestRequest))
5369  {
5370  // We no longer have an empty block.
5371  if(pBestRequestBlock->IsEmpty())
5372  {
5373  m_HasEmptyBlock = false;
5374  }
5375  // Allocate from this pBlock.
5376  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5377  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5378  (*pAllocation)->InitBlockAllocation(
5379  hCurrentPool,
5380  pBestRequestBlock,
5381  bestRequest.offset,
5382  vkMemReq.alignment,
5383  vkMemReq.size,
5384  suballocType,
5385  createInfo.pUserData,
5386  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5387  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5388  VMA_DEBUG_LOG("  Returned from existing block");
5389  return VK_SUCCESS;
5390  }
5391  // else: Some allocations must have been touched while we are here. Next try.
5392  }
5393  else
5394  {
5395  // Could not find place in any of the blocks - break outer loop.
5396  break;
5397  }
5398  }
5399  /* Maximum number of tries exceeded - a very unlikely event that happens when many
5400  other threads are simultaneously touching the allocations, making it impossible to
5401  make them lost at the same time as we try to allocate. */
5402  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5403  {
5404  return VK_ERROR_TOO_MANY_OBJECTS;
5405  }
5406  }
5407 
5408  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5409 }
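// Block-size fallback example for step 2 above (default pools only,
// hypothetical sizes): with m_PreferredBlockSize = 256 MiB and
// vkMemReq.size = 100 MiB, CreateBlock() is tried at 256 MiB, then at
// 128 MiB; the next halving to 64 MiB is skipped because such a block could
// no longer hold the request.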
5410 
5411 void VmaBlockVector::Free(
5412  VmaAllocation hAllocation)
5413 {
5414  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5415 
5416  // Scope for lock.
5417  {
5418  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5419 
5420  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5421 
5422  pBlock->Free(hAllocation);
5423  VMA_HEAVY_ASSERT(pBlock->Validate());
5424 
5425  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5426 
5427  // pBlock became empty after this deallocation.
5428  if(pBlock->IsEmpty())
5429  {
5430  // Already have an empty block. We don't want to have two, so delete this one.
5431  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5432  {
5433  pBlockToDelete = pBlock;
5434  Remove(pBlock);
5435  }
5436  // We now have our first empty block.
5437  else
5438  {
5439  m_HasEmptyBlock = true;
5440  }
5441  }
5442  // Must be called last, because it may reorder m_Blocks and invalidate any index into it!
5443  IncrementallySortBlocks();
5444  }
5445 
5446  // Destruction of an empty block. Deferred until this point, outside of the mutex
5447  // lock, for performance reasons.
5448  if(pBlockToDelete != VMA_NULL)
5449  {
5450  VMA_DEBUG_LOG("  Deleted empty block");
5451  pBlockToDelete->Destroy(m_hAllocator);
5452  vma_delete(m_hAllocator, pBlockToDelete);
5453  }
5454 }
5455 
5456 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5457 {
5458  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5459  {
5460  if(m_Blocks[blockIndex] == pBlock)
5461  {
5462  VmaVectorRemove(m_Blocks, blockIndex);
5463  return;
5464  }
5465  }
5466  VMA_ASSERT(0);
5467 }
5468 
5469 void VmaBlockVector::IncrementallySortBlocks()
5470 {
5471  // Bubble sort only until first swap.
5472  for(size_t i = 1; i < m_Blocks.size(); ++i)
5473  {
5474  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5475  {
5476  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5477  return;
5478  }
5479  }
5480 }
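// Design note: performing at most one swap per call keeps Free() cheap while
// repeated calls converge toward m_Blocks ordered by ascending m_SumFreeSize -
// the "prefer blocks with smallest amount of free space" order that
// Allocate() scans forward.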
5481 
5482 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5483 {
5484  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5485  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5486  allocInfo.allocationSize = blockSize;
5487  VkDeviceMemory mem = VK_NULL_HANDLE;
5488  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5489  if(res < 0)
5490  {
5491  return res;
5492  }
5493 
5494  // New VkDeviceMemory successfully created.
5495 
5496  // Map memory if needed.
5497  void* pMappedData = VMA_NULL;
5498  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5499  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5500  {
5501  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5502  m_hAllocator->m_hDevice,
5503  mem,
5504  0,
5505  VK_WHOLE_SIZE,
5506  0,
5507  &pMappedData);
5508  if(res < 0)
5509  {
5510  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5511  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5512  return res;
5513  }
5514  }
5515 
5516  // Create a new block object for it.
5517  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5518  pBlock->Init(
5519  m_MemoryTypeIndex,
5520  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5521  mem,
5522  allocInfo.allocationSize,
5523  persistentMap,
5524  pMappedData);
5525 
5526  m_Blocks.push_back(pBlock);
5527  if(pNewBlockIndex != VMA_NULL)
5528  {
5529  *pNewBlockIndex = m_Blocks.size() - 1;
5530  }
5531 
5532  return VK_SUCCESS;
5533 }
5534 
5535 #if VMA_STATS_STRING_ENABLED
5536 
5537 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5538 {
5539  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5540 
5541  json.BeginObject();
5542 
5543  if(m_IsCustomPool)
5544  {
5545  json.WriteString("MemoryTypeIndex");
5546  json.WriteNumber(m_MemoryTypeIndex);
5547 
5548  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5549  {
5550  json.WriteString("Mapped");
5551  json.WriteBool(true);
5552  }
5553 
5554  json.WriteString("BlockSize");
5555  json.WriteNumber(m_PreferredBlockSize);
5556 
5557  json.WriteString("BlockCount");
5558  json.BeginObject(true);
5559  if(m_MinBlockCount > 0)
5560  {
5561  json.WriteString("Min");
5562  json.WriteNumber(m_MinBlockCount);
5563  }
5564  if(m_MaxBlockCount < SIZE_MAX)
5565  {
5566  json.WriteString("Max");
5567  json.WriteNumber(m_MaxBlockCount);
5568  }
5569  json.WriteString("Cur");
5570  json.WriteNumber(m_Blocks.size());
5571  json.EndObject();
5572 
5573  if(m_FrameInUseCount > 0)
5574  {
5575  json.WriteString("FrameInUseCount");
5576  json.WriteNumber(m_FrameInUseCount);
5577  }
5578  }
5579  else
5580  {
5581  json.WriteString("PreferredBlockSize");
5582  json.WriteNumber(m_PreferredBlockSize);
5583  }
5584 
5585  json.WriteString("Blocks");
5586  json.BeginArray();
5587  for(size_t i = 0; i < m_Blocks.size(); ++i)
5588  {
5589  m_Blocks[i]->PrintDetailedMap(json);
5590  }
5591  json.EndArray();
5592 
5593  json.EndObject();
5594 }
5595 
5596 #endif // #if VMA_STATS_STRING_ENABLED
5597 
5598 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5599 {
5600  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5601 
5602  for(size_t i = m_Blocks.size(); i--; )
5603  {
5604  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5605  if(pBlock->m_pMappedData != VMA_NULL)
5606  {
5607  VMA_ASSERT(pBlock->m_PersistentMap);
5608  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5609  pBlock->m_pMappedData = VMA_NULL;
5610  }
5611  }
5612 }
5613 
5614 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5615 {
5616  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5617 
5618  VkResult finalResult = VK_SUCCESS;
5619  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5620  {
5621  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5622  if(pBlock->m_PersistentMap)
5623  {
5624  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5625  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5626  m_hAllocator->m_hDevice,
5627  pBlock->m_hMemory,
5628  0,
5629  VK_WHOLE_SIZE,
5630  0,
5631  &pBlock->m_pMappedData);
5632  if(localResult != VK_SUCCESS)
5633  {
5634  finalResult = localResult;
5635  }
5636  }
5637  }
5638  return finalResult;
5639 }
5640 
5641 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5642  VmaAllocator hAllocator,
5643  uint32_t currentFrameIndex)
5644 {
5645  if(m_pDefragmentator == VMA_NULL)
5646  {
5647  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5648  hAllocator,
5649  this,
5650  currentFrameIndex);
5651  }
5652 
5653  return m_pDefragmentator;
5654 }
5655 
5656 VkResult VmaBlockVector::Defragment(
5657  VmaDefragmentationStats* pDefragmentationStats,
5658  VkDeviceSize& maxBytesToMove,
5659  uint32_t& maxAllocationsToMove)
5660 {
5661  if(m_pDefragmentator == VMA_NULL)
5662  {
5663  return VK_SUCCESS;
5664  }
5665 
5666  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5667 
5668  // Defragment.
5669  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5670 
5671  // Accumulate statistics.
5672  if(pDefragmentationStats != VMA_NULL)
5673  {
5674  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5675  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5676  pDefragmentationStats->bytesMoved += bytesMoved;
5677  pDefragmentationStats->allocationsMoved += allocationsMoved;
5678  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5679  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5680  maxBytesToMove -= bytesMoved;
5681  maxAllocationsToMove -= allocationsMoved;
5682  }
5683 
5684  // Free empty blocks.
5685  m_HasEmptyBlock = false;
5686  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5687  {
5688  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5689  if(pBlock->IsEmpty())
5690  {
5691  if(m_Blocks.size() > m_MinBlockCount)
5692  {
5693  if(pDefragmentationStats != VMA_NULL)
5694  {
5695  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5696  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5697  }
5698 
5699  VmaVectorRemove(m_Blocks, blockIndex);
5700  pBlock->Destroy(m_hAllocator);
5701  vma_delete(m_hAllocator, pBlock);
5702  }
5703  else
5704  {
5705  m_HasEmptyBlock = true;
5706  }
5707  }
5708  }
5709 
5710  return result;
5711 }
5712 
5713 void VmaBlockVector::DestroyDefragmentator()
5714 {
5715  if(m_pDefragmentator != VMA_NULL)
5716  {
5717  vma_delete(m_hAllocator, m_pDefragmentator);
5718  m_pDefragmentator = VMA_NULL;
5719  }
5720 }
5721 
5722 void VmaBlockVector::MakePoolAllocationsLost(
5723  uint32_t currentFrameIndex,
5724  size_t* pLostAllocationCount)
5725 {
5726  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5727 
5728  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5729  {
5730  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5731  VMA_ASSERT(pBlock);
5732  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5733  }
5734 }
5735 
5736 void VmaBlockVector::AddStats(VmaStats* pStats)
5737 {
5738  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5739  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5740 
5741  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5742 
5743  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5744  {
5745  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5746  VMA_ASSERT(pBlock);
5747  VMA_HEAVY_ASSERT(pBlock->Validate());
5748  VmaStatInfo allocationStatInfo;
5749  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5750  VmaAddStatInfo(pStats->total, allocationStatInfo);
5751  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5752  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5753  }
5754 }
5755 
5757 // VmaDefragmentator members definition
5758 
5759 VmaDefragmentator::VmaDefragmentator(
5760  VmaAllocator hAllocator,
5761  VmaBlockVector* pBlockVector,
5762  uint32_t currentFrameIndex) :
5763  m_hAllocator(hAllocator),
5764  m_pBlockVector(pBlockVector),
5765  m_CurrentFrameIndex(currentFrameIndex),
5766  m_BytesMoved(0),
5767  m_AllocationsMoved(0),
5768  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5769  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5770 {
5771 }
5772 
5773 VmaDefragmentator::~VmaDefragmentator()
5774 {
5775  for(size_t i = m_Blocks.size(); i--; )
5776  {
5777  vma_delete(m_hAllocator, m_Blocks[i]);
5778  }
5779 }
5780 
5781 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5782 {
5783  AllocationInfo allocInfo;
5784  allocInfo.m_hAllocation = hAlloc;
5785  allocInfo.m_pChanged = pChanged;
5786  m_Allocations.push_back(allocInfo);
5787 }
5788 
5789 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5790 {
5791  // It has already been mapped for defragmentation.
5792  if(m_pMappedDataForDefragmentation)
5793  {
5794  *ppMappedData = m_pMappedDataForDefragmentation;
5795  return VK_SUCCESS;
5796  }
5797 
5798  // It is persistently mapped.
5799  if(m_pBlock->m_PersistentMap)
5800  {
5801  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5802  *ppMappedData = m_pBlock->m_pMappedData;
5803  return VK_SUCCESS;
5804  }
5805 
5806  // Map on first usage.
5807  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5808  hAllocator->m_hDevice,
5809  m_pBlock->m_hMemory,
5810  0,
5811  VK_WHOLE_SIZE,
5812  0,
5813  &m_pMappedDataForDefragmentation);
5814  *ppMappedData = m_pMappedDataForDefragmentation;
5815  return res;
5816 }
5817 
5818 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5819 {
5820  if(m_pMappedDataForDefragmentation != VMA_NULL)
5821  {
5822  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5823  }
5824 }
5825 
5826 VkResult VmaDefragmentator::DefragmentRound(
5827  VkDeviceSize maxBytesToMove,
5828  uint32_t maxAllocationsToMove)
5829 {
5830  if(m_Blocks.empty())
5831  {
5832  return VK_SUCCESS;
5833  }
5834 
5835  size_t srcBlockIndex = m_Blocks.size() - 1;
5836  size_t srcAllocIndex = SIZE_MAX;
5837  for(;;)
5838  {
5839  // 1. Find next allocation to move.
5840  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5841  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5842  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5843  {
5844  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5845  {
5846  // Finished: no more allocations to process.
5847  if(srcBlockIndex == 0)
5848  {
5849  return VK_SUCCESS;
5850  }
5851  else
5852  {
5853  --srcBlockIndex;
5854  srcAllocIndex = SIZE_MAX;
5855  }
5856  }
5857  else
5858  {
5859  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5860  }
5861  }
5862 
5863  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5864  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5865 
5866  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5867  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5868  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5869  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5870 
5871  // 2. Try to find new place for this allocation in preceding or current block.
5872  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5873  {
5874  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5875  VmaAllocationRequest dstAllocRequest;
5876  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
5877  m_CurrentFrameIndex,
5878  m_pBlockVector->GetFrameInUseCount(),
5879  m_pBlockVector->GetBufferImageGranularity(),
5880  size,
5881  alignment,
5882  suballocType,
5883  false, // canMakeOtherLost
5884  &dstAllocRequest) &&
5885  MoveMakesSense(
5886  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5887  {
5888  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5889 
5890  // Reached limit on number of allocations or bytes to move.
5891  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5892  (m_BytesMoved + size > maxBytesToMove))
5893  {
5894  return VK_INCOMPLETE;
5895  }
5896 
5897  void* pDstMappedData = VMA_NULL;
5898  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
5899  if(res != VK_SUCCESS)
5900  {
5901  return res;
5902  }
5903 
5904  void* pSrcMappedData = VMA_NULL;
5905  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
5906  if(res != VK_SUCCESS)
5907  {
5908  return res;
5909  }
5910 
5911  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
5912  memcpy(
5913  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
5914  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
5915  static_cast<size_t>(size));
5916 
5917  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
5918  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
5919 
5920  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
5921 
5922  if(allocInfo.m_pChanged != VMA_NULL)
5923  {
5924  *allocInfo.m_pChanged = VK_TRUE;
5925  }
5926 
5927  ++m_AllocationsMoved;
5928  m_BytesMoved += size;
5929 
5930  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
5931 
5932  break;
5933  }
5934  }
5935 
5936  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
5937 
5938  if(srcAllocIndex > 0)
5939  {
5940  --srcAllocIndex;
5941  }
5942  else
5943  {
5944  if(srcBlockIndex > 0)
5945  {
5946  --srcBlockIndex;
5947  srcAllocIndex = SIZE_MAX;
5948  }
5949  else
5950  {
5951  return VK_SUCCESS;
5952  }
5953  }
5954  }
5955 }
5956 
5957 VkResult VmaDefragmentator::Defragment(
5958  VkDeviceSize maxBytesToMove,
5959  uint32_t maxAllocationsToMove)
5960 {
5961  if(m_Allocations.empty())
5962  {
5963  return VK_SUCCESS;
5964  }
5965 
5966  // Create block info for each block.
5967  const size_t blockCount = m_pBlockVector->m_Blocks.size();
5968  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5969  {
5970  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
5971  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
5972  m_Blocks.push_back(pBlockInfo);
5973  }
5974 
5975  // Sort them by m_pBlock pointer value.
5976  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
5977 
5978  // Move allocation infos from m_Allocations to the m_Allocations of the matching entry in m_Blocks.
5979  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
5980  {
5981  AllocationInfo& allocInfo = m_Allocations[blockIndex];
5982  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
5983  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
5984  {
5985  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
5986  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
5987  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
5988  {
5989  (*it)->m_Allocations.push_back(allocInfo);
5990  }
5991  else
5992  {
5993  VMA_ASSERT(0);
5994  }
5995  }
5996  }
5997  m_Allocations.clear();
5998 
5999  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6000  {
6001  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6002  pBlockInfo->CalcHasNonMovableAllocations();
6003  pBlockInfo->SortAllocationsBySizeDescecnding();
6004  }
6005 
6006  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6007  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6008 
6009  // Execute defragmentation rounds (the main part).
6010  VkResult result = VK_SUCCESS;
6011  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6012  {
6013  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6014  }
6015 
6016  // Unmap blocks that were mapped for defragmentation.
6017  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6018  {
6019  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6020  }
6021 
6022  return result;
6023 }
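// Rough flow of a defragmentation pass, as driven from
// VmaBlockVector::Defragment() above (hAlloc, frameIndex and stats are
// hypothetical locals):
//
//   VmaDefragmentator* pDefrag =
//       blockVector.EnsureDefragmentator(hAllocator, frameIndex);
//   VkBool32 changed = VK_FALSE;
//   pDefrag->AddAllocation(hAlloc, &changed); // register each candidate
//   VkResult res = blockVector.Defragment(&stats, maxBytesToMove,
//       maxAllocationsToMove);                // runs the rounds above
//   blockVector.DestroyDefragmentator();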
6024 
6025 bool VmaDefragmentator::MoveMakesSense(
6026  size_t dstBlockIndex, VkDeviceSize dstOffset,
6027  size_t srcBlockIndex, VkDeviceSize srcOffset)
6028 {
6029  if(dstBlockIndex < srcBlockIndex)
6030  {
6031  return true;
6032  }
6033  if(dstBlockIndex > srcBlockIndex)
6034  {
6035  return false;
6036  }
6037  if(dstOffset < srcOffset)
6038  {
6039  return true;
6040  }
6041  return false;
6042 }
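// MoveMakesSense() effectively compares (blockIndex, offset) pairs
// lexicographically: moving from (block 2, offset 4096) to
// (block 0, offset 0) is accepted, while any move to a later block, or
// forward within the same block, is rejected as not compacting.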
6043 
6045 // VmaAllocator_T
6046 
6047 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6048  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6049  m_PhysicalDevice(pCreateInfo->physicalDevice),
6050  m_hDevice(pCreateInfo->device),
6051  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6052  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6053  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6054  m_UnmapPersistentlyMappedMemoryCounter(0),
6055  m_PreferredLargeHeapBlockSize(0),
6056  m_PreferredSmallHeapBlockSize(0),
6057  m_CurrentFrameIndex(0),
6058  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6059 {
6060  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6061 
6062  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6063  memset(&m_MemProps, 0, sizeof(m_MemProps));
6064  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6065 
6066  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6067  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
6068 
6069  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6070  {
6071  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6072  }
6073 
6074  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6075  {
6076  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6077  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6078  }
6079 
6080  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6081 
6082  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6083  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6084 
6085  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6086  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6087  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6088  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6089 
6090  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6091  {
6092  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6093  {
6094  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6095  if(limit != VK_WHOLE_SIZE)
6096  {
6097  m_HeapSizeLimit[heapIndex] = limit;
6098  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6099  {
6100  m_MemProps.memoryHeaps[heapIndex].size = limit;
6101  }
6102  }
6103  }
6104  }
6105 
6106  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6107  {
6108  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6109 
6110  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6111  {
6112  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6113  this,
6114  memTypeIndex,
6115  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6116  preferredBlockSize,
6117  0,
6118  SIZE_MAX,
6119  GetBufferImageGranularity(),
6120  pCreateInfo->frameInUseCount,
6121  false); // isCustomPool
6122  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6123  // because minBlockCount is 0.
6124  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6125  }
6126  }
6127 }
6128 
6129 VmaAllocator_T::~VmaAllocator_T()
6130 {
6131  VMA_ASSERT(m_Pools.empty());
6132 
6133  for(size_t i = GetMemoryTypeCount(); i--; )
6134  {
6135  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6136  {
6137  vma_delete(this, m_pOwnAllocations[i][j]);
6138  vma_delete(this, m_pBlockVectors[i][j]);
6139  }
6140  }
6141 }
6142 
6143 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6144 {
6145 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6146  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6147  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6148  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6149  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6150  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6151  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6152  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6153  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6154  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6155  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6156  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6157  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6158  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6159  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6160 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6161 
6162  if(pVulkanFunctions != VMA_NULL)
6163  {
6164  m_VulkanFunctions = *pVulkanFunctions;
6165  }
6166 
6167  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6168  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6169  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6170  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6171  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6172  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6173  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6174  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6175  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6176  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6177  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6178  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6179  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6180  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6181  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6182  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6183 }
6184 
6185 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6186 {
6187  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6188  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6189  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6190  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6191 }
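// Example (hypothetical limit): if VMA_SMALL_HEAP_MAX_SIZE were 512 MiB, a
// 256 MiB heap would use m_PreferredSmallHeapBlockSize while an 8 GiB heap
// would use m_PreferredLargeHeapBlockSize.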
6192 
6193 VkResult VmaAllocator_T::AllocateMemoryOfType(
6194  const VkMemoryRequirements& vkMemReq,
6195  const VmaAllocationCreateInfo& createInfo,
6196  uint32_t memTypeIndex,
6197  VmaSuballocationType suballocType,
6198  VmaAllocation* pAllocation)
6199 {
6200  VMA_ASSERT(pAllocation != VMA_NULL);
6201  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6202 
6203  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6204  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6205  VMA_ASSERT(blockVector);
6206 
6207  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6208  // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
6209  const bool ownMemory =
6210  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
6211  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
6212  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6213  vkMemReq.size > preferredBlockSize / 2);
6214 
6215  if(ownMemory)
6216  {
6217  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6218  {
6219  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6220  }
6221  else
6222  {
6223  return AllocateOwnMemory(
6224  vkMemReq.size,
6225  suballocType,
6226  memTypeIndex,
6227  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6228  createInfo.pUserData,
6229  pAllocation);
6230  }
6231  }
6232  else
6233  {
6234  VkResult res = blockVector->Allocate(
6235  VK_NULL_HANDLE, // hCurrentPool
6236  m_CurrentFrameIndex.load(),
6237  vkMemReq,
6238  createInfo,
6239  suballocType,
6240  pAllocation);
6241  if(res == VK_SUCCESS)
6242  {
6243  return res;
6244  }
6245 
6246  // Try own memory as a last resort.
6247  res = AllocateOwnMemory(
6248  vkMemReq.size,
6249  suballocType,
6250  memTypeIndex,
6251  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6252  createInfo.pUserData,
6253  pAllocation);
6254  if(res == VK_SUCCESS)
6255  {
6256  // Succeeded: AllocateOwnMemory already filled *pAllocation, nothing more to do here.
6257  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6258  return VK_SUCCESS;
6259  }
6260  else
6261  {
6262  // Everything failed: Return error code.
6263  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6264  return res;
6265  }
6266  }
6267 }
6268 
6269 VkResult VmaAllocator_T::AllocateOwnMemory(
6270  VkDeviceSize size,
6271  VmaSuballocationType suballocType,
6272  uint32_t memTypeIndex,
6273  bool map,
6274  void* pUserData,
6275  VmaAllocation* pAllocation)
6276 {
6277  VMA_ASSERT(pAllocation);
6278 
6279  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6280  allocInfo.memoryTypeIndex = memTypeIndex;
6281  allocInfo.allocationSize = size;
6282 
6283  // Allocate VkDeviceMemory.
6284  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6285  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6286  if(res < 0)
6287  {
6288  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6289  return res;
6290  }
6291 
6292  void* pMappedData = VMA_NULL;
6293  if(map)
6294  {
6295  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6296  {
6297  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6298  if(res < 0)
6299  {
6300  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6301  FreeVulkanMemory(memTypeIndex, size, hMemory);
6302  return res;
6303  }
6304  }
6305  }
6306 
6307  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6308  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6309 
6310  // Register it in m_pOwnAllocations.
6311  {
6312  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6313  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6314  VMA_ASSERT(pOwnAllocations);
6315  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6316  }
6317 
6318  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6319 
6320  return VK_SUCCESS;
6321 }
6322 
6323 VkResult VmaAllocator_T::AllocateMemory(
6324  const VkMemoryRequirements& vkMemReq,
6325  const VmaAllocationCreateInfo& createInfo,
6326  VmaSuballocationType suballocType,
6327  VmaAllocation* pAllocation)
6328 {
6329  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6330  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6331  {
6332  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6333  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6334  }
6335  if((createInfo.pool != VK_NULL_HANDLE) &&
6336  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6337  {
6338  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6340  }
6341 
6342  if(createInfo.pool != VK_NULL_HANDLE)
6343  {
6344  return createInfo.pool->m_BlockVector.Allocate(
6345  createInfo.pool,
6346  m_CurrentFrameIndex.load(),
6347  vkMemReq,
6348  createInfo,
6349  suballocType,
6350  pAllocation);
6351  }
6352  else
6353  {
6354  // Bit mask of Vulkan memory types acceptable for this allocation.
6355  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6356  uint32_t memTypeIndex = UINT32_MAX;
6357  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6358  if(res == VK_SUCCESS)
6359  {
6360  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6361  // Succeeded on first try.
6362  if(res == VK_SUCCESS)
6363  {
6364  return res;
6365  }
6366  // Allocation from this memory type failed. Try other compatible memory types.
6367  else
6368  {
6369  for(;;)
6370  {
6371  // Remove old memTypeIndex from list of possibilities.
6372  memoryTypeBits &= ~(1u << memTypeIndex);
6373  // Find alternative memTypeIndex.
6374  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6375  if(res == VK_SUCCESS)
6376  {
6377  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6378  // Allocation from this alternative memory type succeeded.
6379  if(res == VK_SUCCESS)
6380  {
6381  return res;
6382  }
6383  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6384  }
6385  // No other matching memory type index could be found.
6386  else
6387  {
6388  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6389  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6390  }
6391  }
6392  }
6393  }
6394  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6395  else
6396  return res;
6397  }
6398 }
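// Fallback illustration for the loop above: if memoryTypeBits starts as
// 0b0111 and allocation from type 0 fails, memoryTypeBits &= ~(1u << 0)
// leaves 0b0110 for the next vmaFindMemoryTypeIndex() call, so every
// compatible memory type is tried at most once before giving up.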
6399 
6400 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6401 {
6402  VMA_ASSERT(allocation);
6403 
6404  if(allocation->CanBecomeLost() == false ||
6405  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6406  {
6407  switch(allocation->GetType())
6408  {
6409  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6410  {
6411  VmaBlockVector* pBlockVector = VMA_NULL;
6412  VmaPool hPool = allocation->GetPool();
6413  if(hPool != VK_NULL_HANDLE)
6414  {
6415  pBlockVector = &hPool->m_BlockVector;
6416  }
6417  else
6418  {
6419  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6420  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6421  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6422  }
6423  pBlockVector->Free(allocation);
6424  }
6425  break;
6426  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6427  FreeOwnMemory(allocation);
6428  break;
6429  default:
6430  VMA_ASSERT(0);
6431  }
6432  }
6433 
6434  vma_delete(this, allocation);
6435 }
6436 
6437 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6438 {
6439  // Initialize.
6440  InitStatInfo(pStats->total);
6441  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6442  InitStatInfo(pStats->memoryType[i]);
6443  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6444  InitStatInfo(pStats->memoryHeap[i]);
6445 
6446  // Process default pools.
6447  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6448  {
6449  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6450  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6451  {
6452  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6453  VMA_ASSERT(pBlockVector);
6454  pBlockVector->AddStats(pStats);
6455  }
6456  }
6457 
6458  // Process custom pools.
6459  {
6460  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6461  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6462  {
6463  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6464  }
6465  }
6466 
6467  // Process own allocations.
6468  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6469  {
6470  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6471  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6472  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6473  {
6474  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6475  VMA_ASSERT(pOwnAllocVector);
6476  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6477  {
6478  VmaStatInfo allocationStatInfo;
6479  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6480  VmaAddStatInfo(pStats->total, allocationStatInfo);
6481  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6482  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6483  }
6484  }
6485  }
6486 
6487  // Postprocess.
6488  VmaPostprocessCalcStatInfo(pStats->total);
6489  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6490  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6491  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6492  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6493 }
6494 
6495 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
6496 
6497 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6498 {
6499  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6500  {
6501  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6502  {
6503  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6504  {
6505  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6506  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6507  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6508  {
6509  // Process OwnAllocations.
6510  {
6511  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6512  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6513  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6514  {
6515  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6516  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
6517  }
6518  }
6519 
6520  // Process normal Allocations.
6521  {
6522  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6523  pBlockVector->UnmapPersistentlyMappedMemory();
6524  }
6525  }
6526  }
6527 
6528  // Process custom pools.
6529  {
6530  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6531  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6532  {
6533  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6534  }
6535  }
6536  }
6537  }
6538 }
6539 
6540 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6541 {
6542  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6543  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6544  {
6545  VkResult finalResult = VK_SUCCESS;
6546  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6547  {
6548  // Process custom pools.
6549  {
6550  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6551  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6552  {
6553  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6554  }
6555  }
6556 
6557  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6558  {
6559  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6560  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6561  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6562  {
6563  // Process OwnAllocations.
6564  {
6565  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6566  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6567  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6568  {
6569  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6570  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
6571  }
6572  }
6573 
6574  // Process normal Allocations.
6575  {
6576  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6577  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6578  if(localResult != VK_SUCCESS)
6579  {
6580  finalResult = localResult;
6581  }
6582  }
6583  }
6584  }
6585  }
6586  return finalResult;
6587  }
6588  else
6589  return VK_SUCCESS;
6590 }
6591 
6592 VkResult VmaAllocator_T::Defragment(
6593  VmaAllocation* pAllocations,
6594  size_t allocationCount,
6595  VkBool32* pAllocationsChanged,
6596  const VmaDefragmentationInfo* pDefragmentationInfo,
6597  VmaDefragmentationStats* pDefragmentationStats)
6598 {
6599  if(pAllocationsChanged != VMA_NULL)
6600  {
6601  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
6602  }
6603  if(pDefragmentationStats != VMA_NULL)
6604  {
6605  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6606  }
6607 
6608  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6609  {
6610  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6611  return VK_ERROR_MEMORY_MAP_FAILED;
6612  }
6613 
6614  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6615 
6616  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6617 
6618  const size_t poolCount = m_Pools.size();
6619 
6620  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6621  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6622  {
6623  VmaAllocation hAlloc = pAllocations[allocIndex];
6624  VMA_ASSERT(hAlloc);
6625  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6626  // OwnAlloc cannot be defragmented.
6627  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6628  // Only HOST_VISIBLE memory types can be defragmented.
6629  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6630  // Lost allocation cannot be defragmented.
6631  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6632  {
6633  VmaBlockVector* pAllocBlockVector = VMA_NULL;
6634 
6635  const VmaPool hAllocPool = hAlloc->GetPool();
6636  // This allocation belongs to custom pool.
6637  if(hAllocPool != VK_NULL_HANDLE)
6638  {
6639  pAllocBlockVector = &hAllocPool->GetBlockVector();
6640  }
6641  // This allocation belongs to general pool.
6642  else
6643  {
6644  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6645  }
6646 
6647  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6648 
6649  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6650  &pAllocationsChanged[allocIndex] : VMA_NULL;
6651  pDefragmentator->AddAllocation(hAlloc, pChanged);
6652  }
6653  }
6654 
6655  VkResult result = VK_SUCCESS;
6656 
6657  // ======== Main processing.
6658 
6659  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE; // 64-bit sentinel; SIZE_MAX would truncate on 32-bit targets.
6660  uint32_t maxAllocationsToMove = UINT32_MAX;
6661  if(pDefragmentationInfo != VMA_NULL)
6662  {
6663  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6664  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6665  }
6666 
6667  // Process standard memory.
6668  for(uint32_t memTypeIndex = 0;
6669  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6670  ++memTypeIndex)
6671  {
6672  // Only HOST_VISIBLE memory types can be defragmented.
6673  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6674  {
6675  for(uint32_t blockVectorType = 0;
6676  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6677  ++blockVectorType)
6678  {
6679  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6680  pDefragmentationStats,
6681  maxBytesToMove,
6682  maxAllocationsToMove);
6683  }
6684  }
6685  }
6686 
6687  // Process custom pools.
6688  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6689  {
6690  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6691  pDefragmentationStats,
6692  maxBytesToMove,
6693  maxAllocationsToMove);
6694  }
6695 
6696  // ======== Destroy defragmentators.
6697 
6698  // Process custom pools.
6699  for(size_t poolIndex = poolCount; poolIndex--; )
6700  {
6701  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6702  }
6703 
6704  // Process standard memory.
6705  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6706  {
6707  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6708  {
6709  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6710  {
6711  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6712  }
6713  }
6714  }
6715 
6716  return result;
6717 }
6718 
6719 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6720 {
6721  if(hAllocation->CanBecomeLost())
6722  {
6723  /*
6724  Warning: This is a carefully designed algorithm.
6725  Do not modify unless you really know what you're doing :)
6726  */
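 // The loop below "touches" the allocation: if it is not lost and was last
 // used in an earlier frame, its LastUseFrameIndex is advanced to the current
 // frame with a compare-exchange and the state is re-evaluated. This keeps the
 // parameters reported below consistent with concurrent calls that may mark
 // the allocation as lost.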
6727  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6728  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6729  for(;;)
6730  {
6731  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6732  {
6733  pAllocationInfo->memoryType = UINT32_MAX;
6734  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6735  pAllocationInfo->offset = 0;
6736  pAllocationInfo->size = hAllocation->GetSize();
6737  pAllocationInfo->pMappedData = VMA_NULL;
6738  pAllocationInfo->pUserData = hAllocation->GetUserData();
6739  return;
6740  }
6741  else if(localLastUseFrameIndex == localCurrFrameIndex)
6742  {
6743  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6744  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6745  pAllocationInfo->offset = hAllocation->GetOffset();
6746  pAllocationInfo->size = hAllocation->GetSize();
6747  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6748  pAllocationInfo->pUserData = hAllocation->GetUserData();
6749  return;
6750  }
6751  else // Last use time earlier than current time.
6752  {
6753  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6754  {
6755  localLastUseFrameIndex = localCurrFrameIndex;
6756  }
6757  }
6758  }
6759  }
6760  // The allocation cannot become lost, so for performance we can read its parameters directly, without the atomic LastUseFrameIndex handling above.
6761  else
6762  {
6763  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6764  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6765  pAllocationInfo->offset = hAllocation->GetOffset();
6766  pAllocationInfo->size = hAllocation->GetSize();
6767  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6768  pAllocationInfo->pUserData = hAllocation->GetUserData();
6769  }
6770 }
6771 
6772 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6773 {
6774  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6775 
6776  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6777 
6778  if(newCreateInfo.maxBlockCount == 0)
6779  {
6780  newCreateInfo.maxBlockCount = SIZE_MAX;
6781  }
6782  if(newCreateInfo.blockSize == 0)
6783  {
6784  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6785  }
6786 
6787  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6788 
6789  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6790  if(res != VK_SUCCESS)
6791  {
6792  vma_delete(this, *pPool);
6793  *pPool = VMA_NULL;
6794  return res;
6795  }
6796 
6797  // Add to m_Pools.
6798  {
6799  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6800  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6801  }
6802 
6803  return VK_SUCCESS;
6804 }
6805 
6806 void VmaAllocator_T::DestroyPool(VmaPool pool)
6807 {
6808  // Remove from m_Pools.
6809  {
6810  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6811  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6812  VMA_ASSERT(success && "Pool not found in Allocator.");
6813  }
6814 
6815  vma_delete(this, pool);
6816 }
6817 
6818 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6819 {
6820  pool->m_BlockVector.GetPoolStats(pPoolStats);
6821 }
6822 
6823 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6824 {
6825  m_CurrentFrameIndex.store(frameIndex);
6826 }
6827 
6828 void VmaAllocator_T::MakePoolAllocationsLost(
6829  VmaPool hPool,
6830  size_t* pLostAllocationCount)
6831 {
6832  hPool->m_BlockVector.MakePoolAllocationsLost(
6833  m_CurrentFrameIndex.load(),
6834  pLostAllocationCount);
6835 }
6836 
6837 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6838 {
6839  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6840  (*pAllocation)->InitLost();
6841 }
6842 
6843 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6844 {
6845  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6846 
6847  VkResult res;
6848  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6849  {
6850  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6851  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6852  {
6853  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6854  if(res == VK_SUCCESS)
6855  {
6856  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6857  }
6858  }
6859  else
6860  {
6861  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6862  }
6863  }
6864  else
6865  {
6866  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6867  }
6868 
6869  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6870  {
6871  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6872  }
6873 
6874  return res;
6875 }
6876 
6877 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6878 {
6879  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6880  {
6881  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6882  }
6883 
6884  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
6885 
6886  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6887  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6888  {
6889  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6890  m_HeapSizeLimit[heapIndex] += size;
6891  }
6892 }
6893 
6894 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6895 {
6896  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6897 
6898  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6899  {
6900  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6901  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6902  VMA_ASSERT(pOwnAllocations);
6903  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6904  VMA_ASSERT(success);
6905  }
6906 
6907  VkDeviceMemory hMemory = allocation->GetMemory();
6908 
6909  if(allocation->GetMappedData() != VMA_NULL)
6910  {
6911  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
6912  }
6913 
6914  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
6915 
6916  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6917 }
6918 
6919 #if VMA_STATS_STRING_ENABLED
6920 
6921 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
6922 {
6923  bool ownAllocationsStarted = false;
6924  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6925  {
6926  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6927  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6928  {
6929  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6930  VMA_ASSERT(pOwnAllocVector);
6931  if(pOwnAllocVector->empty() == false)
6932  {
6933  if(ownAllocationsStarted == false)
6934  {
6935  ownAllocationsStarted = true;
6936  json.WriteString("OwnAllocations");
6937  json.BeginObject();
6938  }
6939 
6940  json.BeginString("Type ");
6941  json.ContinueString(memTypeIndex);
6942  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6943  {
6944  json.ContinueString(" Mapped");
6945  }
6946  json.EndString();
6947 
6948  json.BeginArray();
6949 
6950  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
6951  {
6952  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
6953  json.BeginObject(true);
6954 
6955  json.WriteString("Size");
6956  json.WriteNumber(hAlloc->GetSize());
6957 
6958  json.WriteString("Type");
6959  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
6960 
6961  json.EndObject();
6962  }
6963 
6964  json.EndArray();
6965  }
6966  }
6967  }
6968  if(ownAllocationsStarted)
6969  {
6970  json.EndObject();
6971  }
6972 
6973  {
6974  bool allocationsStarted = false;
6975  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6976  {
6977  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6978  {
6979  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
6980  {
6981  if(allocationsStarted == false)
6982  {
6983  allocationsStarted = true;
6984  json.WriteString("DefaultPools");
6985  json.BeginObject();
6986  }
6987 
6988  json.BeginString("Type ");
6989  json.ContinueString(memTypeIndex);
6990  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6991  {
6992  json.ContinueString(" Mapped");
6993  }
6994  json.EndString();
6995 
6996  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
6997  }
6998  }
6999  }
7000  if(allocationsStarted)
7001  {
7002  json.EndObject();
7003  }
7004  }
7005 
7006  {
7007  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7008  const size_t poolCount = m_Pools.size();
7009  if(poolCount > 0)
7010  {
7011  json.WriteString("Pools");
7012  json.BeginArray();
7013  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7014  {
7015  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7016  }
7017  json.EndArray();
7018  }
7019  }
7020 }
7021 
7022 #endif // #if VMA_STATS_STRING_ENABLED
7023 
7024 static VkResult AllocateMemoryForImage(
7025  VmaAllocator allocator,
7026  VkImage image,
7027  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7028  VmaSuballocationType suballocType,
7029  VmaAllocation* pAllocation)
7030 {
7031  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7032 
7033  VkMemoryRequirements vkMemReq = {};
7034  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
7035 
7036  return allocator->AllocateMemory(
7037  vkMemReq,
7038  *pAllocationCreateInfo,
7039  suballocType,
7040  pAllocation);
7041 }
7042 
7044 // Public interface
7045 
7046 VkResult vmaCreateAllocator(
7047  const VmaAllocatorCreateInfo* pCreateInfo,
7048  VmaAllocator* pAllocator)
7049 {
7050  VMA_ASSERT(pCreateInfo && pAllocator);
7051  VMA_DEBUG_LOG("vmaCreateAllocator");
7052  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7053  return VK_SUCCESS;
7054 }
7055 
7056 void vmaDestroyAllocator(
7057  VmaAllocator allocator)
7058 {
7059  if(allocator != VK_NULL_HANDLE)
7060  {
7061  VMA_DEBUG_LOG("vmaDestroyAllocator");
7062  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7063  vma_delete(&allocationCallbacks, allocator);
7064  }
7065 }
7066 
7067 void vmaGetPhysicalDeviceProperties(
7068  VmaAllocator allocator,
7069  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7070 {
7071  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7072  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7073 }
7074 
7075 void vmaGetMemoryProperties(
7076  VmaAllocator allocator,
7077  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7078 {
7079  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7080  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7081 }
7082 
7083 void vmaGetMemoryTypeProperties(
7084  VmaAllocator allocator,
7085  uint32_t memoryTypeIndex,
7086  VkMemoryPropertyFlags* pFlags)
7087 {
7088  VMA_ASSERT(allocator && pFlags);
7089  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7090  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7091 }
7092 
7093 void vmaSetCurrentFrameIndex(
7094  VmaAllocator allocator,
7095  uint32_t frameIndex)
7096 {
7097  VMA_ASSERT(allocator);
7098  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7099 
7100  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7101 
7102  allocator->SetCurrentFrameIndex(frameIndex);
7103 }
7104 
7105 void vmaCalculateStats(
7106  VmaAllocator allocator,
7107  VmaStats* pStats)
7108 {
7109  VMA_ASSERT(allocator && pStats);
7110  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7111  allocator->CalculateStats(pStats);
7112 }
7113 
7114 #if VMA_STATS_STRING_ENABLED
7115 
7116 void vmaBuildStatsString(
7117  VmaAllocator allocator,
7118  char** ppStatsString,
7119  VkBool32 detailedMap)
7120 {
7121  VMA_ASSERT(allocator && ppStatsString);
7122  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7123 
7124  VmaStringBuilder sb(allocator);
7125  {
7126  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7127  json.BeginObject();
7128 
7129  VmaStats stats;
7130  allocator->CalculateStats(&stats);
7131 
7132  json.WriteString("Total");
7133  VmaPrintStatInfo(json, stats.total);
7134 
7135  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7136  {
7137  json.BeginString("Heap ");
7138  json.ContinueString(heapIndex);
7139  json.EndString();
7140  json.BeginObject();
7141 
7142  json.WriteString("Size");
7143  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7144 
7145  json.WriteString("Flags");
7146  json.BeginArray(true);
7147  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7148  {
7149  json.WriteString("DEVICE_LOCAL");
7150  }
7151  json.EndArray();
7152 
7153  if(stats.memoryHeap[heapIndex].BlockCount > 0)
7154  {
7155  json.WriteString("Stats");
7156  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7157  }
7158 
7159  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7160  {
7161  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7162  {
7163  json.BeginString("Type ");
7164  json.ContinueString(typeIndex);
7165  json.EndString();
7166 
7167  json.BeginObject();
7168 
7169  json.WriteString("Flags");
7170  json.BeginArray(true);
7171  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7172  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7173  {
7174  json.WriteString("DEVICE_LOCAL");
7175  }
7176  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7177  {
7178  json.WriteString("HOST_VISIBLE");
7179  }
7180  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7181  {
7182  json.WriteString("HOST_COHERENT");
7183  }
7184  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7185  {
7186  json.WriteString("HOST_CACHED");
7187  }
7188  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7189  {
7190  json.WriteString("LAZILY_ALLOCATED");
7191  }
7192  json.EndArray();
7193 
7194  if(stats.memoryType[typeIndex].BlockCount > 0)
7195  {
7196  json.WriteString("Stats");
7197  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7198  }
7199 
7200  json.EndObject();
7201  }
7202  }
7203 
7204  json.EndObject();
7205  }
7206  if(detailedMap == VK_TRUE)
7207  {
7208  allocator->PrintDetailedMap(json);
7209  }
7210 
7211  json.EndObject();
7212  }
7213 
7214  const size_t len = sb.GetLength();
7215  char* const pChars = vma_new_array(allocator, char, len + 1);
7216  if(len > 0)
7217  {
7218  memcpy(pChars, sb.GetData(), len);
7219  }
7220  pChars[len] = '\0';
7221  *ppStatsString = pChars;
7222 }
7223 
7224 void vmaFreeStatsString(
7225  VmaAllocator allocator,
7226  char* pStatsString)
7227 {
7228  if(pStatsString != VMA_NULL)
7229  {
7230  VMA_ASSERT(allocator);
7231  size_t len = strlen(pStatsString);
7232  vma_delete_array(allocator, pStatsString, len + 1);
7233  }
7234 }
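// Example (illustrative sketch, not part of the library): dumping allocator
// statistics as JSON. Assumes `allocator` is a valid VmaAllocator.
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);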
7235 
7236 #endif // #if VMA_STATS_STRING_ENABLED
7237 
7240 VkResult vmaFindMemoryTypeIndex(
7241  VmaAllocator allocator,
7242  uint32_t memoryTypeBits,
7243  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7244  uint32_t* pMemoryTypeIndex)
7245 {
7246  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7247  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7248  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7249 
7250  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7251  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7252  if(preferredFlags == 0)
7253  {
7254  preferredFlags = requiredFlags;
7255  }
7256  // preferredFlags, if not 0, must be a superset of requiredFlags.
7257  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7258 
7259  // Convert usage to requiredFlags and preferredFlags.
7260  switch(pAllocationCreateInfo->usage)
7261  {
7262  case VMA_MEMORY_USAGE_UNKNOWN:
7263  break;
7264  case VMA_MEMORY_USAGE_GPU_ONLY:
7265  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7266  break;
7267  case VMA_MEMORY_USAGE_CPU_ONLY:
7268  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7269  break;
7270  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7271  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7272  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7273  break;
7274  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7275  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7276  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7277  break;
7278  default:
7279  break;
7280  }
7281 
7282  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7283  {
7284  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7285  }
7286 
7287  *pMemoryTypeIndex = UINT32_MAX;
7288  uint32_t minCost = UINT32_MAX;
7289  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7290  memTypeIndex < allocator->GetMemoryTypeCount();
7291  ++memTypeIndex, memTypeBit <<= 1)
7292  {
7293  // This memory type is acceptable according to memoryTypeBits bitmask.
7294  if((memTypeBit & memoryTypeBits) != 0)
7295  {
7296  const VkMemoryPropertyFlags currFlags =
7297  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7298  // This memory type contains requiredFlags.
7299  if((requiredFlags & ~currFlags) == 0)
7300  {
7301  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7302  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7303  // Remember memory type with lowest cost.
7304  if(currCost < minCost)
7305  {
7306  *pMemoryTypeIndex = memTypeIndex;
7307  if(currCost == 0)
7308  {
7309  return VK_SUCCESS;
7310  }
7311  minCost = currCost;
7312  }
7313  }
7314  }
7315  }
7316  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7317 }
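// Example (illustrative sketch): choosing a memory type for a staging buffer.
// Assumes `allocator` is a valid VmaAllocator and `memReq` was filled by
// vkGetBufferMemoryRequirements.
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//   uint32_t memTypeIndex;
//   VkResult res = vmaFindMemoryTypeIndex(
//       allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);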
7318 
7319 VkResult vmaCreatePool(
7320  VmaAllocator allocator,
7321  const VmaPoolCreateInfo* pCreateInfo,
7322  VmaPool* pPool)
7323 {
7324  VMA_ASSERT(allocator && pCreateInfo && pPool);
7325 
7326  VMA_DEBUG_LOG("vmaCreatePool");
7327 
7328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7329 
7330  return allocator->CreatePool(pCreateInfo, pPool);
7331 }
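// Example (illustrative sketch): creating a custom pool. `memTypeIndex` would
// typically come from vmaFindMemoryTypeIndex(). As implemented in CreatePool()
// above, members left at zero get defaults: blockSize from
// CalcPreferredBlockSize() and an unlimited maxBlockCount.
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.blockSize = 0;      // 0 = default block size.
//   poolCreateInfo.maxBlockCount = 0;  // 0 = no limit.
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);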
7332 
7333 void vmaDestroyPool(
7334  VmaAllocator allocator,
7335  VmaPool pool)
7336 {
7337  VMA_ASSERT(allocator && pool);
7338 
7339  VMA_DEBUG_LOG("vmaDestroyPool");
7340 
7341  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7342 
7343  allocator->DestroyPool(pool);
7344 }
7345 
7346 void vmaGetPoolStats(
7347  VmaAllocator allocator,
7348  VmaPool pool,
7349  VmaPoolStats* pPoolStats)
7350 {
7351  VMA_ASSERT(allocator && pool && pPoolStats);
7352 
7353  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7354 
7355  allocator->GetPoolStats(pool, pPoolStats);
7356 }
7357 
7358 void vmaMakePoolAllocationsLost(
7359  VmaAllocator allocator,
7360  VmaPool pool,
7361  size_t* pLostAllocationCount)
7362 {
7363  VMA_ASSERT(allocator && pool);
7364 
7365  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7366 
7367  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7368 }
7369 
7370 VkResult vmaAllocateMemory(
7371  VmaAllocator allocator,
7372  const VkMemoryRequirements* pVkMemoryRequirements,
7373  const VmaAllocationCreateInfo* pCreateInfo,
7374  VmaAllocation* pAllocation,
7375  VmaAllocationInfo* pAllocationInfo)
7376 {
7377  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7378 
7379  VMA_DEBUG_LOG("vmaAllocateMemory");
7380 
7381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7382 
7383  VkResult result = allocator->AllocateMemory(
7384  *pVkMemoryRequirements,
7385  *pCreateInfo,
7386  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7387  pAllocation);
7388 
7389  if(pAllocationInfo && result == VK_SUCCESS)
7390  {
7391  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7392  }
7393 
7394  return result;
7395 }
7396 
7397 VkResult vmaAllocateMemoryForBuffer(
7398  VmaAllocator allocator,
7399  VkBuffer buffer,
7400  const VmaAllocationCreateInfo* pCreateInfo,
7401  VmaAllocation* pAllocation,
7402  VmaAllocationInfo* pAllocationInfo)
7403 {
7404  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7405 
7406  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7407 
7408  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7409 
7410  VkMemoryRequirements vkMemReq = {};
7411  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
7412 
7413  VkResult result = allocator->AllocateMemory(
7414  vkMemReq,
7415  *pCreateInfo,
7416  VMA_SUBALLOCATION_TYPE_BUFFER,
7417  pAllocation);
7418 
7419  if(pAllocationInfo && result == VK_SUCCESS)
7420  {
7421  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7422  }
7423 
7424  return result;
7425 }
7426 
7427 VkResult vmaAllocateMemoryForImage(
7428  VmaAllocator allocator,
7429  VkImage image,
7430  const VmaAllocationCreateInfo* pCreateInfo,
7431  VmaAllocation* pAllocation,
7432  VmaAllocationInfo* pAllocationInfo)
7433 {
7434  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7435 
7436  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7437 
7438  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7439 
7440  VkResult result = AllocateMemoryForImage(
7441  allocator,
7442  image,
7443  pCreateInfo,
7444  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7445  pAllocation);
7446 
7447  if(pAllocationInfo && result == VK_SUCCESS)
7448  {
7449  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7450  }
7451 
7452  return result;
7453 }
7454 
7455 void vmaFreeMemory(
7456  VmaAllocator allocator,
7457  VmaAllocation allocation)
7458 {
7459  VMA_ASSERT(allocator && allocation);
7460 
7461  VMA_DEBUG_LOG("vmaFreeMemory");
7462 
7463  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7464 
7465  allocator->FreeMemory(allocation);
7466 }
7467 
7468 void vmaGetAllocationInfo(
7469  VmaAllocator allocator,
7470  VmaAllocation allocation,
7471  VmaAllocationInfo* pAllocationInfo)
7472 {
7473  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7474 
7475  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7476 
7477  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7478 }
7479 
7480 void vmaSetAllocationUserData(
7481  VmaAllocator allocator,
7482  VmaAllocation allocation,
7483  void* pUserData)
7484 {
7485  VMA_ASSERT(allocator && allocation);
7486 
7487  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7488 
7489  allocation->SetUserData(pUserData);
7490 }
7491 
7492 void vmaCreateLostAllocation(
7493  VmaAllocator allocator,
7494  VmaAllocation* pAllocation)
7495 {
7496  VMA_ASSERT(allocator && pAllocation);
7497 
7498  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7499 
7500  allocator->CreateLostAllocation(pAllocation);
7501 }
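// Example (illustrative sketch): typical per-frame handling of allocations
// that can become lost. Assumes `allocator` and `alloc` are valid and
// `frameIndex` is the application's frame counter. As GetAllocationInfo()
// above shows, a lost allocation reports deviceMemory == VK_NULL_HANDLE.
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   VmaAllocationInfo allocInfo;
//   vmaGetAllocationInfo(allocator, alloc, &allocInfo);
//   if(allocInfo.deviceMemory == VK_NULL_HANDLE)
//   {
//       // Allocation is lost - recreate the resource.
//   }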
7502 
7503 VkResult vmaMapMemory(
7504  VmaAllocator allocator,
7505  VmaAllocation allocation,
7506  void** ppData)
7507 {
7508  VMA_ASSERT(allocator && allocation && ppData);
7509 
7510  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7511 
7512  return (*allocator->GetVulkanFunctions().vkMapMemory)(allocator->m_hDevice, allocation->GetMemory(),
7513  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7514 }
7515 
7516 void vmaUnmapMemory(
7517  VmaAllocator allocator,
7518  VmaAllocation allocation)
7519 {
7520  VMA_ASSERT(allocator && allocation);
7521 
7522  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7523 
7524  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
7525 }
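// Example (illustrative sketch): uploading data through a host-visible
// allocation. Assumes `allocation` lives in HOST_VISIBLE (and preferably
// HOST_COHERENT) memory; `srcData`/`srcDataSize` are placeholders.
//
//   void* pData;
//   if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
//   {
//       memcpy(pData, srcData, srcDataSize);
//       vmaUnmapMemory(allocator, allocation);
//   }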
7526 
7527 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7528 {
7529  VMA_ASSERT(allocator);
7530 
7531  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7532 
7533  allocator->UnmapPersistentlyMappedMemory();
7534 }
7535 
7536 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7537 {
7538  VMA_ASSERT(allocator);
7539 
7540  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7541 
7542  return allocator->MapPersistentlyMappedMemory();
7543 }
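// Example (illustrative sketch): one possible bracketed usage around queue
// submission when VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT is in use (see the
// library documentation of persistently mapped memory):
//
//   vmaUnmapPersistentlyMappedMemory(allocator);
//   vkQueueSubmit(queue, 1, &submitInfo, fence);
//   vmaMapPersistentlyMappedMemory(allocator);
//   // Re-query VmaAllocationInfo::pMappedData afterwards: the remapped
//   // memory may land at a different address.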
7544 
7545 VkResult vmaDefragment(
7546  VmaAllocator allocator,
7547  VmaAllocation* pAllocations,
7548  size_t allocationCount,
7549  VkBool32* pAllocationsChanged,
7550  const VmaDefragmentationInfo *pDefragmentationInfo,
7551  VmaDefragmentationStats* pDefragmentationStats)
7552 {
7553  VMA_ASSERT(allocator && pAllocations);
7554 
7555  VMA_DEBUG_LOG("vmaDefragment");
7556 
7557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7558 
7559  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7560 }
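// Example (illustrative sketch): compacting a set of allocations. Assumes
// `allocs` is an array of ALLOC_COUNT VmaAllocation handles (ALLOC_COUNT is a
// placeholder) residing in HOST_VISIBLE memory.
//
//   VkBool32 changed[ALLOC_COUNT];
//   VmaDefragmentationStats defragStats;
//   VkResult res = vmaDefragment(allocator, allocs, ALLOC_COUNT, changed, nullptr, &defragStats);
//   // For each changed[i] == VK_TRUE the allocation moved: destroy and
//   // recreate the buffer/image bound to it at the new memory/offset.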
7561 
7562 VkResult vmaCreateBuffer(
7563  VmaAllocator allocator,
7564  const VkBufferCreateInfo* pBufferCreateInfo,
7565  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7566  VkBuffer* pBuffer,
7567  VmaAllocation* pAllocation,
7568  VmaAllocationInfo* pAllocationInfo)
7569 {
7570  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7571 
7572  VMA_DEBUG_LOG("vmaCreateBuffer");
7573 
7574  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7575 
7576  *pBuffer = VK_NULL_HANDLE;
7577  *pAllocation = VK_NULL_HANDLE;
7578 
7579  // 1. Create VkBuffer.
7580  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7581  allocator->m_hDevice,
7582  pBufferCreateInfo,
7583  allocator->GetAllocationCallbacks(),
7584  pBuffer);
7585  if(res >= 0)
7586  {
7587  // 2. vkGetBufferMemoryRequirements.
7588  VkMemoryRequirements vkMemReq = {};
7589  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
7590 
7591  // 3. Allocate memory using allocator.
7592  res = allocator->AllocateMemory(
7593  vkMemReq,
7594  *pAllocationCreateInfo,
7595  VMA_SUBALLOCATION_TYPE_BUFFER,
7596  pAllocation);
7597  if(res >= 0)
7598  {
7599  // 4. Bind buffer with memory.
7600  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7601  allocator->m_hDevice,
7602  *pBuffer,
7603  (*pAllocation)->GetMemory(),
7604  (*pAllocation)->GetOffset());
7605  if(res >= 0)
7606  {
7607  // All steps succeeded.
7608  if(pAllocationInfo != VMA_NULL)
7609  {
7610  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7611  }
7612  return VK_SUCCESS;
7613  }
7614  allocator->FreeMemory(*pAllocation);
7615  *pAllocation = VK_NULL_HANDLE;
7616  return res;
7617  }
7618  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7619  *pBuffer = VK_NULL_HANDLE;
7620  return res;
7621  }
7622  return res;
7623 }
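// Example (illustrative sketch): creating a device-local buffer in one call;
// sizes and usage flags are placeholders.
//
//   VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufCreateInfo.size = 65536;
//   bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   VkBuffer buffer;
//   VmaAllocation allocation;
//   VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//       &buffer, &allocation, nullptr);
//   // ... later:
//   vmaDestroyBuffer(allocator, buffer, allocation);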
7624 
7625 void vmaDestroyBuffer(
7626  VmaAllocator allocator,
7627  VkBuffer buffer,
7628  VmaAllocation allocation)
7629 {
7630  if(buffer != VK_NULL_HANDLE)
7631  {
7632  VMA_ASSERT(allocator);
7633 
7634  VMA_DEBUG_LOG("vmaDestroyBuffer");
7635 
7636  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7637 
7638  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7639 
7640  allocator->FreeMemory(allocation);
7641  }
7642 }
7643 
7644 VkResult vmaCreateImage(
7645  VmaAllocator allocator,
7646  const VkImageCreateInfo* pImageCreateInfo,
7647  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7648  VkImage* pImage,
7649  VmaAllocation* pAllocation,
7650  VmaAllocationInfo* pAllocationInfo)
7651 {
7652  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7653 
7654  VMA_DEBUG_LOG("vmaCreateImage");
7655 
7656  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7657 
7658  *pImage = VK_NULL_HANDLE;
7659  *pAllocation = VK_NULL_HANDLE;
7660 
7661  // 1. Create VkImage.
7662  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
7663  allocator->m_hDevice,
7664  pImageCreateInfo,
7665  allocator->GetAllocationCallbacks(),
7666  pImage);
7667  if(res >= 0)
7668  {
7669  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7670  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7671  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7672 
7673  // 2. Allocate memory using allocator.
7674  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7675  if(res >= 0)
7676  {
7677  // 3. Bind image with memory.
7678  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
7679  allocator->m_hDevice,
7680  *pImage,
7681  (*pAllocation)->GetMemory(),
7682  (*pAllocation)->GetOffset());
7683  if(res >= 0)
7684  {
7685  // All steps succeeded.
7686  if(pAllocationInfo != VMA_NULL)
7687  {
7688  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7689  }
7690  return VK_SUCCESS;
7691  }
7692  allocator->FreeMemory(*pAllocation);
7693  *pAllocation = VK_NULL_HANDLE;
7694  return res;
7695  }
7696  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7697  *pImage = VK_NULL_HANDLE;
7698  return res;
7699  }
7700  return res;
7701 }
7702 
7703 void vmaDestroyImage(
7704  VmaAllocator allocator,
7705  VkImage image,
7706  VmaAllocation allocation)
7707 {
7708  if(image != VK_NULL_HANDLE)
7709  {
7710  VMA_ASSERT(allocator);
7711 
7712  VMA_DEBUG_LOG("vmaDestroyImage");
7713 
7714  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7715 
7716  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7717 
7718  allocator->FreeMemory(allocation);
7719  }
7720 }
7721 
7722 #endif // #ifdef VMA_IMPLEMENTATION