Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of informative callbacks the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorFlagBits {
    /** Allocator and all objects created from it will not be synchronized internally,
    so you must guarantee they are used from only one thread at a time or
    synchronized externally by you. */
    VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,

    VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorFlagBits;

typedef VkFlags VmaAllocatorFlags;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorFlagBits enum.
    VmaAllocatorFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional, can be 0 to use default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Preferred size of a single VkDeviceMemory block to be allocated from small heaps. Optional, can be 0 to use default.
    VkDeviceSize preferredSmallHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory/vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

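/* Typical usage (illustrative sketch; assumes `physicalDevice` and `device` are
valid handles created during your Vulkan initialization):
\code
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);
\endcode
*/
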
/** PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
You can access it here, without fetching it again on your own. */
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/** PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
You can access it here, without fetching it again on your own. */
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/** Given a memory type index, returns the property flags of this memory type.
This is just a convenience function; the same information can be obtained using
vmaGetMemoryProperties(). */
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory blocks.
    uint32_t BlockCount;
    /// Number of VmaAllocation objects allocated.
    uint32_t AllocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t UnusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize UsedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize UnusedBytes;
    VkDeviceSize AllocationSizeMin, AllocationSizeAvg, AllocationSizeMax;
    VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
};

/// Retrieves statistics from the current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/** Builds and returns statistics in JSON format.
@param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

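/* Example (illustrative sketch): dumping the allocator state to a log.
\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
printf("%s\n", statsString); // or write to a file / logging system
vmaFreeStatsString(allocator, statsString);
\endcode
*/
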
#endif // #if VMA_STATS_STRING_ENABLED


VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, no need to be mapped on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped on host. Could be used for transfer to device.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for writing on device and readback on host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /** Set this flag if the allocation should have its own memory block. */
    VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001,
    /** Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create a new such block. */
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /** Set this flag to use a memory that will be persistently mapped.
    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. */
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    /** Allocation created with this flag can become lost as a result of another
    allocation that uses VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT. */
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /** While creating an allocation using this flag, other allocations that were
    created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost. */
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /** Intended usage of memory. You can leave VMA_MEMORY_USAGE_UNKNOWN if you
    specify memory requirements in another way. Ignored if pool is not null. */
    VmaMemoryUsage usage;
    /** Flags that must be set in a memory type chosen for an allocation.
    Leave 0 if you specify requirements in another way. Ignored if pool is not null. */
    VkMemoryPropertyFlags requiredFlags;
    /** Flags that preferably should be set in a memory type chosen for an allocation.
    Set to 0 if no additional flags are preferred. Ignored if pool is not null. */
    VkMemoryPropertyFlags preferredFlags;
    /** Custom general-purpose pointer that will be stored in VmaAllocation, can be
    read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). */
    void* pUserData;
    /** Pool that this allocation should be created in. Leave VK_NULL_HANDLE to
    allocate from general memory. */
    VmaPool pool;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

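/* Example (illustrative sketch): finding a memory type for a host-visible,
host-coherent staging buffer. `memReq.memoryTypeBits` is assumed to come from
vkGetBufferMemoryRequirements() on the buffer in question.
\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.requiredFlags =
    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
// On success, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
\endcode
*/
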

typedef enum VmaPoolCreateFlagBits {
    /** Use this flag if you always allocate only buffers and linear images or only
    optimal images out of this pool, so buffer-image granularity can be ignored. */
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /** Set this flag to use memory that will be persistently mapped. */
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional, can be 0 to use default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional, can be 0 for no maximum.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

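/* Example (illustrative sketch): creating a custom pool for a specific memory
type. `memTypeIndex` is assumed to come from vmaFindMemoryTypeIndex() as shown
above.
\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
poolCreateInfo.minBlockCount = 1;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from the pool via VmaAllocationCreateInfo::pool ...
vmaDestroyPool(allocator, pool);
\endcode
*/
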
/// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/** Retrieves statistics of an existing VmaPool object.
@param[out] pPoolStats Statistics of the specified pool.
*/
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/** Marks all allocations in the given pool as lost if they are not used in the
current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
@param[out] pLostAllocationCount Number of allocations marked as lost. Optional, can be null.
*/
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /** Memory type index that this allocation was allocated from. It never changes. */
    uint32_t memoryType;
    /** Handle to Vulkan memory object. The same memory object can be shared by
    multiple allocations. It can change after a call to vmaDefragment() if this
    allocation is passed to the function, or if the allocation is lost. */
    VkDeviceMemory deviceMemory;
    /** Offset into deviceMemory object to the beginning of this allocation, in bytes.
    It can change after a call to vmaDefragment() if this allocation is passed
    to the function, or if the allocation is lost. */
    VkDeviceSize offset;
    /** Size of this allocation, in bytes. It never changes. */
    VkDeviceSize size;
    /** Pointer to the beginning of this allocation as mapped data.
    Null if this allocation is not persistently mapped. */
    void* pMappedData;
    /** Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData
    or set using vmaSetAllocationUserData(). */
    void* pUserData;
} VmaAllocationInfo;

/** General-purpose memory allocation.

@param[out] pAllocation Handle to allocated memory.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can
be fetched later using vmaGetAllocationInfo().

You should free the memory using vmaFreeMemory(). It is recommended to use
vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), vmaCreateBuffer(),
or vmaCreateImage() instead whenever possible.
*/
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemory() but dedicated to allocation for a given VkBuffer.
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer(), for a given VkImage.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/** Creates a new allocation that is in lost state from the beginning.
It can be useful if you need a dummy, non-null allocation.
*/
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

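/* Example (illustrative sketch): uploading vertex data to a host-visible
allocation. `vertices` and `vertexDataSize` are assumed to be provided by the
caller; the allocation must live in host-visible memory for the map to succeed.
\code
void* mappedData = nullptr;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, vertices, vertexDataSize);
    vmaUnmapMemory(allocator, allocation);
}
\endcode
*/
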
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

/// Maps back persistently mapped memory previously unmapped using vmaUnmapPersistentlyMappedMemory().
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

/// Optional configuration parameters to be passed to vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /** Maximum total number of bytes that can be copied while moving allocations
    to different places. Default is VK_WHOLE_SIZE, which means no limit. */
    VkDeviceSize maxBytesToMove;
    /** Maximum number of allocations that can be moved to a different place.
    Default is UINT32_MAX, which means no limit. */
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

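/* Example (illustrative sketch): defragmenting a set of allocations. The
`allocations` array and the compile-time constant `ALLOC_COUNT` are assumed to
be maintained by the caller; persistently mapped memory is unmapped around the
call as required by this version of the library.
\code
vmaUnmapPersistentlyMappedMemory(allocator);

VkBool32 allocationsChanged[ALLOC_COUNT] = {};
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(
    allocator, allocations, ALLOC_COUNT, allocationsChanged, nullptr, &stats);

vmaMapPersistentlyMappedMemory(allocator);

// Allocations whose entry in allocationsChanged is VK_TRUE now have a new
// deviceMemory/offset: query vmaGetAllocationInfo() and e.g. recreate buffers.
\endcode
*/
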

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

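/* Example (illustrative sketch): creating a GPU-only vertex buffer together
with its memory in one call. `myBufferSize` is an assumed caller-provided size.
\code
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = myBufferSize;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(
    allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
\endcode
*/
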
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>
#include <new> // for placement new, used by vma_new / vma_new_array

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default in your environment.
*/

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// These helpers are used only when VMA_STATS_STRING_ENABLED is 1, i.e. for
// vmaBuildStatsString and vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation of VMA_ATOMIC_UINT32, you need to implement
a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    /* Main switch for the algorithm that assesses how good a free suballocation is
    for a new allocation request: 1 = best-fit (prefer the smallest suitable free
    range), 0 = worst-fit (prefer the largest). */
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
    // Every allocation will have its own VkDeviceMemory. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes. Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex protecting all entry calls to the library.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v), using parallel bit counting (SWAR popcount).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.
pageSize must be a power of two.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
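
// Worked example: with pageSize = 4096, a resource spanning [0, 100) ends on
// page 0 and a resource starting at offset 4000 also starts on page 0, so
// VmaBlocksOnSamePage(0, 100, 4000, 4096) == true. With resourceBOffset = 4096
// the second resource starts on page 1 and the function returns false.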

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

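/* Usage sketch (illustrative; `m_Mutex` and `m_UseMutex` are assumed members of
the enclosing object): the lock is released automatically on scope exit, even on
early return.
\code
{
    VmaMutexLock lock(m_Mutex, m_UseMutex); // no-op when m_UseMutex == false
    // ... touch shared state ...
}   // mutex released here
\endcode
*/
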
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
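
/* Usage sketch (illustrative): lower-bound search over a sorted plain array.
\code
const int sorted[] = { 1, 3, 3, 7 };
const int* it = VmaBinaryFindFirstNotLess(
    sorted, sorted + 4, 3, [](int a, int b) { return a < b; });
// it points at the first 3 (index 1); searching for 4 would yield index 3 (the 7),
// and searching for 8 would yield sorted + 4 (the end), i.e. the insertion position.
\endcode
*/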

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR
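
/* Usage sketch (illustrative): VmaVector mirrors the std::vector subset used by
the library, with allocations routed through VkAllocationCallbacks. This applies
to the library's own implementation (VMA_USE_STL_VECTOR == 0); note the
index-based insert/remove, unlike std::vector's iterator overloads.
\code
typedef VmaVector< uint32_t, VmaStlAllocator<uint32_t> > MyVector;
MyVector v(VmaStlAllocator<uint32_t>(pAllocationCallbacks)); // callbacks may be null
v.push_back(42);
v.insert(0, 7);   // v is now 7, 42
v.remove(1);      // v is now 7
\endcode
*/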

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
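
/* Usage sketch (illustrative): pool-allocating small objects. Alloc() returns
raw storage for T; the caller is responsible for constructing/destructing the
value (the library uses it for POD-like items).
\code
VmaPoolAllocator<uint32_t> pool(pAllocationCallbacks, 128); // 128 items per block
uint32_t* p = pool.Alloc();
*p = 7;
pool.Free(p); // returns the slot to the block's free list
\endcode
*/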

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would do unnecessary
    // computation to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST
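
/* Usage sketch (illustrative): VmaList mirrors the std::list subset used by the
library (push_back, insert-before, erase via iterator). This applies to the
library's own implementation (VMA_USE_STL_LIST == 0).
\code
typedef VmaList< int, VmaStlAllocator<int> > MyList;
MyList list(VmaStlAllocator<int>(pAllocationCallbacks));
list.push_back(2);
MyList::iterator it = list.begin();
it = list.insert(it, 1);   // list is now 1, 2
list.erase(it);            // list is now 2
\endcode
*/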

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0


////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_OWN,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitOwnAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_OWN;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_OwnAllocation.m_hMemory = hMemory;
        m_OwnAllocation.m_PersistentMap = persistentMap;
        m_OwnAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult OwnAllocMapPersistentlyMappedMemory(VkDevice hDevice)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        if(m_OwnAllocation.m_PersistentMap)
        {
            return vkMapMemory(hDevice, m_OwnAllocation.m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_OwnAllocation.m_pMappedData);
        }
        return VK_SUCCESS;
    }
    void OwnAllocUnmapPersistentlyMappedMemory(VkDevice hDevice)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        if(m_OwnAllocation.m_pMappedData)
        {
            VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
            vkUnmapMemory(hDevice, m_OwnAllocation.m_hMemory);
            m_OwnAllocation.m_pMappedData = VMA_NULL;
        }
    }

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct OwnAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        OwnAllocation m_OwnAllocation;
    };
};
2907 
2908 /*
2909 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
2910 allocated memory block or free.
2911 */
2912 struct VmaSuballocation
2913 {
2914  VkDeviceSize offset;
2915  VkDeviceSize size;
2916  VmaAllocation hAllocation;
2917  VmaSuballocationType type;
2918 };
2919 
2920 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2921 
2922 // Cost of one additional allocation lost, as equivalent in bytes.
2923 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
2924 
2925 /*
2926 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
2927 
2928 If canMakeOtherLost was false:
2929 - item points to a FREE suballocation.
2930 - itemsToMakeLostCount is 0.
2931 
2932 If canMakeOtherLost was true:
2933 - item points to first of sequence of suballocations, which are either FREE,
2934  or point to VmaAllocations that can become lost.
2935 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
2936  the requested allocation to succeed.
2937 */
2938 struct VmaAllocationRequest
2939 {
2940  VkDeviceSize offset;
2941  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
2942  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
2943  VmaSuballocationList::iterator item;
2944  size_t itemsToMakeLostCount;
2945 
2946  VkDeviceSize CalcCost() const
2947  {
2948  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
2949  }
2950 };
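/*
A worked example of CalcCost(), with hypothetical numbers: a candidate request
that overlaps used items totaling sumItemSize = 262144 bytes and would make
itemsToMakeLostCount = 2 allocations lost costs
262144 + 2 * 1048576 = 2359296 equivalent bytes. A competing request with
sumItemSize = 3145728 and no allocations to make lost costs 3145728, so the
first candidate is preferred despite sacrificing two allocations.
*/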
2951 
2952 /*
2953 Represents a single block of device memory (VkDeviceMemory) with all the
2954 data about its regions (a.k.a. suballocations, VmaAllocation), both assigned and free.
2955 
2956 Thread-safety: This class must be externally synchronized.
2957 */
2958 class VmaDeviceMemoryBlock
2959 {
2960 public:
2961  uint32_t m_MemoryTypeIndex;
2962  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
2963  VkDeviceMemory m_hMemory;
2964  VkDeviceSize m_Size;
2965  bool m_PersistentMap;
2966  void* m_pMappedData;
2967  uint32_t m_FreeCount;
2968  VkDeviceSize m_SumFreeSize;
2969  VmaSuballocationList m_Suballocations;
2970  // Suballocations that are free and have size greater than a certain threshold.
2971  // Sorted by size, ascending.
2972  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
2973 
2974  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
2975 
2976  ~VmaDeviceMemoryBlock()
2977  {
2978  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2979  }
2980 
2981  // Always call after construction.
2982  void Init(
2983  uint32_t newMemoryTypeIndex,
2984  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
2985  VkDeviceMemory newMemory,
2986  VkDeviceSize newSize,
2987  bool persistentMap,
2988  void* pMappedData);
2989  // Always call before destruction.
2990  void Destroy(VmaAllocator allocator);
2991 
2992  // Validates all data structures inside this object. If not valid, returns false.
2993  bool Validate() const;
2994 
2995  // Tries to find a place for a suballocation with given parameters inside this block.
2996  // If succeeded, fills pAllocationRequest and returns true.
2997  // If failed, returns false.
2998  bool CreateAllocationRequest(
2999  uint32_t currentFrameIndex,
3000  uint32_t frameInUseCount,
3001  VkDeviceSize bufferImageGranularity,
3002  VkDeviceSize allocSize,
3003  VkDeviceSize allocAlignment,
3004  VmaSuballocationType allocType,
3005  bool canMakeOtherLost,
3006  VmaAllocationRequest* pAllocationRequest);
3007 
3008  bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);
3009 
3010  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3011 
3012  // Returns true if this block is empty - contains only a single free suballocation.
3013  bool IsEmpty() const;
3014 
3015  // Makes actual allocation based on request. Request must already be checked
3016  // and valid.
3017  void Alloc(
3018  const VmaAllocationRequest& request,
3019  VmaSuballocationType type,
3020  VkDeviceSize allocSize,
3021  VmaAllocation hAllocation);
3022 
3023  // Frees suballocation assigned to given memory region.
3024  void Free(const VmaAllocation allocation);
3025 
3026 #if VMA_STATS_STRING_ENABLED
3027  void PrintDetailedMap(class VmaJsonWriter& json) const;
3028 #endif
3029 
3030 private:
3031  // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
3032  // If yes, fills pOffset and returns true. If no, returns false.
3033  bool CheckAllocation(
3034  uint32_t currentFrameIndex,
3035  uint32_t frameInUseCount,
3036  VkDeviceSize bufferImageGranularity,
3037  VkDeviceSize allocSize,
3038  VkDeviceSize allocAlignment,
3039  VmaSuballocationType allocType,
3040  VmaSuballocationList::const_iterator suballocItem,
3041  bool canMakeOtherLost,
3042  VkDeviceSize* pOffset,
3043  size_t* itemsToMakeLostCount,
3044  VkDeviceSize* pSumFreeSize,
3045  VkDeviceSize* pSumItemSize) const;
3046 
3047  // Given a free suballocation, merges it with the following one, which must also be free.
3048  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3049  // Releases given suballocation, making it free.
3050  // Merges it with adjacent free suballocations if applicable.
3051  // Returns iterator to new free suballocation at this place.
3052  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3053  // Given a free suballocation, inserts it into the sorted list
3054  // m_FreeSuballocationsBySize if its size qualifies.
3055  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3056  // Given a free suballocation, removes it from the sorted list
3057  // m_FreeSuballocationsBySize if its size qualifies.
3058  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3059 
3060  bool ValidateFreeSuballocationList() const;
3061 };
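/*
A minimal sketch of the intended lifecycle of this class. All lowercase
variables (hAllocator, memory, memTypeIndex, size, allocSize, allocAlignment,
currentFrameIndex, frameInUseCount, bufferImageGranularity, hAllocation) are
hypothetical values prepared by the caller, and external synchronization is
assumed as required above:

    VmaDeviceMemoryBlock block(hAllocator);
    block.Init(memTypeIndex, VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
        memory, size, false, VMA_NULL); // Always call after construction.

    VmaAllocationRequest request = {};
    if(block.CreateAllocationRequest(
        currentFrameIndex, frameInUseCount, bufferImageGranularity,
        allocSize, allocAlignment, VMA_SUBALLOCATION_TYPE_BUFFER,
        false, // canMakeOtherLost
        &request))
    {
        block.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, allocSize, hAllocation);
        // ... use the memory at offset request.offset ...
        block.Free(hAllocation);
    }

    block.Destroy(hAllocator); // Always call before destruction.
*/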
3062 
3063 struct VmaPointerLess
3064 {
3065  bool operator()(const void* lhs, const void* rhs) const
3066  {
3067  return lhs < rhs;
3068  }
3069 };
3070 
3071 class VmaDefragmentator;
3072 
3073 /*
3074 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3075 Vulkan memory type.
3076 
3077 Synchronized internally with a mutex.
3078 */
3079 struct VmaBlockVector
3080 {
3081  VmaBlockVector(
3082  VmaAllocator hAllocator,
3083  uint32_t memoryTypeIndex,
3084  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3085  VkDeviceSize preferredBlockSize,
3086  size_t minBlockCount,
3087  size_t maxBlockCount,
3088  VkDeviceSize bufferImageGranularity,
3089  uint32_t frameInUseCount,
3090  bool isCustomPool);
3091  ~VmaBlockVector();
3092 
3093  VkResult CreateMinBlocks();
3094 
3095  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3096  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3097  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3098  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3099  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3100 
3101  void GetPoolStats(VmaPoolStats* pStats);
3102 
3103  bool IsEmpty() const { return m_Blocks.empty(); }
3104 
3105  VkResult Allocate(
3106  VmaPool hCurrentPool,
3107  uint32_t currentFrameIndex,
3108  const VkMemoryRequirements& vkMemReq,
3109  const VmaAllocationCreateInfo& createInfo,
3110  VmaSuballocationType suballocType,
3111  VmaAllocation* pAllocation);
3112 
3113  void Free(
3114  VmaAllocation hAllocation);
3115 
3116  // Adds statistics of this BlockVector to pStats.
3117  void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex);
3118 
3119 #if VMA_STATS_STRING_ENABLED
3120  void PrintDetailedMap(class VmaJsonWriter& json);
3121 #endif
3122 
3123  void UnmapPersistentlyMappedMemory();
3124  VkResult MapPersistentlyMappedMemory();
3125 
3126  void MakePoolAllocationsLost(
3127  uint32_t currentFrameIndex,
3128  size_t* pLostAllocationCount);
3129 
3130  VmaDefragmentator* EnsureDefragmentator(
3131  VkDevice hDevice,
3132  const VkAllocationCallbacks* pAllocationCallbacks,
3133  uint32_t currentFrameIndex);
3134 
3135  VkResult Defragment(
3136  VmaDefragmentationStats* pDefragmentationStats,
3137  VkDeviceSize& maxBytesToMove,
3138  uint32_t& maxAllocationsToMove);
3139 
3140  void DestroyDefragmentator();
3141 
3142 private:
3143  friend class VmaDefragmentator;
3144 
3145  const VmaAllocator m_hAllocator;
3146  const uint32_t m_MemoryTypeIndex;
3147  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3148  const VkDeviceSize m_PreferredBlockSize;
3149  const size_t m_MinBlockCount;
3150  const size_t m_MaxBlockCount;
3151  const VkDeviceSize m_BufferImageGranularity;
3152  const uint32_t m_FrameInUseCount;
3153  const bool m_IsCustomPool;
3154  VMA_MUTEX m_Mutex;
3155  // Incrementally sorted by sumFreeSize, ascending.
3156  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3157  /* There can be at most one block that is completely empty - a
3158  hysteresis to avoid the pessimistic case of alternating creation and
3159  destruction of a VkDeviceMemory. */
3160  bool m_HasEmptyBlock;
3161  VmaDefragmentator* m_pDefragmentator;
3162 
3163  // Finds and removes given block from vector.
3164  void Remove(VmaDeviceMemoryBlock* pBlock);
3165 
3166  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3167  // after this call.
3168  void IncrementallySortBlocks();
3169 
3170  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3171 };
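/*
A minimal sketch of driving VmaBlockVector from allocator code, assuming
hypothetical vkMemReq, createInfo, hCurrentPool and currentFrameIndex values
prepared by the caller (the vector synchronizes internally with m_Mutex):

    VmaAllocation hAlloc = VK_NULL_HANDLE;
    VkResult res = blockVector.Allocate(
        hCurrentPool, currentFrameIndex, vkMemReq, createInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER, &hAlloc);
    if(res == VK_SUCCESS)
    {
        // ... bind a buffer/image to hAlloc's memory ...
        blockVector.Free(hAlloc);
    }
*/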
3172 
3173 struct VmaPool_T
3174 {
3175 public:
3176  VmaBlockVector m_BlockVector;
3177 
3178  // Takes ownership.
3179  VmaPool_T(
3180  VmaAllocator hAllocator,
3181  const VmaPoolCreateInfo& createInfo);
3182  ~VmaPool_T();
3183 
3184  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3185 
3186 #if VMA_STATS_STRING_ENABLED
3187  //void PrintDetailedMap(class VmaStringBuilder& sb);
3188 #endif
3189 };
3190 
3191 class VmaDefragmentator
3192 {
3193  const VkDevice m_hDevice;
3194  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3195  VmaBlockVector* const m_pBlockVector;
3196  uint32_t m_CurrentFrameIndex;
3197  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3198  VkDeviceSize m_BytesMoved;
3199  uint32_t m_AllocationsMoved;
3200 
3201  struct AllocationInfo
3202  {
3203  VmaAllocation m_hAllocation;
3204  VkBool32* m_pChanged;
3205 
3206  AllocationInfo() :
3207  m_hAllocation(VK_NULL_HANDLE),
3208  m_pChanged(VMA_NULL)
3209  {
3210  }
3211  };
3212 
3213  struct AllocationInfoSizeGreater
3214  {
3215  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3216  {
3217  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3218  }
3219  };
3220 
3221  // Used between AddAllocation and Defragment.
3222  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3223 
3224  struct BlockInfo
3225  {
3226  VmaDeviceMemoryBlock* m_pBlock;
3227  bool m_HasNonMovableAllocations;
3228  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3229 
3230  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3231  m_pBlock(VMA_NULL),
3232  m_HasNonMovableAllocations(true),
3233  m_Allocations(pAllocationCallbacks),
3234  m_pMappedDataForDefragmentation(VMA_NULL)
3235  {
3236  }
3237 
3238  void CalcHasNonMovableAllocations()
3239  {
3240  const size_t blockAllocCount =
3241  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3242  const size_t defragmentAllocCount = m_Allocations.size();
3243  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3244  }
3245 
3246  void SortAllocationsBySizeDescecnding()
3247  {
3248  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3249  }
3250 
3251  VkResult EnsureMapping(VkDevice hDevice, void** ppMappedData)
3252  {
3253  // It has already been mapped for defragmentation.
3254  if(m_pMappedDataForDefragmentation)
3255  {
3256  *ppMappedData = m_pMappedDataForDefragmentation;
3257  return VK_SUCCESS;
3258  }
3259 
3260  // It is persistently mapped.
3261  if(m_pBlock->m_PersistentMap)
3262  {
3263  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
3264  *ppMappedData = m_pBlock->m_pMappedData;
3265  return VK_SUCCESS;
3266  }
3267 
3268  // Map on first usage.
3269  VkResult res = vkMapMemory(hDevice, m_pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedDataForDefragmentation);
3270  *ppMappedData = m_pMappedDataForDefragmentation;
3271  return res;
3272  }
3273 
3274  void Unmap(VkDevice hDevice)
3275  {
3276  if(m_pMappedDataForDefragmentation != VMA_NULL)
3277  {
3278  vkUnmapMemory(hDevice, m_pBlock->m_hMemory); m_pMappedDataForDefragmentation = VMA_NULL; // Reset so a later EnsureMapping maps again instead of returning a stale pointer.
3279  }
3280  }
3281 
3282  private:
3283  // Not null if mapped for defragmentation only, not persistently mapped.
3284  void* m_pMappedDataForDefragmentation;
3285  };
3286 
3287  struct BlockPointerLess
3288  {
3289  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3290  {
3291  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3292  }
3293  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3294  {
3295  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3296  }
3297  };
3298 
3299  // 1. Blocks with some non-movable allocations go first.
3300  // 2. Blocks with smaller sumFreeSize go first.
3301  struct BlockInfoCompareMoveDestination
3302  {
3303  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3304  {
3305  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3306  {
3307  return true;
3308  }
3309  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3310  {
3311  return false;
3312  }
3313  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3314  {
3315  return true;
3316  }
3317  return false;
3318  }
3319  };
3320 
3321  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3322  BlockInfoVector m_Blocks;
3323 
3324  VkResult DefragmentRound(
3325  VkDeviceSize maxBytesToMove,
3326  uint32_t maxAllocationsToMove);
3327 
3328  static bool MoveMakesSense(
3329  size_t dstBlockIndex, VkDeviceSize dstOffset,
3330  size_t srcBlockIndex, VkDeviceSize srcOffset);
3331 
3332 public:
3333  VmaDefragmentator(
3334  VkDevice hDevice,
3335  const VkAllocationCallbacks* pAllocationCallbacks,
3336  VmaBlockVector* pBlockVector,
3337  uint32_t currentFrameIndex);
3338 
3339  ~VmaDefragmentator();
3340 
3341  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3342  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3343 
3344  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3345 
3346  VkResult Defragment(
3347  VkDeviceSize maxBytesToMove,
3348  uint32_t maxAllocationsToMove);
3349 };
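/*
A minimal sketch of the defragmentation flow, assuming a defragmentator
obtained from a hypothetical block vector via EnsureDefragmentator():

    VkBool32 changed = VK_FALSE;
    pDefragmentator->AddAllocation(hAlloc, &changed);
    VkResult res = pDefragmentator->Defragment(
        VK_WHOLE_SIZE, // maxBytesToMove: effectively unlimited in this sketch.
        UINT32_MAX);   // maxAllocationsToMove: effectively unlimited.
    // Afterwards GetBytesMoved() and GetAllocationsMoved() report progress,
    // and `changed` tells whether this particular allocation was moved.
*/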
3350 
3351 // Main allocator object.
3352 struct VmaAllocator_T
3353 {
3354  bool m_UseMutex;
3355  VkDevice m_hDevice;
3356  bool m_AllocationCallbacksSpecified;
3357  VkAllocationCallbacks m_AllocationCallbacks;
3358  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3359  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3360  // Counter to allow nested calls to these functions.
3361  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3362 
3363  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3364  VkPhysicalDeviceMemoryProperties m_MemProps;
3365 
3366  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3367 
3368  // Each vector is sorted by memory (handle value).
3369  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3370  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3371  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3372 
3373  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3374  ~VmaAllocator_T();
3375 
3376  const VkAllocationCallbacks* GetAllocationCallbacks() const
3377  {
3378  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3379  }
3380 
3381  VkDeviceSize GetBufferImageGranularity() const
3382  {
3383  return VMA_MAX(
3384  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3385  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3386  }
3387 
3388  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3389  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3390 
3391  // Main allocation function.
3392  VkResult AllocateMemory(
3393  const VkMemoryRequirements& vkMemReq,
3394  const VmaAllocationCreateInfo& createInfo,
3395  VmaSuballocationType suballocType,
3396  VmaAllocation* pAllocation);
3397 
3398  // Main deallocation function.
3399  void FreeMemory(const VmaAllocation allocation);
3400 
3401  void CalculateStats(VmaStats* pStats);
3402 
3403 #if VMA_STATS_STRING_ENABLED
3404  void PrintDetailedMap(class VmaJsonWriter& json);
3405 #endif
3406 
3407  void UnmapPersistentlyMappedMemory();
3408  VkResult MapPersistentlyMappedMemory();
3409 
3410  VkResult Defragment(
3411  VmaAllocation* pAllocations,
3412  size_t allocationCount,
3413  VkBool32* pAllocationsChanged,
3414  const VmaDefragmentationInfo* pDefragmentationInfo,
3415  VmaDefragmentationStats* pDefragmentationStats);
3416 
3417  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3418 
3419  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3420  void DestroyPool(VmaPool pool);
3421  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3422 
3423  void SetCurrentFrameIndex(uint32_t frameIndex);
3424 
3425  void MakePoolAllocationsLost(
3426  VmaPool hPool,
3427  size_t* pLostAllocationCount);
3428 
3429  void CreateLostAllocation(VmaAllocation* pAllocation);
3430 
3431 private:
3432  VkDeviceSize m_PreferredLargeHeapBlockSize;
3433  VkDeviceSize m_PreferredSmallHeapBlockSize;
3434 
3435  VkPhysicalDevice m_PhysicalDevice;
3436  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3437 
3438  VMA_MUTEX m_PoolsMutex;
3439  // Protected by m_PoolsMutex. Sorted by pointer value.
3440  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3441 
3442  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3443 
3444  VkResult AllocateMemoryOfType(
3445  const VkMemoryRequirements& vkMemReq,
3446  const VmaAllocationCreateInfo& createInfo,
3447  uint32_t memTypeIndex,
3448  VmaSuballocationType suballocType,
3449  VmaAllocation* pAllocation);
3450 
3451  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
3452  VkResult AllocateOwnMemory(
3453  VkDeviceSize size,
3454  VmaSuballocationType suballocType,
3455  uint32_t memTypeIndex,
3456  bool map,
3457  void* pUserData,
3458  VmaAllocation* pAllocation);
3459 
3460  // Frees an allocation created as Own Memory - one that has its own dedicated VkDeviceMemory.
3461  void FreeOwnMemory(VmaAllocation allocation);
3462 };
3463 
3465 // Memory allocation #2 after VmaAllocator_T definition
3466 
3467 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3468 {
3469  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3470 }
3471 
3472 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3473 {
3474  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3475 }
3476 
3477 template<typename T>
3478 static T* VmaAllocate(VmaAllocator hAllocator)
3479 {
3480  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3481 }
3482 
3483 template<typename T>
3484 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3485 {
3486  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3487 }
3488 
3489 template<typename T>
3490 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3491 {
3492  if(ptr != VMA_NULL)
3493  {
3494  ptr->~T();
3495  VmaFree(hAllocator, ptr);
3496  }
3497 }
3498 
3499 template<typename T>
3500 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3501 {
3502  if(ptr != VMA_NULL)
3503  {
3504  for(size_t i = count; i--; )
3505  ptr[i].~T();
3506  VmaFree(hAllocator, ptr);
3507  }
3508 }
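/*
VmaAllocate/VmaAllocateArray return raw, uninitialized storage, so construction
is the caller's job. A sketch of the intended pairing for some hypothetical
default-constructible type T (placement new requires <new>):

    T* p = VmaAllocate<T>(hAllocator); // Raw storage, correctly aligned for T.
    new(p) T();                        // Construct in place.
    // ...
    vma_delete(hAllocator, p);         // Calls ~T(), then frees the storage.
*/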
3509 
3511 // VmaStringBuilder
3512 
3513 #if VMA_STATS_STRING_ENABLED
3514 
3515 class VmaStringBuilder
3516 {
3517 public:
3518  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3519  size_t GetLength() const { return m_Data.size(); }
3520  const char* GetData() const { return m_Data.data(); }
3521 
3522  void Add(char ch) { m_Data.push_back(ch); }
3523  void Add(const char* pStr);
3524  void AddNewLine() { Add('\n'); }
3525  void AddNumber(uint32_t num);
3526  void AddNumber(uint64_t num);
3527  void AddPointer(const void* ptr);
3528 
3529 private:
3530  VmaVector< char, VmaStlAllocator<char> > m_Data;
3531 };
3532 
3533 void VmaStringBuilder::Add(const char* pStr)
3534 {
3535  const size_t strLen = strlen(pStr);
3536  if(strLen > 0)
3537  {
3538  const size_t oldCount = m_Data.size();
3539  m_Data.resize(oldCount + strLen);
3540  memcpy(m_Data.data() + oldCount, pStr, strLen);
3541  }
3542 }
3543 
3544 void VmaStringBuilder::AddNumber(uint32_t num)
3545 {
3546  char buf[11];
3547  VmaUint32ToStr(buf, sizeof(buf), num);
3548  Add(buf);
3549 }
3550 
3551 void VmaStringBuilder::AddNumber(uint64_t num)
3552 {
3553  char buf[21];
3554  VmaUint64ToStr(buf, sizeof(buf), num);
3555  Add(buf);
3556 }
3557 
3558 void VmaStringBuilder::AddPointer(const void* ptr)
3559 {
3560  char buf[21];
3561  VmaPtrToStr(buf, sizeof(buf), ptr);
3562  Add(buf);
3563 }
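/*
A minimal usage sketch, assuming a valid `allocator`. Note the internal buffer
is not null-terminated, so GetLength() must accompany GetData():

    VmaStringBuilder sb(allocator);
    sb.Add("BytesMoved: ");
    sb.AddNumber(123u); // Selects the uint32_t overload.
    sb.AddNewLine();
    // printf("%.*s", (int)sb.GetLength(), sb.GetData());
*/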
3564 
3565 #endif // #if VMA_STATS_STRING_ENABLED
3566 
3568 // VmaJsonWriter
3569 
3570 #if VMA_STATS_STRING_ENABLED
3571 
3572 class VmaJsonWriter
3573 {
3574 public:
3575  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3576  ~VmaJsonWriter();
3577 
3578  void BeginObject(bool singleLine = false);
3579  void EndObject();
3580 
3581  void BeginArray(bool singleLine = false);
3582  void EndArray();
3583 
3584  void WriteString(const char* pStr);
3585  void BeginString(const char* pStr = VMA_NULL);
3586  void ContinueString(const char* pStr);
3587  void ContinueString(uint32_t n);
3588  void ContinueString(uint64_t n);
3589  void EndString(const char* pStr = VMA_NULL);
3590 
3591  void WriteNumber(uint32_t n);
3592  void WriteNumber(uint64_t n);
3593  void WriteBool(bool b);
3594  void WriteNull();
3595 
3596 private:
3597  static const char* const INDENT;
3598 
3599  enum COLLECTION_TYPE
3600  {
3601  COLLECTION_TYPE_OBJECT,
3602  COLLECTION_TYPE_ARRAY,
3603  };
3604  struct StackItem
3605  {
3606  COLLECTION_TYPE type;
3607  uint32_t valueCount;
3608  bool singleLineMode;
3609  };
3610 
3611  VmaStringBuilder& m_SB;
3612  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3613  bool m_InsideString;
3614 
3615  void BeginValue(bool isString);
3616  void WriteIndent(bool oneLess = false);
3617 };
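/*
Inside an object, entries must alternate name/value: each WriteString() key is
followed by exactly one value (enforced by the assert in BeginValue below).
A minimal sketch, assuming a prepared VmaStringBuilder sb:

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Count");
    json.WriteNumber(2u);
    json.WriteString("Empty");
    json.WriteBool(false);
    json.EndObject();
    // Yields roughly: { "Count": 2, "Empty": false }, with each pair on its
    // own indented line.
*/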
3618 
3619 const char* const VmaJsonWriter::INDENT = " ";
3620 
3621 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3622  m_SB(sb),
3623  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3624  m_InsideString(false)
3625 {
3626 }
3627 
3628 VmaJsonWriter::~VmaJsonWriter()
3629 {
3630  VMA_ASSERT(!m_InsideString);
3631  VMA_ASSERT(m_Stack.empty());
3632 }
3633 
3634 void VmaJsonWriter::BeginObject(bool singleLine)
3635 {
3636  VMA_ASSERT(!m_InsideString);
3637 
3638  BeginValue(false);
3639  m_SB.Add('{');
3640 
3641  StackItem item;
3642  item.type = COLLECTION_TYPE_OBJECT;
3643  item.valueCount = 0;
3644  item.singleLineMode = singleLine;
3645  m_Stack.push_back(item);
3646 }
3647 
3648 void VmaJsonWriter::EndObject()
3649 {
3650  VMA_ASSERT(!m_InsideString);
3651 
3652  WriteIndent(true);
3653  m_SB.Add('}');
3654 
3655  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3656  m_Stack.pop_back();
3657 }
3658 
3659 void VmaJsonWriter::BeginArray(bool singleLine)
3660 {
3661  VMA_ASSERT(!m_InsideString);
3662 
3663  BeginValue(false);
3664  m_SB.Add('[');
3665 
3666  StackItem item;
3667  item.type = COLLECTION_TYPE_ARRAY;
3668  item.valueCount = 0;
3669  item.singleLineMode = singleLine;
3670  m_Stack.push_back(item);
3671 }
3672 
3673 void VmaJsonWriter::EndArray()
3674 {
3675  VMA_ASSERT(!m_InsideString);
3676 
3677  WriteIndent(true);
3678  m_SB.Add(']');
3679 
3680  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3681  m_Stack.pop_back();
3682 }
3683 
3684 void VmaJsonWriter::WriteString(const char* pStr)
3685 {
3686  BeginString(pStr);
3687  EndString();
3688 }
3689 
3690 void VmaJsonWriter::BeginString(const char* pStr)
3691 {
3692  VMA_ASSERT(!m_InsideString);
3693 
3694  BeginValue(true);
3695  m_SB.Add('"');
3696  m_InsideString = true;
3697  if(pStr != VMA_NULL && pStr[0] != '\0')
3698  {
3699  ContinueString(pStr);
3700  }
3701 }
3702 
3703 void VmaJsonWriter::ContinueString(const char* pStr)
3704 {
3705  VMA_ASSERT(m_InsideString);
3706 
3707  const size_t strLen = strlen(pStr);
3708  for(size_t i = 0; i < strLen; ++i)
3709  {
3710  char ch = pStr[i];
3711  if(ch == '\\') // Backslash must be escaped; an apostrophe needs no escaping in JSON.
3712  {
3713  m_SB.Add("\\\\");
3714  }
3715  else if(ch == '"')
3716  {
3717  m_SB.Add("\\\"");
3718  }
3719  else if(ch >= 32)
3720  {
3721  m_SB.Add(ch);
3722  }
3723  else switch(ch)
3724  {
3725  case '\n':
3726  m_SB.Add("\\n");
3727  break;
3728  case '\r':
3729  m_SB.Add("\\r");
3730  break;
3731  case '\t':
3732  m_SB.Add("\\t");
3733  break;
3734  default:
3735  VMA_ASSERT(0 && "Character not currently supported.");
3736  break;
3737  }
3738  }
3739 }
3740 
3741 void VmaJsonWriter::ContinueString(uint32_t n)
3742 {
3743  VMA_ASSERT(m_InsideString);
3744  m_SB.AddNumber(n);
3745 }
3746 
3747 void VmaJsonWriter::ContinueString(uint64_t n)
3748 {
3749  VMA_ASSERT(m_InsideString);
3750  m_SB.AddNumber(n);
3751 }
3752 
3753 void VmaJsonWriter::EndString(const char* pStr)
3754 {
3755  VMA_ASSERT(m_InsideString);
3756  if(pStr != VMA_NULL && pStr[0] != '\0')
3757  {
3758  ContinueString(pStr);
3759  }
3760  m_SB.Add('"');
3761  m_InsideString = false;
3762 }
3763 
3764 void VmaJsonWriter::WriteNumber(uint32_t n)
3765 {
3766  VMA_ASSERT(!m_InsideString);
3767  BeginValue(false);
3768  m_SB.AddNumber(n);
3769 }
3770 
3771 void VmaJsonWriter::WriteNumber(uint64_t n)
3772 {
3773  VMA_ASSERT(!m_InsideString);
3774  BeginValue(false);
3775  m_SB.AddNumber(n);
3776 }
3777 
3778 void VmaJsonWriter::WriteBool(bool b)
3779 {
3780  VMA_ASSERT(!m_InsideString);
3781  BeginValue(false);
3782  m_SB.Add(b ? "true" : "false");
3783 }
3784 
3785 void VmaJsonWriter::WriteNull()
3786 {
3787  VMA_ASSERT(!m_InsideString);
3788  BeginValue(false);
3789  m_SB.Add("null");
3790 }
3791 
3792 void VmaJsonWriter::BeginValue(bool isString)
3793 {
3794  if(!m_Stack.empty())
3795  {
3796  StackItem& currItem = m_Stack.back();
3797  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3798  currItem.valueCount % 2 == 0)
3799  {
3800  VMA_ASSERT(isString);
3801  }
3802 
3803  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3804  currItem.valueCount % 2 != 0)
3805  {
3806  m_SB.Add(": ");
3807  }
3808  else if(currItem.valueCount > 0)
3809  {
3810  m_SB.Add(", ");
3811  WriteIndent();
3812  }
3813  else
3814  {
3815  WriteIndent();
3816  }
3817  ++currItem.valueCount;
3818  }
3819 }
3820 
3821 void VmaJsonWriter::WriteIndent(bool oneLess)
3822 {
3823  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3824  {
3825  m_SB.AddNewLine();
3826 
3827  size_t count = m_Stack.size();
3828  if(count > 0 && oneLess)
3829  {
3830  --count;
3831  }
3832  for(size_t i = 0; i < count; ++i)
3833  {
3834  m_SB.Add(INDENT);
3835  }
3836  }
3837 }
3838 
3839 #endif // #if VMA_STATS_STRING_ENABLED
3840 
3842 
3843 VkDeviceSize VmaAllocation_T::GetOffset() const
3844 {
3845  switch(m_Type)
3846  {
3847  case ALLOCATION_TYPE_BLOCK:
3848  return m_BlockAllocation.m_Offset;
3849  case ALLOCATION_TYPE_OWN:
3850  return 0;
3851  default:
3852  VMA_ASSERT(0);
3853  return 0;
3854  }
3855 }
3856 
3857 VkDeviceMemory VmaAllocation_T::GetMemory() const
3858 {
3859  switch(m_Type)
3860  {
3861  case ALLOCATION_TYPE_BLOCK:
3862  return m_BlockAllocation.m_Block->m_hMemory;
3863  case ALLOCATION_TYPE_OWN:
3864  return m_OwnAllocation.m_hMemory;
3865  default:
3866  VMA_ASSERT(0);
3867  return VK_NULL_HANDLE;
3868  }
3869 }
3870 
3871 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3872 {
3873  switch(m_Type)
3874  {
3875  case ALLOCATION_TYPE_BLOCK:
3876  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3877  case ALLOCATION_TYPE_OWN:
3878  return m_OwnAllocation.m_MemoryTypeIndex;
3879  default:
3880  VMA_ASSERT(0);
3881  return UINT32_MAX;
3882  }
3883 }
3884 
3885 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3886 {
3887  switch(m_Type)
3888  {
3889  case ALLOCATION_TYPE_BLOCK:
3890  return m_BlockAllocation.m_Block->m_BlockVectorType;
3891  case ALLOCATION_TYPE_OWN:
3892  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3893  default:
3894  VMA_ASSERT(0);
3895  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3896  }
3897 }
3898 
3899 void* VmaAllocation_T::GetMappedData() const
3900 {
3901  switch(m_Type)
3902  {
3903  case ALLOCATION_TYPE_BLOCK:
3904  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3905  {
3906  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3907  }
3908  else
3909  {
3910  return VMA_NULL;
3911  }
3912  break;
3913  case ALLOCATION_TYPE_OWN:
3914  return m_OwnAllocation.m_pMappedData;
3915  default:
3916  VMA_ASSERT(0);
3917  return VMA_NULL;
3918  }
3919 }
3920 
3921 bool VmaAllocation_T::CanBecomeLost() const
3922 {
3923  switch(m_Type)
3924  {
3925  case ALLOCATION_TYPE_BLOCK:
3926  return m_BlockAllocation.m_CanBecomeLost;
3927  case ALLOCATION_TYPE_OWN:
3928  return false;
3929  default:
3930  VMA_ASSERT(0);
3931  return false;
3932  }
3933 }
3934 
3935 VmaPool VmaAllocation_T::GetPool() const
3936 {
3937  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3938  return m_BlockAllocation.m_hPool;
3939 }
3940 
3941 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
3942 {
3943  VMA_ASSERT(CanBecomeLost());
3944 
3945  /*
3946  Warning: This is a carefully designed algorithm.
3947  Do not modify unless you really know what you're doing :)
3948  */
3949  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
3950  for(;;)
3951  {
3952  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
3953  {
3954  VMA_ASSERT(0);
3955  return false;
3956  }
3957  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
3958  {
3959  return false;
3960  }
3961  else // Last use time earlier than current time.
3962  {
3963  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
3964  {
3965  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
3966  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
3967  return true;
3968  }
3969  }
3970  }
3971 }
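// The loop above is a classic compare-and-swap retry: on failure,
// compare_exchange_weak reloads localLastUseFrameIndex with the currently
// stored value, so every iteration re-evaluates the lost/in-use checks
// against fresh data until the index is either observed to be still in use
// or successfully swapped to VMA_FRAME_INDEX_LOST.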
3972 
3973 #if VMA_STATS_STRING_ENABLED
3974 
3975 // These names correspond to values of enum VmaSuballocationType.
3976 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
3977  "FREE",
3978  "UNKNOWN",
3979  "BUFFER",
3980  "IMAGE_UNKNOWN",
3981  "IMAGE_LINEAR",
3982  "IMAGE_OPTIMAL",
3983 };
3984 
3985 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
3986 {
3987  json.BeginObject();
3988 
3989  json.WriteString("Blocks");
3990  json.WriteNumber(stat.BlockCount);
3991 
3992  json.WriteString("Allocations");
3993  json.WriteNumber(stat.AllocationCount);
3994 
3995  json.WriteString("UnusedRanges");
3996  json.WriteNumber(stat.UnusedRangeCount);
3997 
3998  json.WriteString("UsedBytes");
3999  json.WriteNumber(stat.UsedBytes);
4000 
4001  json.WriteString("UnusedBytes");
4002  json.WriteNumber(stat.UnusedBytes);
4003 
4004  json.WriteString("AllocationSize");
4005  json.BeginObject(true);
4006  json.WriteString("Min");
4007  json.WriteNumber(stat.AllocationSizeMin);
4008  json.WriteString("Avg");
4009  json.WriteNumber(stat.AllocationSizeAvg);
4010  json.WriteString("Max");
4011  json.WriteNumber(stat.AllocationSizeMax);
4012  json.EndObject();
4013 
4014  json.WriteString("UnusedRangeSize");
4015  json.BeginObject(true);
4016  json.WriteString("Min");
4017  json.WriteNumber(stat.UnusedRangeSizeMin);
4018  json.WriteString("Avg");
4019  json.WriteNumber(stat.UnusedRangeSizeAvg);
4020  json.WriteString("Max");
4021  json.WriteNumber(stat.UnusedRangeSizeMax);
4022  json.EndObject();
4023 
4024  json.EndObject();
4025 }
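/*
Sample of the JSON shape VmaPrintStatInfo produces (all values hypothetical):

    {
     "Blocks": 1, "Allocations": 24, "UnusedRanges": 3,
     "UsedBytes": 12582912, "UnusedBytes": 4194304,
     "AllocationSize": { "Min": 1024, "Avg": 524288, "Max": 2097152 },
     "UnusedRangeSize": { "Min": 4096, "Avg": 1398101, "Max": 4059136 }
    }
*/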
4026 
4027 #endif // #if VMA_STATS_STRING_ENABLED
4028 
4029 struct VmaSuballocationItemSizeLess
4030 {
4031  bool operator()(
4032  const VmaSuballocationList::iterator lhs,
4033  const VmaSuballocationList::iterator rhs) const
4034  {
4035  return lhs->size < rhs->size;
4036  }
4037  bool operator()(
4038  const VmaSuballocationList::iterator lhs,
4039  VkDeviceSize rhsSize) const
4040  {
4041  return lhs->size < rhsSize;
4042  }
4043 };
4044 
4045 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
4046  m_MemoryTypeIndex(UINT32_MAX),
4047  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
4048  m_hMemory(VK_NULL_HANDLE),
4049  m_Size(0),
4050  m_PersistentMap(false),
4051  m_pMappedData(VMA_NULL),
4052  m_FreeCount(0),
4053  m_SumFreeSize(0),
4054  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4055  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4056 {
4057 }
4058 
4059 void VmaDeviceMemoryBlock::Init(
4060  uint32_t newMemoryTypeIndex,
4061  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4062  VkDeviceMemory newMemory,
4063  VkDeviceSize newSize,
4064  bool persistentMap,
4065  void* pMappedData)
4066 {
4067  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4068 
4069  m_MemoryTypeIndex = newMemoryTypeIndex;
4070  m_BlockVectorType = newBlockVectorType;
4071  m_hMemory = newMemory;
4072  m_Size = newSize;
4073  m_PersistentMap = persistentMap;
4074  m_pMappedData = pMappedData;
4075  m_FreeCount = 1;
4076  m_SumFreeSize = newSize;
4077 
4078  m_Suballocations.clear();
4079  m_FreeSuballocationsBySize.clear();
4080 
4081  VmaSuballocation suballoc = {};
4082  suballoc.offset = 0;
4083  suballoc.size = newSize;
4084  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4085  suballoc.hAllocation = VK_NULL_HANDLE;
4086 
4087  m_Suballocations.push_back(suballoc);
4088  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4089  --suballocItem;
4090  m_FreeSuballocationsBySize.push_back(suballocItem);
4091 }
4092 
4093 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
4094 {
4095  // This is the most important assert in the entire library.
4096  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
4097  VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
4098 
4099  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
4100  if(m_pMappedData != VMA_NULL)
4101  {
4102  vkUnmapMemory(allocator->m_hDevice, m_hMemory);
4103  m_pMappedData = VMA_NULL;
4104  }
4105 
4106  // Callback.
4107  if(allocator->m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
4108  {
4109  (*allocator->m_DeviceMemoryCallbacks.pfnFree)(allocator, m_MemoryTypeIndex, m_hMemory, m_Size);
4110  }
4111 
4112  vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
4113  m_hMemory = VK_NULL_HANDLE;
4114 }
4115 
4116 bool VmaDeviceMemoryBlock::Validate() const
4117 {
4118  if((m_hMemory == VK_NULL_HANDLE) ||
4119  (m_Size == 0) ||
4120  m_Suballocations.empty())
4121  {
4122  return false;
4123  }
4124 
4125  // Expected offset of this suballocation, as calculated from previous ones.
4126  VkDeviceSize calculatedOffset = 0;
4127  // Expected number of free suballocations as calculated from traversing their list.
4128  uint32_t calculatedFreeCount = 0;
4129  // Expected sum size of free suballocations as calculated from traversing their list.
4130  VkDeviceSize calculatedSumFreeSize = 0;
4131  // Expected number of free suballocations that should be registered in
4132  // m_FreeSuballocationsBySize calculated from traversing their list.
4133  size_t freeSuballocationsToRegister = 0;
4134  // True if the previously visited suballocation was free.
4135  bool prevFree = false;
4136 
4137  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4138  suballocItem != m_Suballocations.cend();
4139  ++suballocItem)
4140  {
4141  const VmaSuballocation& subAlloc = *suballocItem;
4142 
4143  // Actual offset of this suballocation doesn't match expected one.
4144  if(subAlloc.offset != calculatedOffset)
4145  {
4146  return false;
4147  }
4148 
4149  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4150  // Two adjacent free suballocations are invalid. They should be merged.
4151  if(prevFree && currFree)
4152  {
4153  return false;
4154  }
4155  prevFree = currFree;
4156 
4157  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4158  {
4159  return false;
4160  }
4161 
4162  if(currFree)
4163  {
4164  calculatedSumFreeSize += subAlloc.size;
4165  ++calculatedFreeCount;
4166  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4167  {
4168  ++freeSuballocationsToRegister;
4169  }
4170  }
4171 
4172  calculatedOffset += subAlloc.size;
4173  }
4174 
4175  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4176  // match expected one.
4177  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4178  {
4179  return false;
4180  }
4181 
4182  VkDeviceSize lastSize = 0;
4183  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4184  {
4185  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4186 
4187  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4188  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4189  {
4190  return false;
4191  }
4192  // They must be sorted by size ascending.
4193  if(suballocItem->size < lastSize)
4194  {
4195  return false;
4196  }
4197 
4198  lastSize = suballocItem->size;
4199  }
4200 
4201  // Check if totals match calculated values.
4202  return
4203  (calculatedOffset == m_Size) &&
4204  (calculatedSumFreeSize == m_SumFreeSize) &&
4205  (calculatedFreeCount == m_FreeCount);
4206 }
4207 
4208 /*
4209 How many suitable free suballocations to analyze before choosing the best one.
4210 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4211  be chosen.
4212 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4213  suballocations will be analyzed and the best one will be chosen.
4214 - Any other value is also acceptable.
4215 */
4216 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4217 
4218 bool VmaDeviceMemoryBlock::CreateAllocationRequest(
4219  uint32_t currentFrameIndex,
4220  uint32_t frameInUseCount,
4221  VkDeviceSize bufferImageGranularity,
4222  VkDeviceSize allocSize,
4223  VkDeviceSize allocAlignment,
4224  VmaSuballocationType allocType,
4225  bool canMakeOtherLost,
4226  VmaAllocationRequest* pAllocationRequest)
4227 {
4228  VMA_ASSERT(allocSize > 0);
4229  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4230  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4231  VMA_HEAVY_ASSERT(Validate());
4232 
4233  // There is not enough total free space in this block to fulfill the request: Early return.
4234  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4235  {
4236  return false;
4237  }
4238 
4239  // New algorithm: efficiently search m_FreeSuballocationsBySize.
4240  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4241  if(freeSuballocCount > 0)
4242  {
4243  if(VMA_BEST_FIT)
4244  {
4245  // Find first free suballocation with size not less than allocSize.
4246  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4247  m_FreeSuballocationsBySize.data(),
4248  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4249  allocSize,
4250  VmaSuballocationItemSizeLess());
4251  size_t index = it - m_FreeSuballocationsBySize.data();
4252  for(; index < freeSuballocCount; ++index)
4253  {
4254  if(CheckAllocation(
4255  currentFrameIndex,
4256  frameInUseCount,
4257  bufferImageGranularity,
4258  allocSize,
4259  allocAlignment,
4260  allocType,
4261  m_FreeSuballocationsBySize[index],
4262  false, // canMakeOtherLost
4263  &pAllocationRequest->offset,
4264  &pAllocationRequest->itemsToMakeLostCount,
4265  &pAllocationRequest->sumFreeSize,
4266  &pAllocationRequest->sumItemSize))
4267  {
4268  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4269  return true;
4270  }
4271  }
4272  }
4273  else
4274  {
4275  // Search starting from the biggest suballocations.
4276  for(size_t index = freeSuballocCount; index--; )
4277  {
4278  if(CheckAllocation(
4279  currentFrameIndex,
4280  frameInUseCount,
4281  bufferImageGranularity,
4282  allocSize,
4283  allocAlignment,
4284  allocType,
4285  m_FreeSuballocationsBySize[index],
4286  false, // canMakeOtherLost
4287  &pAllocationRequest->offset,
4288  &pAllocationRequest->itemsToMakeLostCount,
4289  &pAllocationRequest->sumFreeSize,
4290  &pAllocationRequest->sumItemSize))
4291  {
4292  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4293  return true;
4294  }
4295  }
4296  }
4297  }
4298 
4299  if(canMakeOtherLost)
4300  {
4301  // Brute-force algorithm. TODO: Come up with something better.
4302 
4303  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4304  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4305 
4306  VmaAllocationRequest tmpAllocRequest = {};
4307  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4308  suballocIt != m_Suballocations.end();
4309  ++suballocIt)
4310  {
4311  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4312  suballocIt->hAllocation->CanBecomeLost())
4313  {
4314  if(CheckAllocation(
4315  currentFrameIndex,
4316  frameInUseCount,
4317  bufferImageGranularity,
4318  allocSize,
4319  allocAlignment,
4320  allocType,
4321  suballocIt,
4322  canMakeOtherLost,
4323  &tmpAllocRequest.offset,
4324  &tmpAllocRequest.itemsToMakeLostCount,
4325  &tmpAllocRequest.sumFreeSize,
4326  &tmpAllocRequest.sumItemSize))
4327  {
4328  tmpAllocRequest.item = suballocIt;
4329 
4330  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4331  {
4332  *pAllocationRequest = tmpAllocRequest;
4333  }
4334  }
4335  }
4336  }
4337 
4338  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4339  {
4340  return true;
4341  }
4342  }
4343 
4344  return false;
4345 }
4346 
4347 bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
4348 {
4349  while(pAllocationRequest->itemsToMakeLostCount > 0)
4350  {
4351  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4352  {
4353  ++pAllocationRequest->item;
4354  }
4355  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4356  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4357  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4358  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4359  {
4360  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4361  --pAllocationRequest->itemsToMakeLostCount;
4362  }
4363  else
4364  {
4365  return false;
4366  }
4367  }
4368 
4369  VMA_HEAVY_ASSERT(Validate());
4370  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4371  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4372 
4373  return true;
4374 }
4375 
4376 uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4377 {
4378  uint32_t lostAllocationCount = 0;
4379  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4380  it != m_Suballocations.end();
4381  ++it)
4382  {
4383  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4384  it->hAllocation->CanBecomeLost() &&
4385  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4386  {
4387  it = FreeSuballocation(it);
4388  ++lostAllocationCount;
4389  }
4390  }
4391  return lostAllocationCount;
4392 }
4393 
4394 bool VmaDeviceMemoryBlock::CheckAllocation(
4395  uint32_t currentFrameIndex,
4396  uint32_t frameInUseCount,
4397  VkDeviceSize bufferImageGranularity,
4398  VkDeviceSize allocSize,
4399  VkDeviceSize allocAlignment,
4400  VmaSuballocationType allocType,
4401  VmaSuballocationList::const_iterator suballocItem,
4402  bool canMakeOtherLost,
4403  VkDeviceSize* pOffset,
4404  size_t* itemsToMakeLostCount,
4405  VkDeviceSize* pSumFreeSize,
4406  VkDeviceSize* pSumItemSize) const
4407 {
4408  VMA_ASSERT(allocSize > 0);
4409  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4410  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4411  VMA_ASSERT(pOffset != VMA_NULL);
4412 
4413  *itemsToMakeLostCount = 0;
4414  *pSumFreeSize = 0;
4415  *pSumItemSize = 0;
4416 
4417  if(canMakeOtherLost)
4418  {
4419  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4420  {
4421  *pSumFreeSize = suballocItem->size;
4422  }
4423  else
4424  {
4425  if(suballocItem->hAllocation->CanBecomeLost() &&
4426  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4427  {
4428  ++*itemsToMakeLostCount;
4429  *pSumItemSize = suballocItem->size;
4430  }
4431  else
4432  {
4433  return false;
4434  }
4435  }
4436 
4437  // Remaining size is too small for this request: Early return.
4438  if(m_Size - suballocItem->offset < allocSize)
4439  {
4440  return false;
4441  }
4442 
4443  // Start from offset equal to beginning of this suballocation.
4444  *pOffset = suballocItem->offset;
4445 
4446  // Apply VMA_DEBUG_MARGIN at the beginning.
4447  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4448  {
4449  *pOffset += VMA_DEBUG_MARGIN;
4450  }
4451 
4452  // Apply alignment.
4453  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4454  *pOffset = VmaAlignUp(*pOffset, alignment);
4455 
4456  // Check previous suballocations for BufferImageGranularity conflicts.
4457  // Make bigger alignment if necessary.
4458  if(bufferImageGranularity > 1)
4459  {
4460  bool bufferImageGranularityConflict = false;
4461  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4462  while(prevSuballocItem != m_Suballocations.cbegin())
4463  {
4464  --prevSuballocItem;
4465  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4466  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4467  {
4468  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4469  {
4470  bufferImageGranularityConflict = true;
4471  break;
4472  }
4473  }
4474  else
4475  // Already on previous page.
4476  break;
4477  }
4478  if(bufferImageGranularityConflict)
4479  {
4480  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4481  }
4482  }
4483 
4484  // Now that we have final *pOffset, check if we are past suballocItem.
4485  // If yes, return false - this function should be called for another suballocItem as starting point.
4486  if(*pOffset >= suballocItem->offset + suballocItem->size)
4487  {
4488  return false;
4489  }
4490 
4491  // Calculate padding at the beginning based on current offset.
4492  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4493 
4494  // Calculate required margin at the end if this is not last suballocation.
4495  VmaSuballocationList::const_iterator next = suballocItem;
4496  ++next;
4497  const VkDeviceSize requiredEndMargin =
4498  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4499 
4500  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4501  // Another early return check.
4502  if(suballocItem->offset + totalSize > m_Size)
4503  {
4504  return false;
4505  }
4506 
4507  // Advance lastSuballocItem until desired size is reached.
4508  // Update itemsToMakeLostCount.
4509  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4510  if(totalSize > suballocItem->size)
4511  {
4512  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4513  while(remainingSize > 0)
4514  {
4515  ++lastSuballocItem;
4516  if(lastSuballocItem == m_Suballocations.cend())
4517  {
4518  return false;
4519  }
4520  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4521  {
4522  *pSumFreeSize += lastSuballocItem->size;
4523  }
4524  else
4525  {
4526  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4527  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4528  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4529  {
4530  ++*itemsToMakeLostCount;
4531  *pSumItemSize += lastSuballocItem->size;
4532  }
4533  else
4534  {
4535  return false;
4536  }
4537  }
4538  remainingSize = (lastSuballocItem->size < remainingSize) ?
4539  remainingSize - lastSuballocItem->size : 0;
4540  }
4541  }
4542 
4543  // Check next suballocations for BufferImageGranularity conflicts.
4544  // If conflict exists, we must mark more allocations lost or fail.
4545  if(bufferImageGranularity > 1)
4546  {
4547  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4548  ++nextSuballocItem;
4549  while(nextSuballocItem != m_Suballocations.cend())
4550  {
4551  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4552  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4553  {
4554  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4555  {
4556  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4557  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4558  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4559  {
4560  ++*itemsToMakeLostCount;
4561  }
4562  else
4563  {
4564  return false;
4565  }
4566  }
4567  }
4568  else
4569  {
4570  // Already on next page.
4571  break;
4572  }
4573  ++nextSuballocItem;
4574  }
4575  }
4576  }
4577  else
4578  {
4579  const VmaSuballocation& suballoc = *suballocItem;
4580  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4581 
4582  *pSumFreeSize = suballoc.size;
4583 
4584  // Size of this suballocation is too small for this request: Early return.
4585  if(suballoc.size < allocSize)
4586  {
4587  return false;
4588  }
4589 
4590  // Start from offset equal to beginning of this suballocation.
4591  *pOffset = suballoc.offset;
4592 
4593  // Apply VMA_DEBUG_MARGIN at the beginning.
4594  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4595  {
4596  *pOffset += VMA_DEBUG_MARGIN;
4597  }
4598 
4599  // Apply alignment.
4600  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4601  *pOffset = VmaAlignUp(*pOffset, alignment);
4602 
4603  // Check previous suballocations for BufferImageGranularity conflicts.
4604  // Make bigger alignment if necessary.
4605  if(bufferImageGranularity > 1)
4606  {
4607  bool bufferImageGranularityConflict = false;
4608  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4609  while(prevSuballocItem != m_Suballocations.cbegin())
4610  {
4611  --prevSuballocItem;
4612  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4613  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4614  {
4615  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4616  {
4617  bufferImageGranularityConflict = true;
4618  break;
4619  }
4620  }
4621  else
4622  // Already on previous page.
4623  break;
4624  }
4625  if(bufferImageGranularityConflict)
4626  {
4627  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4628  }
4629  }
4630 
4631  // Calculate padding at the beginning based on current offset.
4632  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4633 
4634  // Calculate required margin at the end if this is not last suballocation.
4635  VmaSuballocationList::const_iterator next = suballocItem;
4636  ++next;
4637  const VkDeviceSize requiredEndMargin =
4638  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4639 
4640  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4641  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4642  {
4643  return false;
4644  }
4645 
4646  // Check next suballocations for BufferImageGranularity conflicts.
4647  // If conflict exists, allocation cannot be made here.
4648  if(bufferImageGranularity > 1)
4649  {
4650  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4651  ++nextSuballocItem;
4652  while(nextSuballocItem != m_Suballocations.cend())
4653  {
4654  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4655  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4656  {
4657  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4658  {
4659  return false;
4660  }
4661  }
4662  else
4663  {
4664  // Already on next page.
4665  break;
4666  }
4667  ++nextSuballocItem;
4668  }
4669  }
4670  }
4671 
4672  // All tests passed: Success. pOffset is already filled.
4673  return true;
4674 }
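/*
A worked example of the offset math above, with hypothetical numbers: a free
suballocation starts at offset 1100, allocAlignment = 256, VMA_DEBUG_MARGIN = 0
and bufferImageGranularity = 1024. VmaAlignUp(1100, 256) gives *pOffset = 1280.
If the previous suballocation ends on the same 1024-byte "page" and holds a
conflicting resource type (e.g. an OPTIMAL image next to a buffer), the offset
is re-aligned: VmaAlignUp(1280, 1024) = 2048, so
paddingBegin = 2048 - 1100 = 948 bytes are left free before the allocation.
*/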
4675 
4676 bool VmaDeviceMemoryBlock::IsEmpty() const
4677 {
4678  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4679 }
4680 
4681 void VmaDeviceMemoryBlock::Alloc(
4682  const VmaAllocationRequest& request,
4683  VmaSuballocationType type,
4684  VkDeviceSize allocSize,
4685  VmaAllocation hAllocation)
4686 {
4687  VMA_ASSERT(request.item != m_Suballocations.end());
4688  VmaSuballocation& suballoc = *request.item;
4689  // Given suballocation is a free block.
4690  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4691  // Given offset is inside this suballocation.
4692  VMA_ASSERT(request.offset >= suballoc.offset);
4693  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4694  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4695  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4696 
4697  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4698  // it to become used.
4699  UnregisterFreeSuballocation(request.item);
4700 
4701  suballoc.offset = request.offset;
4702  suballoc.size = allocSize;
4703  suballoc.type = type;
4704  suballoc.hAllocation = hAllocation;
4705 
4706  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4707  if(paddingEnd)
4708  {
4709  VmaSuballocation paddingSuballoc = {};
4710  paddingSuballoc.offset = request.offset + allocSize;
4711  paddingSuballoc.size = paddingEnd;
4712  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4713  VmaSuballocationList::iterator next = request.item;
4714  ++next;
4715  const VmaSuballocationList::iterator paddingEndItem =
4716  m_Suballocations.insert(next, paddingSuballoc);
4717  RegisterFreeSuballocation(paddingEndItem);
4718  }
4719 
4720  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4721  if(paddingBegin)
4722  {
4723  VmaSuballocation paddingSuballoc = {};
4724  paddingSuballoc.offset = request.offset - paddingBegin;
4725  paddingSuballoc.size = paddingBegin;
4726  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4727  const VmaSuballocationList::iterator paddingBeginItem =
4728  m_Suballocations.insert(request.item, paddingSuballoc);
4729  RegisterFreeSuballocation(paddingBeginItem);
4730  }
4731 
4732  // Update totals.
4733  m_FreeCount = m_FreeCount - 1;
4734  if(paddingBegin > 0)
4735  {
4736  ++m_FreeCount;
4737  }
4738  if(paddingEnd > 0)
4739  {
4740  ++m_FreeCount;
4741  }
4742  m_SumFreeSize -= allocSize;
4743 }
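/*
A worked example of the accounting above, with hypothetical numbers: allocating
allocSize = 4096 at request.offset = 1024 out of a single free suballocation of
size 16384 at offset 0 gives paddingBegin = 1024 and paddingEnd = 11264. Both
paddings become new FREE suballocations, so m_FreeCount goes 1 -> 0 -> 2, and
m_SumFreeSize drops by exactly allocSize: 16384 -> 12288.
*/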
4744 
4745 VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
4746 {
4747  // Change this suballocation to be marked as free.
4748  VmaSuballocation& suballoc = *suballocItem;
4749  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4750  suballoc.hAllocation = VK_NULL_HANDLE;
4751 
4752  // Update totals.
4753  ++m_FreeCount;
4754  m_SumFreeSize += suballoc.size;
4755 
4756  // Merge with previous and/or next suballocation if it's also free.
4757  bool mergeWithNext = false;
4758  bool mergeWithPrev = false;
4759 
4760  VmaSuballocationList::iterator nextItem = suballocItem;
4761  ++nextItem;
4762  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
4763  {
4764  mergeWithNext = true;
4765  }
4766 
4767  VmaSuballocationList::iterator prevItem = suballocItem;
4768  if(suballocItem != m_Suballocations.begin())
4769  {
4770  --prevItem;
4771  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4772  {
4773  mergeWithPrev = true;
4774  }
4775  }
4776 
4777  if(mergeWithNext)
4778  {
4779  UnregisterFreeSuballocation(nextItem);
4780  MergeFreeWithNext(suballocItem);
4781  }
4782 
4783  if(mergeWithPrev)
4784  {
4785  UnregisterFreeSuballocation(prevItem);
4786  MergeFreeWithNext(prevItem);
4787  RegisterFreeSuballocation(prevItem);
4788  return prevItem;
4789  }
4790  else
4791  {
4792  RegisterFreeSuballocation(suballocItem);
4793  return suballocItem;
4794  }
4795 }
4796 
4797 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4798 {
4799  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4800  suballocItem != m_Suballocations.end();
4801  ++suballocItem)
4802  {
4803  VmaSuballocation& suballoc = *suballocItem;
4804  if(suballoc.hAllocation == allocation)
4805  {
4806  FreeSuballocation(suballocItem);
4807  VMA_HEAVY_ASSERT(Validate());
4808  return;
4809  }
4810  }
4811  VMA_ASSERT(0 && "Not found!");
4812 }
4813 
4814 #if VMA_STATS_STRING_ENABLED
4815 
4816 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4817 {
4818  json.BeginObject();
4819 
4820  json.WriteString("TotalBytes");
4821  json.WriteNumber(m_Size);
4822 
4823  json.WriteString("UnusedBytes");
4824  json.WriteNumber(m_SumFreeSize);
4825 
4826  json.WriteString("Allocations");
4827  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4828 
4829  json.WriteString("UnusedRanges");
4830  json.WriteNumber(m_FreeCount);
4831 
4832  json.WriteString("Suballocations");
4833  json.BeginArray();
4834  size_t i = 0;
4835  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4836  suballocItem != m_Suballocations.cend();
4837  ++suballocItem, ++i)
4838  {
4839  json.BeginObject(true);
4840 
4841  json.WriteString("Type");
4842  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4843 
4844  json.WriteString("Size");
4845  json.WriteNumber(suballocItem->size);
4846 
4847  json.WriteString("Offset");
4848  json.WriteNumber(suballocItem->offset);
4849 
4850  json.EndObject();
4851  }
4852  json.EndArray();
4853 
4854  json.EndObject();
4855 }
4856 
4857 #endif // #if VMA_STATS_STRING_ENABLED
4858 
4859 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4860 {
4861  VMA_ASSERT(item != m_Suballocations.end());
4862  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4863 
4864  VmaSuballocationList::iterator nextItem = item;
4865  ++nextItem;
4866  VMA_ASSERT(nextItem != m_Suballocations.end());
4867  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4868 
4869  item->size += nextItem->size;
4870  --m_FreeCount;
4871  m_Suballocations.erase(nextItem);
4872 }
4873 
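// Note on the two functions below: a free range is tracked in the size-sorted
// m_FreeSuballocationsBySize vector only when its size is at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER. Smaller free ranges still exist
// in m_Suballocations and can be merged with neighbors; they are simply not
// indexed for best-fit search, which keeps the sorted vector small.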
4874 void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
4875 {
4876  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4877  VMA_ASSERT(item->size > 0);
4878 
4879  // You may want to enable this validation at the beginning or at the end of
4880  // this function, depending on what you want to check.
4881  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4882 
4883  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4884  {
4885  if(m_FreeSuballocationsBySize.empty())
4886  {
4887  m_FreeSuballocationsBySize.push_back(item);
4888  }
4889  else
4890  {
4891  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
4892  }
4893  }
4894 
4895  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4896 }
4897 
4898 
4899 void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
4900 {
4901  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4902  VMA_ASSERT(item->size > 0);
4903 
4904  // You may want to enable this validation at the beginning or at the end of
4905  // this function, depending on what you want to check.
4906  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4907 
4908  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4909  {
4910  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4911  m_FreeSuballocationsBySize.data(),
4912  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
4913  item,
4914  VmaSuballocationItemSizeLess());
4915  for(size_t index = it - m_FreeSuballocationsBySize.data();
4916  index < m_FreeSuballocationsBySize.size();
4917  ++index)
4918  {
4919  if(m_FreeSuballocationsBySize[index] == item)
4920  {
4921  VmaVectorRemove(m_FreeSuballocationsBySize, index);
4922  return;
4923  }
4924  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
4925  }
4926  VMA_ASSERT(0 && "Not found.");
4927  }
4928 
4929  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4930 }
4931 
4932 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
4933 {
4934  VkDeviceSize lastSize = 0;
4935  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
4936  {
4937  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
4938 
4939  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
4940  {
4941  VMA_ASSERT(0);
4942  return false;
4943  }
4944  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4945  {
4946  VMA_ASSERT(0);
4947  return false;
4948  }
4949  if(it->size < lastSize)
4950  {
4951  VMA_ASSERT(0);
4952  return false;
4953  }
4954 
4955  lastSize = it->size;
4956  }
4957  return true;
4958 }
4959 
4960 static void InitStatInfo(VmaStatInfo& outInfo)
4961 {
4962  memset(&outInfo, 0, sizeof(outInfo));
4963  outInfo.AllocationSizeMin = UINT64_MAX;
4964  outInfo.UnusedRangeSizeMin = UINT64_MAX;
4965 }
4966 
4967 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& alloc)
4968 {
4969  outInfo.BlockCount = 1;
4970 
4971  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
4972  outInfo.AllocationCount = rangeCount - alloc.m_FreeCount;
4973  outInfo.UnusedRangeCount = alloc.m_FreeCount;
4974 
4975  outInfo.UnusedBytes = alloc.m_SumFreeSize;
4976  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
4977 
4978  outInfo.AllocationSizeMin = UINT64_MAX;
4979  outInfo.AllocationSizeMax = 0;
4980  outInfo.UnusedRangeSizeMin = UINT64_MAX;
4981  outInfo.UnusedRangeSizeMax = 0;
4982 
4983  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
4984  suballocItem != alloc.m_Suballocations.cend();
4985  ++suballocItem)
4986  {
4987  const VmaSuballocation& suballoc = *suballocItem;
4988  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4989  {
4990  outInfo.AllocationSizeMin = VMA_MIN(outInfo.AllocationSizeMin, suballoc.size);
4991  outInfo.AllocationSizeMax = VMA_MAX(outInfo.AllocationSizeMax, suballoc.size);
4992  }
4993  else
4994  {
4995  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
4996  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
4997  }
4998  }
4999 }
5000 
5001 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5002 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5003 {
5004  inoutInfo.BlockCount += srcInfo.BlockCount;
5005  inoutInfo.AllocationCount += srcInfo.AllocationCount;
5006  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
5007  inoutInfo.UsedBytes += srcInfo.UsedBytes;
5008  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
5009  inoutInfo.AllocationSizeMin = VMA_MIN(inoutInfo.AllocationSizeMin, srcInfo.AllocationSizeMin);
5010  inoutInfo.AllocationSizeMax = VMA_MAX(inoutInfo.AllocationSizeMax, srcInfo.AllocationSizeMax);
5011  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
5012  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
5013 }
5014 
5015 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5016 {
5017  inoutInfo.AllocationSizeAvg = (inoutInfo.AllocationCount > 0) ?
5018  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.AllocationCount) : 0;
5019  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
5020  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
5021 }
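// The helpers above are meant to be composed per block and then post-processed
// once per aggregate. A minimal usage sketch (VmaAllocator_T::CalculateStats,
// defined later in this file, follows the same pattern):
//
//   VmaStatInfo total;
//   InitStatInfo(total);
//   for(/* each VmaDeviceMemoryBlock* pBlock */)
//   {
//       VmaStatInfo blockInfo;
//       CalcAllocationStatInfo(blockInfo, *pBlock);
//       VmaAddStatInfo(total, blockInfo);
//   }
//   VmaPostprocessCalcStatInfo(total); // fills the *Avg fields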
5022 
5023 VmaPool_T::VmaPool_T(
5024  VmaAllocator hAllocator,
5025  const VmaPoolCreateInfo& createInfo) :
5026  m_BlockVector(
5027  hAllocator,
5028  createInfo.memoryTypeIndex,
5029  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5030  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5031  createInfo.blockSize,
5032  createInfo.minBlockCount,
5033  createInfo.maxBlockCount,
5034  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5035  createInfo.frameInUseCount,
5036  true) // isCustomPool
5037 {
5038 }
5039 
5040 VmaPool_T::~VmaPool_T()
5041 {
5042 }
5043 
5044 #if VMA_STATS_STRING_ENABLED
5045 
5046 /*
5047 void VmaPool_T::PrintDetailedMap(class VmaStringBuilder& sb)
5048 {
5049  sb.Add("\n{\n\"This\": \"");
5050  sb.AddPointer(this);
5051  sb.Add("\",\n\"Flags\": ");
5052  sb.AddNumber(m_Flags);
5053  sb.Add(",\n\"FrameInUseCount\": ");
5054  sb.AddNumber(m_FrameInUseCount);
5055  sb.Add(",\n\"DeviceMemoryBlock\": ");
5056  m_pDeviceMemoryBlock->PrintDetailedMap(sb);
5057  sb.Add("\n}");
5058 }
5059 */
5060 
5061 #endif // #if VMA_STATS_STRING_ENABLED
5062 
5063 VmaBlockVector::VmaBlockVector(
5064  VmaAllocator hAllocator,
5065  uint32_t memoryTypeIndex,
5066  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5067  VkDeviceSize preferredBlockSize,
5068  size_t minBlockCount,
5069  size_t maxBlockCount,
5070  VkDeviceSize bufferImageGranularity,
5071  uint32_t frameInUseCount,
5072  bool isCustomPool) :
5073  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5074  m_hAllocator(hAllocator),
5075  m_MemoryTypeIndex(memoryTypeIndex),
5076  m_BlockVectorType(blockVectorType),
5077  m_PreferredBlockSize(preferredBlockSize),
5078  m_MinBlockCount(minBlockCount),
5079  m_MaxBlockCount(maxBlockCount),
5080  m_BufferImageGranularity(bufferImageGranularity),
5081  m_FrameInUseCount(frameInUseCount),
5082  m_IsCustomPool(isCustomPool),
5083  m_HasEmptyBlock(false),
5084  m_pDefragmentator(VMA_NULL)
5085 {
5086 }
5087 
5088 VmaBlockVector::~VmaBlockVector()
5089 {
5090  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5091 
5092  for(size_t i = m_Blocks.size(); i--; )
5093  {
5094  m_Blocks[i]->Destroy(m_hAllocator);
5095  vma_delete(m_hAllocator, m_Blocks[i]);
5096  }
5097 }
5098 
5099 VkResult VmaBlockVector::CreateMinBlocks()
5100 {
5101  for(size_t i = 0; i < m_MinBlockCount; ++i)
5102  {
5103  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5104  if(res != VK_SUCCESS)
5105  {
5106  return res;
5107  }
5108  }
5109  return VK_SUCCESS;
5110 }
5111 
5112 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5113 {
5114  pStats->size = 0;
5115  pStats->unusedSize = 0;
5116  pStats->allocationCount = 0;
5117  pStats->unusedRangeCount = 0;
5118 
5119  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5120 
5121  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5122  {
5123  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5124  VMA_ASSERT(pBlock);
5125  VMA_HEAVY_ASSERT(pBlock->Validate());
5126 
5127  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5128 
5129  pStats->size += pBlock->m_Size;
5130  pStats->unusedSize += pBlock->m_SumFreeSize;
5131  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5132  pStats->unusedRangeCount += pBlock->m_FreeCount;
5133  }
5134 }
5135 
5136 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5137 
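// Allocation proceeds in up to three stages:
// 1. Scan existing blocks front to back (fullest first, see
//    IncrementallySortBlocks) without making any other allocation lost.
// 2. If allowed by flags and maxBlockCount, create a new block; default pools
//    retry with 1/2 and then 1/4 of the preferred block size when
//    vkAllocateMemory fails.
// 3. With VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT, choose the block where
//    making allocations lost is cheapest and retry up to
//    VMA_ALLOCATION_TRY_COUNT times, because concurrent threads may touch the
//    candidate allocations between the search and the commit.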
5138 VkResult VmaBlockVector::Allocate(
5139  VmaPool hCurrentPool,
5140  uint32_t currentFrameIndex,
5141  const VkMemoryRequirements& vkMemReq,
5142  const VmaAllocationCreateInfo& createInfo,
5143  VmaSuballocationType suballocType,
5144  VmaAllocation* pAllocation)
5145 {
5146  // Validate flags.
5147  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5148  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5149  {
5150  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5151  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5152  }
5153 
5154  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5155 
5156  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5157  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5158  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5159  {
5160  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5161  VMA_ASSERT(pCurrBlock);
5162  VmaAllocationRequest currRequest = {};
5163  if(pCurrBlock->CreateAllocationRequest(
5164  currentFrameIndex,
5165  m_FrameInUseCount,
5166  m_BufferImageGranularity,
5167  vkMemReq.size,
5168  vkMemReq.alignment,
5169  suballocType,
5170  false, // canMakeOtherLost
5171  &currRequest))
5172  {
5173  // Allocate from pCurrBlock.
5174  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5175 
5176  // We no longer have an empty block.
5177  if(pCurrBlock->IsEmpty())
5178  {
5179  m_HasEmptyBlock = false;
5180  }
5181 
5182  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5183  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5184  (*pAllocation)->InitBlockAllocation(
5185  hCurrentPool,
5186  pCurrBlock,
5187  currRequest.offset,
5188  vkMemReq.alignment,
5189  vkMemReq.size,
5190  suballocType,
5191  createInfo.pUserData,
5192  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5193  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5194  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5195  return VK_SUCCESS;
5196  }
5197  }
5198 
5199  const bool canCreateNewBlock =
5200  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5201  (m_Blocks.size() < m_MaxBlockCount);
5202 
5203  // 2. Try to create new block.
5204  if(canCreateNewBlock)
5205  {
5206  // 2.1. Start with full preferredBlockSize.
5207  VkDeviceSize blockSize = m_PreferredBlockSize;
5208  size_t newBlockIndex = 0;
5209  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5210  // Allocating blocks of other sizes is allowed only in default pools.
5211  // In custom pools block size is fixed.
5212  if(res < 0 && m_IsCustomPool == false)
5213  {
5214  // 2.2. Try half the size.
5215  blockSize /= 2;
5216  if(blockSize >= vkMemReq.size)
5217  {
5218  res = CreateBlock(blockSize, &newBlockIndex);
5219  if(res < 0)
5220  {
5221  // 2.3. Try quarter the size.
5222  blockSize /= 2;
5223  if(blockSize >= vkMemReq.size)
5224  {
5225  res = CreateBlock(blockSize, &newBlockIndex);
5226  }
5227  }
5228  }
5229  }
5230  if(res == VK_SUCCESS)
5231  {
5232  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5233  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5234 
5235  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5236  VmaAllocationRequest allocRequest = {};
5237  allocRequest.item = pBlock->m_Suballocations.begin();
5238  allocRequest.offset = 0;
5239  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5240  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5241  (*pAllocation)->InitBlockAllocation(
5242  hCurrentPool,
5243  pBlock,
5244  allocRequest.offset,
5245  vkMemReq.alignment,
5246  vkMemReq.size,
5247  suballocType,
5248  createInfo.pUserData,
5249  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5250  VMA_HEAVY_ASSERT(pBlock->Validate());
5251  VMA_DEBUG_LOG("    Created new allocation Size=%llu", pBlock->m_Size);
5252 
5253  return VK_SUCCESS;
5254  }
5255  }
5256 
5257  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5258 
5259  // 3. Try to allocate from existing blocks with making other allocations lost.
5260  if(canMakeOtherLost)
5261  {
5262  uint32_t tryIndex = 0;
5263  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5264  {
5265  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5266  VmaAllocationRequest bestRequest = {};
5267  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5268 
5269  // 1. Search existing allocations.
5270  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5271  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5272  {
5273  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5274  VMA_ASSERT(pCurrBlock);
5275  VmaAllocationRequest currRequest = {};
5276  if(pCurrBlock->CreateAllocationRequest(
5277  currentFrameIndex,
5278  m_FrameInUseCount,
5279  m_BufferImageGranularity,
5280  vkMemReq.size,
5281  vkMemReq.alignment,
5282  suballocType,
5283  canMakeOtherLost,
5284  &currRequest))
5285  {
5286  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5287  if(pBestRequestBlock == VMA_NULL ||
5288  currRequestCost < bestRequestCost)
5289  {
5290  pBestRequestBlock = pCurrBlock;
5291  bestRequest = currRequest;
5292  bestRequestCost = currRequestCost;
5293 
5294  if(bestRequestCost == 0)
5295  {
5296  break;
5297  }
5298  }
5299  }
5300  }
5301 
5302  if(pBestRequestBlock != VMA_NULL)
5303  {
5304  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5305  currentFrameIndex,
5306  m_FrameInUseCount,
5307  &bestRequest))
5308  {
5309  // We no longer have an empty block.
5310  if(pBestRequestBlock->IsEmpty())
5311  {
5312  m_HasEmptyBlock = false;
5313  }
5314  // Allocate from this pBlock.
5315  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5316  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5317  (*pAllocation)->InitBlockAllocation(
5318  hCurrentPool,
5319  pBestRequestBlock,
5320  bestRequest.offset,
5321  vkMemReq.alignment,
5322  vkMemReq.size,
5323  suballocType,
5324  createInfo.pUserData,
5325  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5326  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5327  VMA_DEBUG_LOG("    Returned from existing block");
5328  return VK_SUCCESS;
5329  }
5330  // else: Some allocations must have been touched while we are here. Next try.
5331  }
5332  else
5333  {
5334  // Could not find place in any of the blocks - break outer loop.
5335  break;
5336  }
5337  }
5338  /* Maximum number of tries exceeded - a very unlikely event when many other
5339  threads are simultaneously touching allocations, making it impossible to make them
5340  lost at the same time as we try to allocate. */
5341  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5342  {
5343  return VK_ERROR_TOO_MANY_OBJECTS;
5344  }
5345  }
5346 
5347  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5348 }
5349 
5350 void VmaBlockVector::Free(
5351  VmaAllocation hAllocation)
5352 {
5353  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5354 
5355  // Scope for lock.
5356  {
5357  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5358 
5359  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5360 
5361  pBlock->Free(hAllocation);
5362  VMA_HEAVY_ASSERT(pBlock->Validate());
5363 
5364  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5365 
5366  // pBlock became empty after this deallocation.
5367  if(pBlock->IsEmpty())
5368  {
5369  // We already have an empty block. We don't want two, so delete this one.
5370  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5371  {
5372  pBlockToDelete = pBlock;
5373  Remove(pBlock);
5374  }
5375  // We now have our first empty block.
5376  else
5377  {
5378  m_HasEmptyBlock = true;
5379  }
5380  }
5381  // Must be done last, while still under the lock, because it may reorder m_Blocks, invalidating indices into the vector.
5382  IncrementallySortBlocks();
5383  }
5384 
5385  // Destruction of an empty block. Deferred until this point, outside of the
5386  // mutex lock, for performance reasons.
5387  if(pBlockToDelete != VMA_NULL)
5388  {
5389  VMA_DEBUG_LOG("  Deleted empty block");
5390  pBlockToDelete->Destroy(m_hAllocator);
5391  vma_delete(m_hAllocator, pBlockToDelete);
5392  }
5393 }
5394 
5395 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5396 {
5397  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5398  {
5399  if(m_Blocks[blockIndex] == pBlock)
5400  {
5401  VmaVectorRemove(m_Blocks, blockIndex);
5402  return;
5403  }
5404  }
5405  VMA_ASSERT(0);
5406 }
5407 
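// Bubble-sorts only until the first swap, i.e. performs at most one adjacent
// swap per call. Invoked after every free, this incrementally maintains
// m_Blocks in ascending order of m_SumFreeSize, so the forward scan in
// Allocate() prefers the fullest blocks.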
5408 void VmaBlockVector::IncrementallySortBlocks()
5409 {
5410  // Bubble sort only until first swap.
5411  for(size_t i = 1; i < m_Blocks.size(); ++i)
5412  {
5413  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5414  {
5415  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5416  return;
5417  }
5418  }
5419 }
5420 
5421 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5422 {
5423  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5424  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5425  allocInfo.allocationSize = blockSize;
5426  const VkDevice hDevice = m_hAllocator->m_hDevice;
5427  VkDeviceMemory mem = VK_NULL_HANDLE;
5428  VkResult res = vkAllocateMemory(hDevice, &allocInfo, m_hAllocator->GetAllocationCallbacks(), &mem);
5429  if(res < 0)
5430  {
5431  return res;
5432  }
5433 
5434  // New VkDeviceMemory successfully created.
5435 
5436  // Map memory if needed.
5437  void* pMappedData = VMA_NULL;
5438  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5439  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5440  {
5441  res = vkMapMemory(hDevice, mem, 0, VK_WHOLE_SIZE, 0, &pMappedData);
5442  if(res < 0)
5443  {
5444  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5445  vkFreeMemory(hDevice, mem, m_hAllocator->GetAllocationCallbacks());
5446  return res;
5447  }
5448  }
5449 
5450  // Callback.
5451  if(m_hAllocator->m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
5452  {
5453  (*m_hAllocator->m_DeviceMemoryCallbacks.pfnAllocate)(m_hAllocator, m_MemoryTypeIndex, mem, allocInfo.allocationSize);
5454  }
5455 
5456  // Create a new block object for it.
5457  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5458  pBlock->Init(
5459  m_MemoryTypeIndex,
5460  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5461  mem,
5462  allocInfo.allocationSize,
5463  persistentMap,
5464  pMappedData);
5465 
5466  m_Blocks.push_back(pBlock);
5467  if(pNewBlockIndex != VMA_NULL)
5468  {
5469  *pNewBlockIndex = m_Blocks.size() - 1;
5470  }
5471 
5472  return VK_SUCCESS;
5473 }
5474 
5475 #if VMA_STATS_STRING_ENABLED
5476 
5477 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5478 {
5479  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5480  json.BeginArray();
5481  for(size_t i = 0; i < m_Blocks.size(); ++i)
5482  {
5483  m_Blocks[i]->PrintDetailedMap(json);
5484  }
5485  json.EndArray();
5486 }
5487 
5488 #endif // #if VMA_STATS_STRING_ENABLED
5489 
5490 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5491 {
5492  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5493 
5494  for(size_t i = m_Blocks.size(); i--; )
5495  {
5496  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5497  if(pBlock->m_pMappedData != VMA_NULL)
5498  {
5499  VMA_ASSERT(pBlock->m_PersistentMap);
5500  vkUnmapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5501  pBlock->m_pMappedData = VMA_NULL;
5502  }
5503  }
5504 }
5505 
5506 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5507 {
5508  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5509 
5510  VkResult finalResult = VK_SUCCESS;
5511  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5512  {
5513  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5514  if(pBlock->m_PersistentMap)
5515  {
5516  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5517  VkResult localResult = vkMapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &pBlock->m_pMappedData);
5518  if(localResult != VK_SUCCESS)
5519  {
5520  finalResult = localResult;
5521  }
5522  }
5523  }
5524  return finalResult;
5525 }
5526 
5527 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5528  VkDevice hDevice,
5529  const VkAllocationCallbacks* pAllocationCallbacks,
5530  uint32_t currentFrameIndex)
5531 {
5532  if(m_pDefragmentator == VMA_NULL)
5533  {
5534  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5535  hDevice,
5536  pAllocationCallbacks,
5537  this,
5538  currentFrameIndex);
5539  }
5540 
5541  return m_pDefragmentator;
5542 }
5543 
5544 VkResult VmaBlockVector::Defragment(
5545  VmaDefragmentationStats* pDefragmentationStats,
5546  VkDeviceSize& maxBytesToMove,
5547  uint32_t& maxAllocationsToMove)
5548 {
5549  if(m_pDefragmentator == VMA_NULL)
5550  {
5551  return VK_SUCCESS;
5552  }
5553 
5554  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5555 
5556  // Defragment.
5557  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5558 
5559  // Accumulate statistics.
5560  if(pDefragmentationStats != VMA_NULL)
5561  {
5562  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5563  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5564  pDefragmentationStats->bytesMoved += bytesMoved;
5565  pDefragmentationStats->allocationsMoved += allocationsMoved;
5566  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5567  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5568  maxBytesToMove -= bytesMoved;
5569  maxAllocationsToMove -= allocationsMoved;
5570  }
5571 
5572  // Free empty blocks.
5573  m_HasEmptyBlock = false;
5574  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5575  {
5576  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5577  if(pBlock->IsEmpty())
5578  {
5579  if(m_Blocks.size() > m_MinBlockCount)
5580  {
5581  if(pDefragmentationStats != VMA_NULL)
5582  {
5583  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5584  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5585  }
5586 
5587  VmaVectorRemove(m_Blocks, blockIndex);
5588  pBlock->Destroy(m_hAllocator);
5589  vma_delete(m_hAllocator, pBlock);
5590  }
5591  else
5592  {
5593  m_HasEmptyBlock = true;
5594  }
5595  }
5596  }
5597 
5598  return result;
5599 }
5600 
5601 void VmaBlockVector::DestroyDefragmentator()
5602 {
5603  if(m_pDefragmentator != VMA_NULL)
5604  {
5605  vma_delete(m_hAllocator, m_pDefragmentator);
5606  m_pDefragmentator = VMA_NULL;
5607  }
5608 }
5609 
5610 void VmaBlockVector::MakePoolAllocationsLost(
5611  uint32_t currentFrameIndex,
5612  size_t* pLostAllocationCount)
5613 {
5614  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5615 
5616  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5617  {
5618  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5619  VMA_ASSERT(pBlock);
5620  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5621  }
5622 }
5623 
5624 void VmaBlockVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex)
5625 {
5626  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5627 
5628  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5629  {
5630  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5631  VMA_ASSERT(pBlock);
5632  VMA_HEAVY_ASSERT(pBlock->Validate());
5633  VmaStatInfo allocationStatInfo;
5634  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5635  VmaAddStatInfo(pStats->total, allocationStatInfo);
5636  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5637  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5638  }
5639 }
5640 
5641 ////////////////////////////////////////////////////////////////////////////////
5642 // VmaDefragmentator members definition
5643 
5644 VmaDefragmentator::VmaDefragmentator(
5645  VkDevice hDevice,
5646  const VkAllocationCallbacks* pAllocationCallbacks,
5647  VmaBlockVector* pBlockVector,
5648  uint32_t currentFrameIndex) :
5649  m_hDevice(hDevice),
5650  m_pAllocationCallbacks(pAllocationCallbacks),
5651  m_pBlockVector(pBlockVector),
5652  m_CurrentFrameIndex(currentFrameIndex),
5653  m_BytesMoved(0),
5654  m_AllocationsMoved(0),
5655  m_Allocations(VmaStlAllocator<AllocationInfo>(pAllocationCallbacks)),
5656  m_Blocks(VmaStlAllocator<BlockInfo*>(pAllocationCallbacks))
5657 {
5658 }
5659 
5660 VmaDefragmentator::~VmaDefragmentator()
5661 {
5662  for(size_t i = m_Blocks.size(); i--; )
5663  {
5664  vma_delete(m_pAllocationCallbacks, m_Blocks[i]);
5665  }
5666 }
5667 
5668 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5669 {
5670  AllocationInfo allocInfo;
5671  allocInfo.m_hAllocation = hAlloc;
5672  allocInfo.m_pChanged = pChanged;
5673  m_Allocations.push_back(allocInfo);
5674 }
5675 
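// Performs one round of defragmentation. Source candidates are visited from
// the last block backwards and, within each block, from the back of its
// largest-to-smallest sorted allocation list (smallest first). Each candidate
// is moved into the earliest block and offset where it fits, as judged by
// MoveMakesSense(). Returns VK_INCOMPLETE when the byte or move limits are
// reached before the round completes.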
5676 VkResult VmaDefragmentator::DefragmentRound(
5677  VkDeviceSize maxBytesToMove,
5678  uint32_t maxAllocationsToMove)
5679 {
5680  if(m_Blocks.empty())
5681  {
5682  return VK_SUCCESS;
5683  }
5684 
5685  size_t srcBlockIndex = m_Blocks.size() - 1;
5686  size_t srcAllocIndex = SIZE_MAX;
5687  for(;;)
5688  {
5689  // 1. Find next allocation to move.
5690  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5691  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5692  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5693  {
5694  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5695  {
5696  // Finished: no more allocations to process.
5697  if(srcBlockIndex == 0)
5698  {
5699  return VK_SUCCESS;
5700  }
5701  else
5702  {
5703  --srcBlockIndex;
5704  srcAllocIndex = SIZE_MAX;
5705  }
5706  }
5707  else
5708  {
5709  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5710  }
5711  }
5712 
5713  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5714  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5715 
5716  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5717  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5718  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5719  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5720 
5721  // 2. Try to find new place for this allocation in preceding or current block.
5722  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5723  {
5724  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5725  VmaAllocationRequest dstAllocRequest;
5726  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
5727  m_CurrentFrameIndex,
5728  m_pBlockVector->GetFrameInUseCount(),
5729  m_pBlockVector->GetBufferImageGranularity(),
5730  size,
5731  alignment,
5732  suballocType,
5733  false, // canMakeOtherLost
5734  &dstAllocRequest) &&
5735  MoveMakesSense(
5736  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5737  {
5738  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5739 
5740  // Reached limit on number of allocations or bytes to move.
5741  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5742  (m_BytesMoved + size > maxBytesToMove))
5743  {
5744  return VK_INCOMPLETE;
5745  }
5746 
5747  void* pDstMappedData = VMA_NULL;
5748  VkResult res = pDstBlockInfo->EnsureMapping(m_hDevice, &pDstMappedData);
5749  if(res != VK_SUCCESS)
5750  {
5751  return res;
5752  }
5753 
5754  void* pSrcMappedData = VMA_NULL;
5755  res = pSrcBlockInfo->EnsureMapping(m_hDevice, &pSrcMappedData);
5756  if(res != VK_SUCCESS)
5757  {
5758  return res;
5759  }
5760 
5761  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
5762  memcpy(
5763  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
5764  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
5765  size);
5766 
5767  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
5768  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
5769 
5770  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
5771 
5772  if(allocInfo.m_pChanged != VMA_NULL)
5773  {
5774  *allocInfo.m_pChanged = VK_TRUE;
5775  }
5776 
5777  ++m_AllocationsMoved;
5778  m_BytesMoved += size;
5779 
5780  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
5781 
5782  break;
5783  }
5784  }
5785 
5786  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
5787 
5788  if(srcAllocIndex > 0)
5789  {
5790  --srcAllocIndex;
5791  }
5792  else
5793  {
5794  if(srcBlockIndex > 0)
5795  {
5796  --srcBlockIndex;
5797  srcAllocIndex = SIZE_MAX;
5798  }
5799  else
5800  {
5801  return VK_SUCCESS;
5802  }
5803  }
5804  }
5805 }
5806 
5807 VkResult VmaDefragmentator::Defragment(
5808  VkDeviceSize maxBytesToMove,
5809  uint32_t maxAllocationsToMove)
5810 {
5811  if(m_Allocations.empty())
5812  {
5813  return VK_SUCCESS;
5814  }
5815 
5816  // Create block info for each block.
5817  const size_t blockCount = m_pBlockVector->m_Blocks.size();
5818  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5819  {
5820  BlockInfo* pBlockInfo = vma_new(m_pAllocationCallbacks, BlockInfo)(m_pAllocationCallbacks);
5821  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
5822  m_Blocks.push_back(pBlockInfo);
5823  }
5824 
5825  // Sort them by m_pBlock pointer value.
5826  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
5827 
5828  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i]->m_Allocations.
5829  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
5830  {
5831  AllocationInfo& allocInfo = m_Allocations[allocIndex];
5832  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
5833  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
5834  {
5835  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
5836  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
5837  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
5838  {
5839  (*it)->m_Allocations.push_back(allocInfo);
5840  }
5841  else
5842  {
5843  VMA_ASSERT(0);
5844  }
5845  }
5846  }
5847  m_Allocations.clear();
5848 
5849  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5850  {
5851  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
5852  pBlockInfo->CalcHasNonMovableAllocations();
5853  pBlockInfo->SortAllocationsBySizeDescecnding();
5854  }
5855 
5856  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
5857  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
5858 
5859  // Execute defragmentation rounds (the main part).
5860  VkResult result = VK_SUCCESS;
5861  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
5862  {
5863  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
5864  }
5865 
5866  // Unmap blocks that were mapped for defragmentation.
5867  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5868  {
5869  m_Blocks[blockIndex]->Unmap(m_hDevice);
5870  }
5871 
5872  return result;
5873 }
5874 
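// A move is worthwhile only if it compacts data toward the front: the
// destination pair (blockIndex, offset) must compare lexicographically less
// than the source pair.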
5875 bool VmaDefragmentator::MoveMakesSense(
5876  size_t dstBlockIndex, VkDeviceSize dstOffset,
5877  size_t srcBlockIndex, VkDeviceSize srcOffset)
5878 {
5879  if(dstBlockIndex < srcBlockIndex)
5880  {
5881  return true;
5882  }
5883  if(dstBlockIndex > srcBlockIndex)
5884  {
5885  return false;
5886  }
5887  if(dstOffset < srcOffset)
5888  {
5889  return true;
5890  }
5891  return false;
5892 }
5893 
5894 ////////////////////////////////////////////////////////////////////////////////
5895 // VmaAllocator_T
5896 
5897 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
5898  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
5899  m_PhysicalDevice(pCreateInfo->physicalDevice),
5900  m_hDevice(pCreateInfo->device),
5901  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
5902  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
5903  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
5904  m_UnmapPersistentlyMappedMemoryCounter(0),
5905  m_PreferredLargeHeapBlockSize(0),
5906  m_PreferredSmallHeapBlockSize(0),
5907  m_CurrentFrameIndex(0),
5908  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
5909 {
5910  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
5911 
5912  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
5913  memset(&m_MemProps, 0, sizeof(m_MemProps));
5914  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
5915 
5916  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
5917  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
5918 
5919  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
5920  {
5921  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
5922  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
5923  }
5924 
5925  vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
5926  vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);
5927 
5928  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
5929  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
5930  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
5931  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
5932 
5933  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
5934  {
5935  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
5936 
5937  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
5938  {
5939  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
5940  this,
5941  memTypeIndex,
5942  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
5943  preferredBlockSize,
5944  0,
5945  SIZE_MAX,
5946  GetBufferImageGranularity(),
5947  pCreateInfo->frameInUseCount,
5948  false); // isCustomPool
5949  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
5950  // because minBlockCount is 0.
5951  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
5952  }
5953  }
5954 }
5955 
5956 VmaAllocator_T::~VmaAllocator_T()
5957 {
5958  VMA_ASSERT(m_Pools.empty());
5959 
5960  for(size_t i = GetMemoryTypeCount(); i--; )
5961  {
5962  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
5963  {
5964  vma_delete(this, m_pOwnAllocations[i][j]);
5965  vma_delete(this, m_pBlockVectors[i][j]);
5966  }
5967  }
5968 }
5969 
5970 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
5971 {
5972  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
5973  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
5974  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
5975 }
5976 
5977 VkResult VmaAllocator_T::AllocateMemoryOfType(
5978  const VkMemoryRequirements& vkMemReq,
5979  const VmaAllocationCreateInfo& createInfo,
5980  uint32_t memTypeIndex,
5981  VmaSuballocationType suballocType,
5982  VmaAllocation* pAllocation)
5983 {
5984  VMA_ASSERT(pAllocation != VMA_NULL);
5985  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
5986 
5987  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
5988  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
5989  VMA_ASSERT(blockVector);
5990 
5991  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
5992  // Heuristics: Allocate own memory if requested size is greater than half of the preferred block size.
5993  const bool ownMemory =
5994  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
5995  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
5996  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
5997  vkMemReq.size > preferredBlockSize / 2);
5998 
5999  if(ownMemory)
6000  {
6001  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6002  {
6003  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6004  }
6005  else
6006  {
6007  return AllocateOwnMemory(
6008  vkMemReq.size,
6009  suballocType,
6010  memTypeIndex,
6011  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6012  createInfo.pUserData,
6013  pAllocation);
6014  }
6015  }
6016  else
6017  {
6018  VkResult res = blockVector->Allocate(
6019  VK_NULL_HANDLE, // hCurrentPool
6020  m_CurrentFrameIndex.load(),
6021  vkMemReq,
6022  createInfo,
6023  suballocType,
6024  pAllocation);
6025  if(res == VK_SUCCESS)
6026  {
6027  return res;
6028  }
6029 
6030  // Fall back to own memory.
6031  res = AllocateOwnMemory(
6032  vkMemReq.size,
6033  suballocType,
6034  memTypeIndex,
6035  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6036  createInfo.pUserData,
6037  pAllocation);
6038  if(res == VK_SUCCESS)
6039  {
6040  // Succeeded: AllocateOwnMemory already filled *pAllocation, nothing more to do here.
6041  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6042  return VK_SUCCESS;
6043  }
6044  else
6045  {
6046  // Everything failed: Return error code.
6047  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6048  return res;
6049  }
6050  }
6051 }
6052 
6053 VkResult VmaAllocator_T::AllocateOwnMemory(
6054  VkDeviceSize size,
6055  VmaSuballocationType suballocType,
6056  uint32_t memTypeIndex,
6057  bool map,
6058  void* pUserData,
6059  VmaAllocation* pAllocation)
6060 {
6061  VMA_ASSERT(pAllocation);
6062 
6063  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6064  allocInfo.memoryTypeIndex = memTypeIndex;
6065  allocInfo.allocationSize = size;
6066 
6067  // Allocate VkDeviceMemory.
6068  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6069  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &hMemory);
6070  if(res < 0)
6071  {
6072  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6073  return res;
6074  }
6075 
6076  void* pMappedData = VMA_NULL;
6077  if(map)
6078  {
6079  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6080  {
6081  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6082  if(res < 0)
6083  {
6084  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6085  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
6086  return res;
6087  }
6088  }
6089  }
6090 
6091  // Callback.
6092  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6093  {
6094  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, hMemory, size);
6095  }
6096 
6097  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6098  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6099 
6100  // Register it in m_pOwnAllocations.
6101  {
6102  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6103  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6104  VMA_ASSERT(pOwnAllocations);
6105  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6106  }
6107 
6108  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6109 
6110  return VK_SUCCESS;
6111 }
6112 
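// Allocations targeting a custom pool are forwarded to the pool's block
// vector. Otherwise memory types are tried in order of preference: after a
// type fails, its bit is cleared from the candidate mask with
// memoryTypeBits &= ~(1u << memTypeIndex) and vmaFindMemoryTypeIndex is
// consulted again, until an allocation succeeds or no compatible type remains.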
6113 VkResult VmaAllocator_T::AllocateMemory(
6114  const VkMemoryRequirements& vkMemReq,
6115  const VmaAllocationCreateInfo& createInfo,
6116  VmaSuballocationType suballocType,
6117  VmaAllocation* pAllocation)
6118 {
6119  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6120  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6121  {
6122  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6123  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6124  }
6125  if((createInfo.pool != VK_NULL_HANDLE) &&
6126  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6127  {
6128  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6129  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6130  }
6131 
6132  if(createInfo.pool != VK_NULL_HANDLE)
6133  {
6134  return createInfo.pool->m_BlockVector.Allocate(
6135  createInfo.pool,
6136  m_CurrentFrameIndex.load(),
6137  vkMemReq,
6138  createInfo,
6139  suballocType,
6140  pAllocation);
6141  }
6142  else
6143  {
6144  // Bit mask of Vulkan memory types acceptable for this allocation.
6145  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6146  uint32_t memTypeIndex = UINT32_MAX;
6147  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6148  if(res == VK_SUCCESS)
6149  {
6150  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6151  // Succeeded on first try.
6152  if(res == VK_SUCCESS)
6153  {
6154  return res;
6155  }
6156  // Allocation from this memory type failed. Try other compatible memory types.
6157  else
6158  {
6159  for(;;)
6160  {
6161  // Remove old memTypeIndex from list of possibilities.
6162  memoryTypeBits &= ~(1u << memTypeIndex);
6163  // Find alternative memTypeIndex.
6164  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6165  if(res == VK_SUCCESS)
6166  {
6167  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6168  // Allocation from this alternative memory type succeeded.
6169  if(res == VK_SUCCESS)
6170  {
6171  return res;
6172  }
6173  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6174  }
6175  // No other matching memory type index could be found.
6176  else
6177  {
6178  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6179  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6180  }
6181  }
6182  }
6183  }
6184  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6185  else
6186  return res;
6187  }
6188 }
6189 
6190 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6191 {
6192  VMA_ASSERT(allocation);
6193 
6194  if(allocation->CanBecomeLost() == false ||
6195  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6196  {
6197  switch(allocation->GetType())
6198  {
6199  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6200  {
6201  VmaBlockVector* pBlockVector = VMA_NULL;
6202  VmaPool hPool = allocation->GetPool();
6203  if(hPool != VK_NULL_HANDLE)
6204  {
6205  pBlockVector = &hPool->m_BlockVector;
6206  }
6207  else
6208  {
6209  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6210  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6211  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6212  }
6213  pBlockVector->Free(allocation);
6214  }
6215  break;
6216  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6217  FreeOwnMemory(allocation);
6218  break;
6219  default:
6220  VMA_ASSERT(0);
6221  }
6222  }
6223 
6224  vma_delete(this, allocation);
6225 }
6226 
6227 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6228 {
6229  InitStatInfo(pStats->total);
6230  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6231  InitStatInfo(pStats->memoryType[i]);
6232  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6233  InitStatInfo(pStats->memoryHeap[i]);
6234 
6235  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6236  {
6237  const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6238  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6239  {
6240  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6241  VMA_ASSERT(pBlockVector);
6242  pBlockVector->AddStats(pStats, memTypeIndex, heapIndex);
6243  }
6244  }
6245 
6246  VmaPostprocessCalcStatInfo(pStats->total);
6247  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6248  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6249  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6250  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6251 }
6252 
6253 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
6254 
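// The following pair of functions is nesting-counted:
// m_UnmapPersistentlyMappedMemoryCounter is incremented/decremented on every
// call and the actual vkUnmapMemory/vkMapMemory work happens only on the
// 0 -> 1 and 1 -> 0 transitions. The work is further restricted to AMD GPUs
// (vendorID checked below) and to memory types that are both DEVICE_LOCAL and
// HOST_VISIBLE; in all other cases these calls only adjust the counter.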
6255 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6256 {
6257  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6258  {
6259  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6260  {
6261  for(size_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6262  {
6263  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6264  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6265  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6266  {
6267  // Process OwnAllocations.
6268  {
6269  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6270  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6271  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6272  {
6273  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6274  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(m_hDevice);
6275  }
6276  }
6277 
6278  // Process normal Allocations.
6279  {
6280  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6281  pBlockVector->UnmapPersistentlyMappedMemory();
6282  }
6283  }
6284  }
6285 
6286  // Process custom pools.
6287  {
6288  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6289  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6290  {
6291  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6292  }
6293  }
6294  }
6295  }
6296 }
6297 
6298 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6299 {
6300  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6301  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6302  {
6303  VkResult finalResult = VK_SUCCESS;
6304  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6305  {
6306  // Process custom pools.
6307  {
6308  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6309  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6310  {
6311  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6312  }
6313  }
6314 
6315  for(size_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6316  {
6317  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6318  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6319  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6320  {
6321  // Process OwnAllocations.
6322  {
6323  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6324  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6325  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6326  {
6327  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6328  hAlloc->OwnAllocMapPersistentlyMappedMemory(m_hDevice);
6329  }
6330  }
6331 
6332  // Process normal Allocations.
6333  {
6334  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6335  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6336  if(localResult != VK_SUCCESS)
6337  {
6338  finalResult = localResult;
6339  }
6340  }
6341  }
6342  }
6343  }
6344  return finalResult;
6345  }
6346  else
6347  return VK_SUCCESS;
6348 }
6349 
6350 VkResult VmaAllocator_T::Defragment(
6351  VmaAllocation* pAllocations,
6352  size_t allocationCount,
6353  VkBool32* pAllocationsChanged,
6354  const VmaDefragmentationInfo* pDefragmentationInfo,
6355  VmaDefragmentationStats* pDefragmentationStats)
6356 {
6357  if(pAllocationsChanged != VMA_NULL)
6358  {
6359  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
6360  }
6361  if(pDefragmentationStats != VMA_NULL)
6362  {
6363  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6364  }
6365 
6366  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6367  {
6368  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6369  return VK_ERROR_MEMORY_MAP_FAILED;
6370  }
6371 
6372  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6373 
6374  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6375 
6376  const size_t poolCount = m_Pools.size();
6377 
6378  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6379  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6380  {
6381  VmaAllocation hAlloc = pAllocations[allocIndex];
6382  VMA_ASSERT(hAlloc);
6383  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6384  // OwnAlloc cannot be defragmented.
6385  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6386  // Only HOST_VISIBLE memory types can be defragmented.
6387  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6388  // Lost allocation cannot be defragmented.
6389  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6390  {
6391  VmaBlockVector* pAllocBlockVector = VMA_NULL;
6392 
6393  const VmaPool hAllocPool = hAlloc->GetPool();
6394  // This allocation belongs to custom pool.
6395  if(hAllocPool != VK_NULL_HANDLE)
6396  {
6397  pAllocBlockVector = &hAllocPool->GetBlockVector();
6398  }
6399  // This allocation belongs to general pool.
6400  else
6401  {
6402  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6403  }
6404 
6405  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(
6406  m_hDevice,
6407  GetAllocationCallbacks(),
6408  currentFrameIndex);
6409 
6410  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6411  &pAllocationsChanged[allocIndex] : VMA_NULL;
6412  pDefragmentator->AddAllocation(hAlloc, pChanged);
6413  }
6414  }
6415 
6416  VkResult result = VK_SUCCESS;
6417 
6418  // ======== Main processing.
6419 
6420  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
6421  uint32_t maxAllocationsToMove = UINT32_MAX;
6422  if(pDefragmentationInfo != VMA_NULL)
6423  {
6424  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6425  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6426  }
6427 
6428  // Process standard memory.
6429  for(uint32_t memTypeIndex = 0;
6430  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6431  ++memTypeIndex)
6432  {
6433  // Only HOST_VISIBLE memory types can be defragmented.
6434  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6435  {
6436  for(uint32_t blockVectorType = 0;
6437  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6438  ++blockVectorType)
6439  {
6440  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6441  pDefragmentationStats,
6442  maxBytesToMove,
6443  maxAllocationsToMove);
6444  }
6445  }
6446  }
6447 
6448  // Process custom pools.
6449  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6450  {
6451  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6452  pDefragmentationStats,
6453  maxBytesToMove,
6454  maxAllocationsToMove);
6455  }
6456 
6457  // ======== Destroy defragmentators.
6458 
6459  // Process custom pools.
6460  for(size_t poolIndex = poolCount; poolIndex--; )
6461  {
6462  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6463  }
6464 
6465  // Process standard memory.
6466  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6467  {
6468  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6469  {
6470  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6471  {
6472  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6473  }
6474  }
6475  }
6476 
6477  return result;
6478 }
6479 
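// For an allocation that can become lost, querying its info also "touches" it:
// the loop below keeps trying to advance LastUseFrameIndex to the current
// frame with a compare-exchange, until it observes either the current frame
// (allocation alive, full info returned) or VMA_FRAME_INDEX_LOST (allocation
// lost, mostly-empty info returned).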
6480 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6481 {
6482  if(hAllocation->CanBecomeLost())
6483  {
6484  /*
6485  Warning: This is a carefully designed algorithm.
6486  Do not modify unless you really know what you're doing :)
6487  */
6488  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6489  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6490  for(;;)
6491  {
6492  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6493  {
6494  pAllocationInfo->memoryType = UINT32_MAX;
6495  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6496  pAllocationInfo->offset = 0;
6497  pAllocationInfo->size = hAllocation->GetSize();
6498  pAllocationInfo->pMappedData = VMA_NULL;
6499  pAllocationInfo->pUserData = hAllocation->GetUserData();
6500  return;
6501  }
6502  else if(localLastUseFrameIndex == localCurrFrameIndex)
6503  {
6504  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6505  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6506  pAllocationInfo->offset = hAllocation->GetOffset();
6507  pAllocationInfo->size = hAllocation->GetSize();
6508  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6509  pAllocationInfo->pUserData = hAllocation->GetUserData();
6510  return;
6511  }
6512  else // Last use time earlier than current time.
6513  {
6514  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6515  {
6516  localLastUseFrameIndex = localCurrFrameIndex;
6517  }
6518  }
6519  }
6520  }
6521  // We could reuse the loop above, but because this allocation can never become lost, we can skip the atomic compare-exchange on hAllocation's LastUseFrameIndex for performance.
6522  else
6523  {
6524  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6525  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6526  pAllocationInfo->offset = hAllocation->GetOffset();
6527  pAllocationInfo->size = hAllocation->GetSize();
6528  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6529  pAllocationInfo->pUserData = hAllocation->GetUserData();
6530  }
6531 }
6532 
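The lock-free loop above is what makes the following caller-side pattern safe. This is a minimal, hypothetical sketch (assuming `allocator`, `allocation`, and a per-frame `frameIndex` variable already exist), not code from the library:

// Each frame: advance the frame index, then query the allocation.
vmaSetCurrentFrameIndex(allocator, frameIndex);

VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
if(allocInfo.deviceMemory == VK_NULL_HANDLE)
{
    // Lost: GetAllocationInfo returned the null-memory state shown above.
    vmaFreeMemory(allocator, allocation);
    allocation = VK_NULL_HANDLE; // Recreate the resource afterwards.
}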
6533 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6534 {
6535  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6536 
6537  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6538 
6539  if(newCreateInfo.maxBlockCount == 0)
6540  {
6541  newCreateInfo.maxBlockCount = SIZE_MAX;
6542  }
6543  if(newCreateInfo.blockSize == 0)
6544  {
6545  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6546  }
6547 
6548  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6549 
6550  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6551  if(res != VK_SUCCESS)
6552  {
6553  vma_delete(this, *pPool);
6554  *pPool = VMA_NULL;
6555  return res;
6556  }
6557 
6558  // Add to m_Pools.
6559  {
6560  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6561  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6562  }
6563 
6564  return VK_SUCCESS;
6565 }
6566 
6567 void VmaAllocator_T::DestroyPool(VmaPool pool)
6568 {
6569  // Remove from m_Pools.
6570  {
6571  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6572  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6573  VMA_ASSERT(success && "Pool not found in Allocator.");
6574  }
6575 
6576  vma_delete(this, pool);
6577 }
6578 
6579 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6580 {
6581  pool->m_BlockVector.GetPoolStats(pPoolStats);
6582 }
6583 
6584 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6585 {
6586  m_CurrentFrameIndex.store(frameIndex);
6587 }
6588 
6589 void VmaAllocator_T::MakePoolAllocationsLost(
6590  VmaPool hPool,
6591  size_t* pLostAllocationCount)
6592 {
6593  hPool->m_BlockVector.MakePoolAllocationsLost(
6594  m_CurrentFrameIndex.load(),
6595  pLostAllocationCount);
6596 }
6597 
6598 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6599 {
6600  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6601  (*pAllocation)->InitLost();
6602 }
6603 
6604 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6605 {
6606  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6607 
6608  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6609  {
6610  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6611  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6612  VMA_ASSERT(pOwnAllocations);
6613  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6614  VMA_ASSERT(success);
6615  }
6616 
6617  VkDeviceMemory hMemory = allocation->GetMemory();
6618 
6619  // Callback.
6620  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6621  {
6622  (*m_DeviceMemoryCallbacks.pfnFree)(this, memTypeIndex, hMemory, allocation->GetSize());
6623  }
6624 
6625  if(allocation->GetMappedData() != VMA_NULL)
6626  {
6627  vkUnmapMemory(m_hDevice, hMemory);
6628  }
6629 
6630  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
6631 
6632  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6633 }
6634 
6635 #if VMA_STATS_STRING_ENABLED
6636 
6637 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
6638 {
6639  bool ownAllocationsStarted = false;
6640  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6641  {
6642  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6643  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6644  {
6645  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6646  VMA_ASSERT(pOwnAllocVector);
6647  if(pOwnAllocVector->empty() == false)
6648  {
6649  if(ownAllocationsStarted == false)
6650  {
6651  ownAllocationsStarted = true;
6652  json.WriteString("OwnAllocations");
6653  json.BeginObject();
6654  }
6655 
6656  json.BeginString("Type ");
6657  json.ContinueString(memTypeIndex);
6658  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6659  {
6660  json.ContinueString(" Mapped");
6661  }
6662  json.EndString();
6663 
6664  json.BeginArray();
6665 
6666  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
6667  {
6668  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
6669  json.BeginObject(true);
6670 
6671  json.WriteString("Size");
6672  json.WriteNumber(hAlloc->GetSize());
6673 
6674  json.WriteString("Type");
6675  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
6676 
6677  json.EndObject();
6678  }
6679 
6680  json.EndArray();
6681  }
6682  }
6683  }
6684  if(ownAllocationsStarted)
6685  {
6686  json.EndObject();
6687  }
6688 
6689  {
6690  bool allocationsStarted = false;
6691  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6692  {
6693  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6694  {
6695  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
6696  {
6697  if(allocationsStarted == false)
6698  {
6699  allocationsStarted = true;
6700  json.WriteString("Allocations");
6701  json.BeginObject();
6702  }
6703 
6704  json.BeginString("Type ");
6705  json.ContinueString(memTypeIndex);
6706  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6707  {
6708  json.ContinueString(" Mapped");
6709  }
6710  json.EndString();
6711 
6712  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
6713  }
6714  }
6715  }
6716  if(allocationsStarted)
6717  {
6718  json.EndObject();
6719  }
6720  }
6721 
6722  {
6723  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6724  const size_t poolCount = m_Pools.size();
6725  if(poolCount > 0)
6726  {
6727  json.WriteString("Pools");
6728  json.BeginArray();
6729  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
6730  {
6731  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
6732  }
6733  json.EndArray();
6734  }
6735  }
6736 }
6737 
6738 #endif // #if VMA_STATS_STRING_ENABLED
6739 
6740 static VkResult AllocateMemoryForImage(
6741  VmaAllocator allocator,
6742  VkImage image,
6743  const VmaAllocationCreateInfo* pAllocationCreateInfo,
6744  VmaSuballocationType suballocType,
6745  VmaAllocation* pAllocation)
6746 {
6747  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
6748 
6749  VkMemoryRequirements vkMemReq = {};
6750  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
6751 
6752  return allocator->AllocateMemory(
6753  vkMemReq,
6754  *pAllocationCreateInfo,
6755  suballocType,
6756  pAllocation);
6757 }
6758 
6759 ////////////////////////////////////////////////////////////////////////////////
6760 // Public interface
6761 
6762 VkResult vmaCreateAllocator(
6763  const VmaAllocatorCreateInfo* pCreateInfo,
6764  VmaAllocator* pAllocator)
6765 {
6766  VMA_ASSERT(pCreateInfo && pAllocator);
6767  VMA_DEBUG_LOG("vmaCreateAllocator");
6768  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
6769  return VK_SUCCESS;
6770 }
6771 
6772 void vmaDestroyAllocator(
6773  VmaAllocator allocator)
6774 {
6775  if(allocator != VK_NULL_HANDLE)
6776  {
6777  VMA_DEBUG_LOG("vmaDestroyAllocator");
6778  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
6779  vma_delete(&allocationCallbacks, allocator);
6780  }
6781 }
6782 
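A minimal setup/teardown sketch for the two functions above, assuming valid `physicalDevice` and `device` handles (illustrative only, not part of the header):

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
{
    // ... create buffers, images, pools ...
    vmaDestroyAllocator(allocator);
}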
6783 void vmaGetPhysicalDeviceProperties(
6784  VmaAllocator allocator,
6785  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
6786 {
6787  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
6788  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
6789 }
6790 
6791 void vmaGetMemoryProperties(
6792  VmaAllocator allocator,
6793  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
6794 {
6795  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
6796  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
6797 }
6798 
6799 void vmaGetMemoryTypeProperties(
6800  VmaAllocator allocator,
6801  uint32_t memoryTypeIndex,
6802  VkMemoryPropertyFlags* pFlags)
6803 {
6804  VMA_ASSERT(allocator && pFlags);
6805  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
6806  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
6807 }
6808 
6809 void vmaSetCurrentFrameIndex(
6810  VmaAllocator allocator,
6811  uint32_t frameIndex)
6812 {
6813  VMA_ASSERT(allocator);
6814  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
6815 
6816  VMA_DEBUG_GLOBAL_MUTEX_LOCK
6817 
6818  allocator->SetCurrentFrameIndex(frameIndex);
6819 }
6820 
6821 void vmaCalculateStats(
6822  VmaAllocator allocator,
6823  VmaStats* pStats)
6824 {
6825  VMA_ASSERT(allocator && pStats);
6826  VMA_DEBUG_GLOBAL_MUTEX_LOCK
6827  allocator->CalculateStats(pStats);
6828 }
6829 
6830 #if VMA_STATS_STRING_ENABLED
6831 
6832 void vmaBuildStatsString(
6833  VmaAllocator allocator,
6834  char** ppStatsString,
6835  VkBool32 detailedMap)
6836 {
6837  VMA_ASSERT(allocator && ppStatsString);
6838  VMA_DEBUG_GLOBAL_MUTEX_LOCK
6839 
6840  VmaStringBuilder sb(allocator);
6841  {
6842  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
6843  json.BeginObject();
6844 
6845  VmaStats stats;
6846  allocator->CalculateStats(&stats);
6847 
6848  json.WriteString("Total");
6849  VmaPrintStatInfo(json, stats.total);
6850 
6851  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
6852  {
6853  json.BeginString("Heap ");
6854  json.ContinueString(heapIndex);
6855  json.EndString();
6856  json.BeginObject();
6857 
6858  json.WriteString("Size");
6859  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
6860 
6861  json.WriteString("Flags");
6862  json.BeginArray(true);
6863  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
6864  {
6865  json.WriteString("DEVICE_LOCAL");
6866  }
6867  json.EndArray();
6868 
6869  if(stats.memoryHeap[heapIndex].BlockCount > 0)
6870  {
6871  json.WriteString("Stats");
6872  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
6873  }
6874 
6875  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
6876  {
6877  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
6878  {
6879  json.BeginString("Type ");
6880  json.ContinueString(typeIndex);
6881  json.EndString();
6882 
6883  json.BeginObject();
6884 
6885  json.WriteString("Flags");
6886  json.BeginArray(true);
6887  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
6888  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6889  {
6890  json.WriteString("DEVICE_LOCAL");
6891  }
6892  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6893  {
6894  json.WriteString("HOST_VISIBLE");
6895  }
6896  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
6897  {
6898  json.WriteString("HOST_COHERENT");
6899  }
6900  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
6901  {
6902  json.WriteString("HOST_CACHED");
6903  }
6904  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
6905  {
6906  json.WriteString("LAZILY_ALLOCATED");
6907  }
6908  json.EndArray();
6909 
6910  if(stats.memoryType[typeIndex].BlockCount > 0)
6911  {
6912  json.WriteString("Stats");
6913  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
6914  }
6915 
6916  json.EndObject();
6917  }
6918  }
6919 
6920  json.EndObject();
6921  }
6922  if(detailedMap == VK_TRUE)
6923  {
6924  allocator->PrintDetailedMap(json);
6925  }
6926 
6927  json.EndObject();
6928  }
6929 
6930  const size_t len = sb.GetLength();
6931  char* const pChars = vma_new_array(allocator, char, len + 1);
6932  if(len > 0)
6933  {
6934  memcpy(pChars, sb.GetData(), len);
6935  }
6936  pChars[len] = '\0';
6937  *ppStatsString = pChars;
6938 }
6939 
6940 void vmaFreeStatsString(
6941  VmaAllocator allocator,
6942  char* pStatsString)
6943 {
6944  if(pStatsString != VMA_NULL)
6945  {
6946  VMA_ASSERT(allocator);
6947  size_t len = strlen(pStatsString);
6948  vma_delete_array(allocator, pStatsString, len + 1);
6949  }
6950 }
6951 
6952 #endif // #if VMA_STATS_STRING_ENABLED
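Typical use of the pair above, sketched under the assumption that `allocator` is valid, VMA_STATS_STRING_ENABLED is 1, and <cstdio> is available for printf:

char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE also emits the detailed map.
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);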
6953 
6956 VkResult vmaFindMemoryTypeIndex(
6957  VmaAllocator allocator,
6958  uint32_t memoryTypeBits,
6959  const VmaAllocationCreateInfo* pAllocationCreateInfo,
6960  uint32_t* pMemoryTypeIndex)
6961 {
6962  VMA_ASSERT(allocator != VK_NULL_HANDLE);
6963  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
6964  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
6965 
6966  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
6967  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
6968  if(preferredFlags == 0)
6969  {
6970  preferredFlags = requiredFlags;
6971  }
6972  // preferredFlags, if not 0, must be a superset of requiredFlags.
6973  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
6974 
6975  // Convert usage to requiredFlags and preferredFlags.
6976  switch(pAllocationCreateInfo->usage)
6977  {
6978  case VMA_MEMORY_USAGE_UNKNOWN:
6979  break;
6980  case VMA_MEMORY_USAGE_GPU_ONLY:
6981  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
6982  break;
6983  case VMA_MEMORY_USAGE_CPU_ONLY:
6984  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
6985  break;
6986  case VMA_MEMORY_USAGE_CPU_TO_GPU:
6987  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6988  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
6989  break;
6990  case VMA_MEMORY_USAGE_GPU_TO_CPU:
6991  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6992  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
6993  break;
6994  default:
6995  break;
6996  }
6997 
6998  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
6999  {
7000  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7001  }
7002 
7003  *pMemoryTypeIndex = UINT32_MAX;
7004  uint32_t minCost = UINT32_MAX;
7005  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7006  memTypeIndex < allocator->GetMemoryTypeCount();
7007  ++memTypeIndex, memTypeBit <<= 1)
7008  {
7009  // This memory type is acceptable according to memoryTypeBits bitmask.
7010  if((memTypeBit & memoryTypeBits) != 0)
7011  {
7012  const VkMemoryPropertyFlags currFlags =
7013  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7014  // This memory type contains requiredFlags.
7015  if((requiredFlags & ~currFlags) == 0)
7016  {
7017  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7018  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7019  // Remember memory type with lowest cost.
7020  if(currCost < minCost)
7021  {
7022  *pMemoryTypeIndex = memTypeIndex;
7023  if(currCost == 0)
7024  {
7025  return VK_SUCCESS;
7026  }
7027  minCost = currCost;
7028  }
7029  }
7030  }
7031  }
7032  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7033 }
7034 
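A sketch of how the cost-based search above is typically driven, assuming a valid `allocator`; passing UINT32_MAX as memoryTypeBits accepts every memory type:

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // Maps to HOST_VISIBLE | HOST_COHERENT above.

uint32_t memTypeIndex = UINT32_MAX;
if(vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
{
    // memTypeIndex can seed VmaPoolCreateInfo::memoryTypeIndex, for example.
}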
7035 VkResult vmaCreatePool(
7036  VmaAllocator allocator,
7037  const VmaPoolCreateInfo* pCreateInfo,
7038  VmaPool* pPool)
7039 {
7040  VMA_ASSERT(allocator && pCreateInfo && pPool);
7041 
7042  VMA_DEBUG_LOG("vmaCreatePool");
7043 
7044  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7045 
7046  return allocator->CreatePool(pCreateInfo, pPool);
7047 }
7048 
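A hypothetical pool-lifetime sketch for the functions above and below; `allocator` and `memTypeIndex` (e.g. from vmaFindMemoryTypeIndex) are assumed to exist:

VmaPoolCreateInfo poolInfo = {};
poolInfo.memoryTypeIndex = memTypeIndex;
poolInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block.
poolInfo.minBlockCount = 1;               // Keep one block alive even when empty.

VmaPool pool = VK_NULL_HANDLE;
if(vmaCreatePool(allocator, &poolInfo, &pool) == VK_SUCCESS)
{
    // Allocate by setting VmaAllocationCreateInfo::pool = pool, then:
    vmaDestroyPool(allocator, pool);
}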
7049 void vmaDestroyPool(
7050  VmaAllocator allocator,
7051  VmaPool pool)
7052 {
7053  VMA_ASSERT(allocator && pool);
7054 
7055  VMA_DEBUG_LOG("vmaDestroyPool");
7056 
7057  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7058 
7059  allocator->DestroyPool(pool);
7060 }
7061 
7062 void vmaGetPoolStats(
7063  VmaAllocator allocator,
7064  VmaPool pool,
7065  VmaPoolStats* pPoolStats)
7066 {
7067  VMA_ASSERT(allocator && pool && pPoolStats);
7068 
7069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7070 
7071  allocator->GetPoolStats(pool, pPoolStats);
7072 }
7073 
7074 void vmaMakePoolAllocationsLost(
7075  VmaAllocator allocator,
7076  VmaPool pool,
7077  size_t* pLostAllocationCount)
7078 {
7079  VMA_ASSERT(allocator && pool);
7080 
7081  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7082 
7083  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7084 }
7085 
7086 VkResult vmaAllocateMemory(
7087  VmaAllocator allocator,
7088  const VkMemoryRequirements* pVkMemoryRequirements,
7089  const VmaAllocationCreateInfo* pCreateInfo,
7090  VmaAllocation* pAllocation,
7091  VmaAllocationInfo* pAllocationInfo)
7092 {
7093  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7094 
7095  VMA_DEBUG_LOG("vmaAllocateMemory");
7096 
7097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7098 
7099  VkResult result = allocator->AllocateMemory(
7100  *pVkMemoryRequirements,
7101  *pCreateInfo,
7102  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7103  pAllocation);
7104 
7105  if(pAllocationInfo && result == VK_SUCCESS)
7106  {
7107  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7108  }
7109 
7110  return result;
7111 }
7112 
7113 VkResult vmaAllocateMemoryForBuffer(
7114  VmaAllocator allocator,
7115  VkBuffer buffer,
7116  const VmaAllocationCreateInfo* pCreateInfo,
7117  VmaAllocation* pAllocation,
7118  VmaAllocationInfo* pAllocationInfo)
7119 {
7120  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7121 
7122  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7123 
7124  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7125 
7126  VkMemoryRequirements vkMemReq = {};
7127  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
7128 
7129  VkResult result = allocator->AllocateMemory(
7130  vkMemReq,
7131  *pCreateInfo,
7132  VMA_SUBALLOCATION_TYPE_BUFFER,
7133  pAllocation);
7134 
7135  if(pAllocationInfo && result == VK_SUCCESS)
7136  {
7137  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7138  }
7139 
7140  return result;
7141 }
7142 
7143 VkResult vmaAllocateMemoryForImage(
7144  VmaAllocator allocator,
7145  VkImage image,
7146  const VmaAllocationCreateInfo* pCreateInfo,
7147  VmaAllocation* pAllocation,
7148  VmaAllocationInfo* pAllocationInfo)
7149 {
7150  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7151 
7152  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7153 
7154  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7155 
7156  VkResult result = AllocateMemoryForImage(
7157  allocator,
7158  image,
7159  pCreateInfo,
7160  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7161  pAllocation);
7162 
7163  if(pAllocationInfo && result == VK_SUCCESS)
7164  {
7165  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7166  }
7167 
7168  return result;
7169 }
7170 
7171 void vmaFreeMemory(
7172  VmaAllocator allocator,
7173  VmaAllocation allocation)
7174 {
7175  VMA_ASSERT(allocator && allocation);
7176 
7177  VMA_DEBUG_LOG("vmaFreeMemory");
7178 
7179  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7180 
7181  allocator->FreeMemory(allocation);
7182 }
7183 
7184 void vmaGetAllocationInfo(
7185  VmaAllocator allocator,
7186  VmaAllocation allocation,
7187  VmaAllocationInfo* pAllocationInfo)
7188 {
7189  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7190 
7191  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7192 
7193  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7194 }
7195 
7196 void vmaSetAllocationUserData(
7197  VmaAllocator allocator,
7198  VmaAllocation allocation,
7199  void* pUserData)
7200 {
7201  VMA_ASSERT(allocator && allocation);
7202 
7203  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7204 
7205  allocation->SetUserData(pUserData);
7206 }
7207 
7208 void vmaCreateLostAllocation(
7209  VmaAllocator allocator,
7210  VmaAllocation* pAllocation)
7211 {
7212  VMA_ASSERT(allocator && pAllocation);
7213 
7214  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7215 
7216  allocator->CreateLostAllocation(pAllocation);
7217 }
7218 
7219 VkResult vmaMapMemory(
7220  VmaAllocator allocator,
7221  VmaAllocation allocation,
7222  void** ppData)
7223 {
7224  VMA_ASSERT(allocator && allocation && ppData);
7225 
7226  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7227 
7228  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
7229  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7230 }
7231 
7232 void vmaUnmapMemory(
7233  VmaAllocator allocator,
7234  VmaAllocation allocation)
7235 {
7236  VMA_ASSERT(allocator && allocation);
7237 
7238  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7239 
7240  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
7241 }
7242 
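A minimal upload sketch built on the two wrappers above; `allocator`, `allocation`, `srcData`, and `srcSize` are assumed to exist, the allocation must live in HOST_VISIBLE memory, and <cstring> provides memcpy:

void* mapped = nullptr;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, srcSize);
    // For non-HOST_COHERENT memory types, vkFlushMappedMemoryRanges would be needed here.
    vmaUnmapMemory(allocator, allocation);
}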
7243 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7244 {
7245  VMA_ASSERT(allocator);
7246 
7247  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7248 
7249  allocator->UnmapPersistentlyMappedMemory();
7250 }
7251 
7252 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7253 {
7254  VMA_ASSERT(allocator);
7255 
7256  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7257 
7258  return allocator->MapPersistentlyMappedMemory();
7259 }
7260 
7261 VkResult vmaDefragment(
7262  VmaAllocator allocator,
7263  VmaAllocation* pAllocations,
7264  size_t allocationCount,
7265  VkBool32* pAllocationsChanged,
7266  const VmaDefragmentationInfo *pDefragmentationInfo,
7267  VmaDefragmentationStats* pDefragmentationStats)
7268 {
7269  VMA_ASSERT(allocator && pAllocations);
7270 
7271  VMA_DEBUG_LOG("vmaDefragment");
7272 
7273  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7274 
7275  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7276 }
7277 
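A sketch of driving vmaDefragment, assuming `allocator` and a std::vector<VmaAllocation> named `allocations` (illustrative names, not from the header); per the implementation above, only allocations in HOST_VISIBLE memory are actually moved:

std::vector<VkBool32> changed(allocations.size());
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(
    allocator,
    allocations.data(), allocations.size(),
    changed.data(),
    nullptr, // Default limits: unlimited bytes and allocations to move.
    &defragStats);
// Wherever changed[i] == VK_TRUE, the buffer/image bound to allocations[i]
// must be destroyed, recreated, and rebound at its new offset.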
7278 VkResult vmaCreateBuffer(
7279  VmaAllocator allocator,
7280  const VkBufferCreateInfo* pBufferCreateInfo,
7281  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7282  VkBuffer* pBuffer,
7283  VmaAllocation* pAllocation,
7284  VmaAllocationInfo* pAllocationInfo)
7285 {
7286  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7287 
7288  VMA_DEBUG_LOG("vmaCreateBuffer");
7289 
7290  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7291 
7292  *pBuffer = VK_NULL_HANDLE;
7293  *pAllocation = VK_NULL_HANDLE;
7294 
7295  // 1. Create VkBuffer.
7296  VkResult res = vkCreateBuffer(allocator->m_hDevice, pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
7297  if(res >= 0)
7298  {
7299  // 2. vkGetBufferMemoryRequirements.
7300  VkMemoryRequirements vkMemReq = {};
7301  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
7302 
7303  // 3. Allocate memory using allocator.
7304  res = allocator->AllocateMemory(
7305  vkMemReq,
7306  *pAllocationCreateInfo,
7307  VMA_SUBALLOCATION_TYPE_BUFFER,
7308  pAllocation);
7309  if(res >= 0)
7310  {
7311  // 4. Bind buffer with memory.
7312  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
7313  if(res >= 0)
7314  {
7315  // All steps succeeded.
7316  if(pAllocationInfo != VMA_NULL)
7317  {
7318  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7319  }
7320  return VK_SUCCESS;
7321  }
7322  allocator->FreeMemory(*pAllocation);
7323  *pAllocation = VK_NULL_HANDLE;
7324  return res;
7325  }
7326  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7327  *pBuffer = VK_NULL_HANDLE;
7328  return res;
7329  }
7330  return res;
7331 }
7332 
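The canonical one-call pattern for the function above, sketched with a valid `allocator` assumed:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
if(vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr) == VK_SUCCESS)
{
    // ... use the buffer; then destroy it and free its memory together:
    vmaDestroyBuffer(allocator, buffer, allocation);
}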
7333 void vmaDestroyBuffer(
7334  VmaAllocator allocator,
7335  VkBuffer buffer,
7336  VmaAllocation allocation)
7337 {
7338  if(buffer != VK_NULL_HANDLE)
7339  {
7340  VMA_ASSERT(allocator);
7341 
7342  VMA_DEBUG_LOG("vmaDestroyBuffer");
7343 
7344  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7345 
7346  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7347 
7348  allocator->FreeMemory(allocation);
7349  }
7350 }
7351 
7352 VkResult vmaCreateImage(
7353  VmaAllocator allocator,
7354  const VkImageCreateInfo* pImageCreateInfo,
7355  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7356  VkImage* pImage,
7357  VmaAllocation* pAllocation,
7358  VmaAllocationInfo* pAllocationInfo)
7359 {
7360  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7361 
7362  VMA_DEBUG_LOG("vmaCreateImage");
7363 
7364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7365 
7366  *pImage = VK_NULL_HANDLE;
7367  *pAllocation = VK_NULL_HANDLE;
7368 
7369  // 1. Create VkImage.
7370  VkResult res = vkCreateImage(allocator->m_hDevice, pImageCreateInfo, allocator->GetAllocationCallbacks(), pImage);
7371  if(res >= 0)
7372  {
7373  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7374  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7375  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7376 
7377  // 2. Allocate memory using allocator.
7378  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7379  if(res >= 0)
7380  {
7381  // 3. Bind image with memory.
7382  res = vkBindImageMemory(allocator->m_hDevice, *pImage, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
7383  if(res >= 0)
7384  {
7385  // All steps succeeded.
7386  if(pAllocationInfo != VMA_NULL)
7387  {
7388  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7389  }
7390  return VK_SUCCESS;
7391  }
7392  allocator->FreeMemory(*pAllocation);
7393  *pAllocation = VK_NULL_HANDLE;
7394  return res;
7395  }
7396  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7397  *pImage = VK_NULL_HANDLE;
7398  return res;
7399  }
7400  return res;
7401 }
7402 
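The image counterpart of the buffer sketch, again assuming a valid `allocator`; note how tiling selects the suballocation type in the code above:

VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.extent = { 1024, 1024, 1 };
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // -> VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL above.
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
if(vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, nullptr) == VK_SUCCESS)
{
    // ... use the image; then:
    vmaDestroyImage(allocator, image, allocation);
}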
7403 void vmaDestroyImage(
7404  VmaAllocator allocator,
7405  VkImage image,
7406  VmaAllocation allocation)
7407 {
7408  if(image != VK_NULL_HANDLE)
7409  {
7410  VMA_ASSERT(allocator);
7411 
7412  VMA_DEBUG_LOG("vmaDestroyImage");
7413 
7414  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7415 
7416  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7417 
7418  allocator->FreeMemory(allocation);
7419  }
7420 }
7421 
7422 #endif // #ifdef VMA_IMPLEMENTATION