Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
420 #include <vulkan/vulkan.h>
421 
423 
427 VK_DEFINE_HANDLE(VmaAllocator)
428 
429 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
431  VmaAllocator allocator,
432  uint32_t memoryType,
433  VkDeviceMemory memory,
434  VkDeviceSize size);
436 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
437  VmaAllocator allocator,
438  uint32_t memoryType,
439  VkDeviceMemory memory,
440  VkDeviceSize size);
441 
447 typedef struct VmaDeviceMemoryCallbacks {
453 
455 typedef enum VmaAllocatorFlagBits {
461 
464 typedef VkFlags VmaAllocatorFlags;
465 
468 {
472 
473  VkPhysicalDevice physicalDevice;
475 
476  VkDevice device;
478 
481 
484 
485  const VkAllocationCallbacks* pAllocationCallbacks;
487 
502  uint32_t frameInUseCount;
520  const VkDeviceSize* pHeapSizeLimit;
522 
/// Creates a #VmaAllocator configured by *pCreateInfo and writes the new
/// handle to *pAllocator. Destroy it with vmaDestroyAllocator().
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);
527 
530  VmaAllocator allocator);
531 
537  VmaAllocator allocator,
538  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
539 
545  VmaAllocator allocator,
546  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
547 
555  VmaAllocator allocator,
556  uint32_t memoryTypeIndex,
557  VkMemoryPropertyFlags* pFlags);
558 
568  VmaAllocator allocator,
569  uint32_t frameIndex);
570 
571 typedef struct VmaStatInfo
572 {
574  uint32_t BlockCount;
576  uint32_t AllocationCount;
580  VkDeviceSize UsedBytes;
582  VkDeviceSize UnusedBytes;
583  VkDeviceSize AllocationSizeMin, AllocationSizeAvg, AllocationSizeMax;
584  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
585 } VmaStatInfo;
586 
588 struct VmaStats
589 {
590  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
591  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
593 };
594 
/// Fills *pStats with statistics gathered from the given allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);
599 
600 #define VMA_STATS_STRING_ENABLED 1
601 
602 #if VMA_STATS_STRING_ENABLED
603 
605 
608  VmaAllocator allocator,
609  char** ppStatsString,
610  VkBool32 detailedMap);
611 
612 void vmaFreeStatsString(
613  VmaAllocator allocator,
614  char* pStatsString);
615 
616 #endif // #if VMA_STATS_STRING_ENABLED
617 
620 
625 VK_DEFINE_HANDLE(VmaPool)
626 
627 typedef enum VmaMemoryUsage
628 {
634 
637 
640 
644 
659 
698 
701 typedef VkFlags VmaAllocationCreateFlags;
702 
704 {
717  VkMemoryPropertyFlags requiredFlags;
723  VkMemoryPropertyFlags preferredFlags;
725  void* pUserData;
730  VmaPool pool;
732 
/// Searches the memory types allowed by memoryTypeBits for one matching
/// *pAllocationCreateInfo and writes its index to *pMemoryTypeIndex.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);
752 
755 
760 typedef enum VmaPoolCreateFlagBits {
789 
792 typedef VkFlags VmaPoolCreateFlags;
793 
796 typedef struct VmaPoolCreateInfo {
799  uint32_t memoryTypeIndex;
807  VkDeviceSize blockSize;
834  uint32_t frameInUseCount;
836 
839 typedef struct VmaPoolStats {
842  VkDeviceSize size;
845  VkDeviceSize unusedSize;
852 } VmaPoolStats;
853 
/// Creates a custom memory pool described by *pCreateInfo; the new handle is
/// written to *pPool. Destroy it with vmaDestroyPool().
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);
864 
/// Destroys a pool previously created with vmaCreatePool().
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);
870 
/// Fills *pPoolStats with statistics of the given pool.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);
881 
889  VmaAllocator allocator,
890  VmaPool pool,
891  size_t* pLostAllocationCount);
892 
893 VK_DEFINE_HANDLE(VmaAllocation)
894 
895 
897 typedef struct VmaAllocationInfo {
902  uint32_t memoryType;
911  VkDeviceMemory deviceMemory;
916  VkDeviceSize offset;
921  VkDeviceSize size;
927  void* pMappedData;
932  void* pUserData;
934 
/// General-purpose allocation: allocates memory satisfying
/// *pVkMemoryRequirements according to *pCreateInfo. Writes the handle to
/// *pAllocation; pAllocationInfo is optional extra output.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
951 
959  VmaAllocator allocator,
960  VkBuffer buffer,
961  const VmaAllocationCreateInfo* pCreateInfo,
962  VmaAllocation* pAllocation,
963  VmaAllocationInfo* pAllocationInfo);
964 
967  VmaAllocator allocator,
968  VkImage image,
969  const VmaAllocationCreateInfo* pCreateInfo,
970  VmaAllocation* pAllocation,
971  VmaAllocationInfo* pAllocationInfo);
972 
/// Frees memory previously allocated by one of the vmaAllocateMemory* functions.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);
977 
980  VmaAllocator allocator,
981  VmaAllocation allocation,
982  VmaAllocationInfo* pAllocationInfo);
983 
986  VmaAllocator allocator,
987  VmaAllocation allocation,
988  void* pUserData);
989 
1001  VmaAllocator allocator,
1002  VmaAllocation* pAllocation);
1003 
/// Maps the memory backing the allocation and writes a CPU pointer to *ppData.
/// Pair with vmaUnmapMemory().
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);
1016 
/// Unmaps an allocation previously mapped with vmaMapMemory().
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);
1020 
1039 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
1040 
1048 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
1049 
1051 typedef struct VmaDefragmentationInfo {
1056  VkDeviceSize maxBytesToMove;
1063 
1065 typedef struct VmaDefragmentationStats {
1067  VkDeviceSize bytesMoved;
1069  VkDeviceSize bytesFreed;
1075 
/// Compacts memory by moving allocations. pAllocations/allocationCount list
/// the candidates; pAllocationsChanged (optional) receives per-allocation
/// "was moved" flags; pDefragmentationInfo (optional) limits the work;
/// pDefragmentationStats (optional) receives results.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);
1153 
1156 
/// Creates a VkBuffer, allocates memory for it and binds them together.
/// Outputs both the buffer and its allocation; pAllocationInfo is optional.
/// Destroy the pair with vmaDestroyBuffer().
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
1186 
/// Destroys a buffer created with vmaCreateBuffer() and frees its allocation.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);
1191 
/// Creates a VkImage, allocates memory for it and binds them together.
/// Image analogue of vmaCreateBuffer(); destroy with vmaDestroyImage().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
1200 
/// Destroys an image created with vmaCreateImage() and frees its allocation.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);
1205 
1208 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1209 
1210 // For Visual Studio IntelliSense.
1211 #ifdef __INTELLISENSE__
1212 #define VMA_IMPLEMENTATION
1213 #endif
1214 
1215 #ifdef VMA_IMPLEMENTATION
1216 #undef VMA_IMPLEMENTATION
1217 
1218 #include <cstdint>
1219 #include <cstdlib>
1220 #include <cstring>
1221 
1222 /*******************************************************************************
1223 CONFIGURATION SECTION
1224 
1225 Define some of these macros before each #include of this header or change them
1226 here if you need other then default behavior depending on your environment.
1227 */
1228 
1229 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1230 //#define VMA_USE_STL_CONTAINERS 1
1231 
1232 /* Set this macro to 1 to make the library including and using STL containers:
1233 std::pair, std::vector, std::list, std::unordered_map.
1234 
1235 Set it to 0 or undefined to make the library using its own implementation of
1236 the containers.
1237 */
1238 #if VMA_USE_STL_CONTAINERS
1239  #define VMA_USE_STL_VECTOR 1
1240  #define VMA_USE_STL_UNORDERED_MAP 1
1241  #define VMA_USE_STL_LIST 1
1242 #endif
1243 
1244 #if VMA_USE_STL_VECTOR
1245  #include <vector>
1246 #endif
1247 
1248 #if VMA_USE_STL_UNORDERED_MAP
1249  #include <unordered_map>
1250 #endif
1251 
1252 #if VMA_USE_STL_LIST
1253  #include <list>
1254 #endif
1255 
1256 /*
1257 Following headers are used in this CONFIGURATION section only, so feel free to
1258 remove them if not needed.
1259 */
1260 #include <cassert> // for assert
1261 #include <algorithm> // for min, max
1262 #include <mutex> // for std::mutex
1263 #include <atomic> // for std::atomic
1264 
// NOTE(review): aligned_alloc() is declared in <stdlib.h>/<cstdlib> (C11);
// <malloc.h> is non-standard and does not exist on e.g. macOS/BSD — confirm
// the set of supported non-Windows platforms before relying on this include.
#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif
1268 
1269 // Normal assert to check for programmer's errors, especially in Debug configuration.
1270 #ifndef VMA_ASSERT
1271  #ifdef _DEBUG
1272  #define VMA_ASSERT(expr) assert(expr)
1273  #else
1274  #define VMA_ASSERT(expr)
1275  #endif
1276 #endif
1277 
1278 // Assert that will be called very often, like inside data structures e.g. operator[].
1279 // Making it non-empty can make program slow.
1280 #ifndef VMA_HEAVY_ASSERT
1281  #ifdef _DEBUG
1282  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1283  #else
1284  #define VMA_HEAVY_ASSERT(expr)
1285  #endif
1286 #endif
1287 
1288 #ifndef VMA_NULL
1289  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1290  #define VMA_NULL nullptr
1291 #endif
1292 
1293 #ifndef VMA_ALIGN_OF
1294  #define VMA_ALIGN_OF(type) (__alignof(type))
1295 #endif
1296 
1297 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1298  #if defined(_WIN32)
1299  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1300  #else
1301  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1302  #endif
1303 #endif
1304 
1305 #ifndef VMA_SYSTEM_FREE
1306  #if defined(_WIN32)
1307  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1308  #else
1309  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1310  #endif
1311 #endif
1312 
1313 #ifndef VMA_MIN
1314  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1315 #endif
1316 
1317 #ifndef VMA_MAX
1318  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1319 #endif
1320 
1321 #ifndef VMA_SWAP
1322  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1323 #endif
1324 
1325 #ifndef VMA_SORT
1326  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1327 #endif
1328 
1329 #ifndef VMA_DEBUG_LOG
1330  #define VMA_DEBUG_LOG(format, ...)
1331  /*
1332  #define VMA_DEBUG_LOG(format, ...) do { \
1333  printf(format, __VA_ARGS__); \
1334  printf("\n"); \
1335  } while(false)
1336  */
1337 #endif
1338 
1339 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1340 #if VMA_STATS_STRING_ENABLED
static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
{
    // Format num as decimal text; snprintf guarantees NUL-termination
    // whenever strLen > 0.
    const unsigned int value = static_cast<unsigned int>(num);
    snprintf(outStr, strLen, "%u", value);
}
static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
{
    // Format num as decimal text; snprintf guarantees NUL-termination
    // whenever strLen > 0.
    const unsigned long long value = static_cast<unsigned long long>(num);
    snprintf(outStr, strLen, "%llu", value);
}
static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
{
    // Format the pointer value as implementation-defined "%p" text;
    // snprintf guarantees NUL-termination whenever strLen > 0.
    snprintf(outStr, strLen, "%p", ptr);
}
1353 #endif
1354 
1355 #ifndef VMA_MUTEX
1356  class VmaMutex
1357  {
1358  public:
1359  VmaMutex() { }
1360  ~VmaMutex() { }
1361  void Lock() { m_Mutex.lock(); }
1362  void Unlock() { m_Mutex.unlock(); }
1363  private:
1364  std::mutex m_Mutex;
1365  };
1366  #define VMA_MUTEX VmaMutex
1367 #endif
1368 
1369 /*
1370 If providing your own implementation, you need to implement a subset of std::atomic:
1371 
1372 - Constructor(uint32_t desired)
1373 - uint32_t load() const
1374 - void store(uint32_t desired)
1375 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1376 */
1377 #ifndef VMA_ATOMIC_UINT32
1378  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1379 #endif
1380 
1381 #ifndef VMA_BEST_FIT
1382 
1394  #define VMA_BEST_FIT (1)
1395 #endif
1396 
1397 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
1398 
1402  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
1403 #endif
1404 
1405 #ifndef VMA_DEBUG_ALIGNMENT
1406 
1410  #define VMA_DEBUG_ALIGNMENT (1)
1411 #endif
1412 
1413 #ifndef VMA_DEBUG_MARGIN
1414 
1418  #define VMA_DEBUG_MARGIN (0)
1419 #endif
1420 
1421 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1422 
1426  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1427 #endif
1428 
1429 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1430 
1434  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1435 #endif
1436 
1437 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1438  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
1440 #endif
1441 
1442 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1443  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
1445 #endif
1446 
1447 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
1448  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
1450 #endif
1451 
1452 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1453 
1454 /*******************************************************************************
1455 END OF CONFIGURATION
1456 */
1457 
1458 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1459  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1460 
1461 // Returns number of bits set to 1 in (v).
// Returns number of bits set to 1 in (v), using the classic parallel
// ("divide and conquer") population-count: each step sums adjacent groups
// of bits into progressively wider fields.
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t count = v - ((v >> 1) & 0x55555555);                // 2-bit sums
    count = ((count >> 2) & 0x33333333) + (count & 0x33333333);  // 4-bit sums
    count = ((count >> 4) + count) & 0x0F0F0F0F;                 // 8-bit sums
    count = ((count >> 8) + count) & 0x00FF00FF;                 // 16-bit sums
    return ((count >> 16) + count) & 0x0000FFFF;                 // final total
}
1471 
1472 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
1473 // Use types like uint32_t, uint64_t as T.
// Aligns given value up to nearest multiply of align value.
// For example: VmaAlignUp(11, 8) = 16. Use unsigned types like uint32_t,
// uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Number of whole align-sized chunks needed to contain val, times align.
    const T chunks = (val + align - 1) / align;
    return chunks * align;
}
1479 
1480 // Division with mathematical rounding to nearest number.
// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    // Adding half of the divisor before truncating division rounds the
    // quotient to the nearest integer.
    const T half = y / (T)2;
    return (x + half) / y;
}
1486 
1487 #ifndef VMA_SORT
1488 
// Lomuto-style partition step for VmaQuickSort below: takes the last element
// of [beg, end) as the pivot, moves every element that compares cmp-less than
// the pivot in front of it, places the pivot right after that prefix and
// returns an iterator to the pivot's final position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;   // pivot = last element
    Iterator insertIndex = beg;                  // next slot for a "less" element
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into place between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
1511 
1512 template<typename Iterator, typename Compare>
1513 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1514 {
1515  if(beg < end)
1516  {
1517  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1518  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1519  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1520  }
1521 }
1522 
1523 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1524 
1525 #endif // #ifndef VMA_SORT
1526 
1527 /*
1528 Returns true if two memory blocks occupy overlapping pages.
1529 ResourceA must be in less memory offset than ResourceB.
1530 
1531 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1532 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1533 */
1534 static inline bool VmaBlocksOnSamePage(
1535  VkDeviceSize resourceAOffset,
1536  VkDeviceSize resourceASize,
1537  VkDeviceSize resourceBOffset,
1538  VkDeviceSize pageSize)
1539 {
1540  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1541  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1542  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1543  VkDeviceSize resourceBStart = resourceBOffset;
1544  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1545  return resourceAEndPage == resourceBStartPage;
1546 }
1547 
// Classifies what occupies a suballocation inside a memory block. Used by
// VmaIsBufferImageGranularityConflict below to decide whether two neighboring
// suballocations must respect bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Unknown content - handled conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
1558 
1559 /*
1560 Returns true if given suballocation types could conflict and must respect
1561 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
1562 or linear image and another one is optimal image. If type is unknown, behave
1563 conservatively.
1564 */
1565 static inline bool VmaIsBufferImageGranularityConflict(
1566  VmaSuballocationType suballocType1,
1567  VmaSuballocationType suballocType2)
1568 {
1569  if(suballocType1 > suballocType2)
1570  {
1571  VMA_SWAP(suballocType1, suballocType2);
1572  }
1573 
1574  switch(suballocType1)
1575  {
1576  case VMA_SUBALLOCATION_TYPE_FREE:
1577  return false;
1578  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1579  return true;
1580  case VMA_SUBALLOCATION_TYPE_BUFFER:
1581  return
1582  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1583  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1584  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1585  return
1586  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1587  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1588  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1589  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1590  return
1591  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1592  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1593  return false;
1594  default:
1595  VMA_ASSERT(0);
1596  return true;
1597  }
1598 }
1599 
1600 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1601 struct VmaMutexLock
1602 {
1603 public:
1604  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1605  m_pMutex(useMutex ? &mutex : VMA_NULL)
1606  {
1607  if(m_pMutex)
1608  {
1609  m_pMutex->Lock();
1610  }
1611  }
1612 
1613  ~VmaMutexLock()
1614  {
1615  if(m_pMutex)
1616  {
1617  m_pMutex->Unlock();
1618  }
1619  }
1620 
1621 private:
1622  VMA_MUTEX* m_pMutex;
1623 };
1624 
1625 #if VMA_DEBUG_GLOBAL_MUTEX
1626  static VMA_MUTEX gDebugGlobalMutex;
1627  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
1628 #else
1629  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1630 #endif
1631 
1632 // Minimum size of a free suballocation to register it in the free suballocation collection.
1633 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1634 
1635 /*
1636 Performs binary search and returns iterator to first element that is greater or
1637 equal to (key), according to comparison (cmp).
1638 
1639 Cmp should return true if first argument is less than second argument.
1640 
1641 Returned value is the found element, if present in the collection or place where
1642 new element with value (key) should be inserted.
1643 */
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp) - i.e. a lower bound.

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            // Element at mid is less than key: answer lies strictly above mid.
            lo = mid + 1;
        }
        else
        {
            // Element at mid is not less than key: mid is a candidate answer.
            hi = mid;
        }
    }
    return beg + lo;
}
1662 
1664 // Memory allocation
1665 
1666 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1667 {
1668  if((pAllocationCallbacks != VMA_NULL) &&
1669  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1670  {
1671  return (*pAllocationCallbacks->pfnAllocation)(
1672  pAllocationCallbacks->pUserData,
1673  size,
1674  alignment,
1675  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1676  }
1677  else
1678  {
1679  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1680  }
1681 }
1682 
1683 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1684 {
1685  if((pAllocationCallbacks != VMA_NULL) &&
1686  (pAllocationCallbacks->pfnFree != VMA_NULL))
1687  {
1688  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1689  }
1690  else
1691  {
1692  VMA_SYSTEM_FREE(ptr);
1693  }
1694 }
1695 
1696 template<typename T>
1697 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1698 {
1699  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1700 }
1701 
1702 template<typename T>
1703 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1704 {
1705  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1706 }
1707 
// Allocates storage via VmaAllocate and placement-constructs a single object;
// pair with vma_delete. Append constructor arguments after the macro call.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocates storage via VmaAllocateArray and placement-constructs; pair with
// vma_delete_array using the same count.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
1711 
1712 template<typename T>
1713 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
1714 {
1715  ptr->~T();
1716  VmaFree(pAllocationCallbacks, ptr);
1717 }
1718 
1719 template<typename T>
1720 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
1721 {
1722  if(ptr != VMA_NULL)
1723  {
1724  for(size_t i = count; i--; )
1725  {
1726  ptr[i].~T();
1727  }
1728  VmaFree(pAllocationCallbacks, ptr);
1729  }
1730 }
1731 
1732 // STL-compatible allocator.
// Minimal STL-compatible allocator that routes all (de)allocations through
// the Vulkan host allocation callbacks (VmaAllocateArray/VmaFree). With null
// callbacks those helpers fall back to the system aligned allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the STL allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Non-assignable: the callback pointer is const.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
1759 
1760 #if VMA_USE_STL_VECTOR
1761 
1762 #define VmaVector std::vector
1763 
// Inserts item at the given index; thin adapter so that generic code can use
// the same call for std::vector and the custom VmaVector below.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
1769 
// Removes the element at the given index; adapter matching the custom
// VmaVector's remove() interface.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
1775 
1776 #else // #if VMA_USE_STL_VECTOR
1777 
1778 /* Class with interface compatible with subset of std::vector.
1779 T must be POD because constructors and destructors are not called and memcpy is
1780 used for these objects. */
1781 template<typename T, typename AllocatorT>
1782 class VmaVector
1783 {
1784 public:
1785  typedef T value_type;
1786 
1787  VmaVector(const AllocatorT& allocator) :
1788  m_Allocator(allocator),
1789  m_pArray(VMA_NULL),
1790  m_Count(0),
1791  m_Capacity(0)
1792  {
1793  }
1794 
1795  VmaVector(size_t count, const AllocatorT& allocator) :
1796  m_Allocator(allocator),
1797  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1798  m_Count(count),
1799  m_Capacity(count)
1800  {
1801  }
1802 
1803  VmaVector(const VmaVector<T, AllocatorT>& src) :
1804  m_Allocator(src.m_Allocator),
1805  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1806  m_Count(src.m_Count),
1807  m_Capacity(src.m_Count)
1808  {
1809  if(m_Count != 0)
1810  {
1811  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1812  }
1813  }
1814 
1815  ~VmaVector()
1816  {
1817  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1818  }
1819 
1820  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1821  {
1822  if(&rhs != this)
1823  {
1824  resize(rhs.m_Count);
1825  if(m_Count != 0)
1826  {
1827  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1828  }
1829  }
1830  return *this;
1831  }
1832 
1833  bool empty() const { return m_Count == 0; }
1834  size_t size() const { return m_Count; }
1835  T* data() { return m_pArray; }
1836  const T* data() const { return m_pArray; }
1837 
1838  T& operator[](size_t index)
1839  {
1840  VMA_HEAVY_ASSERT(index < m_Count);
1841  return m_pArray[index];
1842  }
1843  const T& operator[](size_t index) const
1844  {
1845  VMA_HEAVY_ASSERT(index < m_Count);
1846  return m_pArray[index];
1847  }
1848 
1849  T& front()
1850  {
1851  VMA_HEAVY_ASSERT(m_Count > 0);
1852  return m_pArray[0];
1853  }
1854  const T& front() const
1855  {
1856  VMA_HEAVY_ASSERT(m_Count > 0);
1857  return m_pArray[0];
1858  }
1859  T& back()
1860  {
1861  VMA_HEAVY_ASSERT(m_Count > 0);
1862  return m_pArray[m_Count - 1];
1863  }
1864  const T& back() const
1865  {
1866  VMA_HEAVY_ASSERT(m_Count > 0);
1867  return m_pArray[m_Count - 1];
1868  }
1869 
1870  void reserve(size_t newCapacity, bool freeMemory = false)
1871  {
1872  newCapacity = VMA_MAX(newCapacity, m_Count);
1873 
1874  if((newCapacity < m_Capacity) && !freeMemory)
1875  {
1876  newCapacity = m_Capacity;
1877  }
1878 
1879  if(newCapacity != m_Capacity)
1880  {
1881  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
1882  if(m_Count != 0)
1883  {
1884  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1885  }
1886  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1887  m_Capacity = newCapacity;
1888  m_pArray = newArray;
1889  }
1890  }
1891 
1892  void resize(size_t newCount, bool freeMemory = false)
1893  {
1894  size_t newCapacity = m_Capacity;
1895  if(newCount > m_Capacity)
1896  {
1897  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1898  }
1899  else if(freeMemory)
1900  {
1901  newCapacity = newCount;
1902  }
1903 
1904  if(newCapacity != m_Capacity)
1905  {
1906  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1907  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1908  if(elementsToCopy != 0)
1909  {
1910  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1911  }
1912  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1913  m_Capacity = newCapacity;
1914  m_pArray = newArray;
1915  }
1916 
1917  m_Count = newCount;
1918  }
1919 
1920  void clear(bool freeMemory = false)
1921  {
1922  resize(0, freeMemory);
1923  }
1924 
1925  void insert(size_t index, const T& src)
1926  {
1927  VMA_HEAVY_ASSERT(index <= m_Count);
1928  const size_t oldCount = size();
1929  resize(oldCount + 1);
1930  if(index < oldCount)
1931  {
1932  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1933  }
1934  m_pArray[index] = src;
1935  }
1936 
1937  void remove(size_t index)
1938  {
1939  VMA_HEAVY_ASSERT(index < m_Count);
1940  const size_t oldCount = size();
1941  if(index < oldCount - 1)
1942  {
1943  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1944  }
1945  resize(oldCount - 1);
1946  }
1947 
1948  void push_back(const T& src)
1949  {
1950  const size_t newIndex = size();
1951  resize(newIndex + 1);
1952  m_pArray[newIndex] = src;
1953  }
1954 
1955  void pop_back()
1956  {
1957  VMA_HEAVY_ASSERT(m_Count > 0);
1958  resize(size() - 1);
1959  }
1960 
1961  void push_front(const T& src)
1962  {
1963  insert(0, src);
1964  }
1965 
1966  void pop_front()
1967  {
1968  VMA_HEAVY_ASSERT(m_Count > 0);
1969  remove(0);
1970  }
1971 
1972  typedef T* iterator;
1973 
1974  iterator begin() { return m_pArray; }
1975  iterator end() { return m_pArray + m_Count; }
1976 
1977 private:
1978  AllocatorT m_Allocator;
1979  T* m_pArray;
1980  size_t m_Count;
1981  size_t m_Capacity;
1982 };
1983 
// Inserts item at the given index; adapter with the same signature as the
// std::vector overload above so generic code works with either container.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
1989 
// Removes the element at the given index; adapter matching the std::vector
// overload above.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
1995 
1996 #endif // #if VMA_USE_STL_VECTOR
1997 
// Inserts value into a vector kept sorted by CmpLess: binary-searches the
// lower-bound position, inserts there and returns that index.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
2009 
// Removes the first element equal to value from a vector sorted by CmpLess.
// Equality is derived from the ordering: neither element is less than the
// other. Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
2027 
// Finds the first element equal to value in a vector sorted by CmpLess and
// returns its index, or vector.size() when not found. Equality is derived
// from the ordering: neither element compares less than the other.
// BUGFIX: the original compared an iterator against vector.size()
// (`it != vector.size()`), assigned a const element pointer to a non-const
// iterator, and called non-const begin() on a const vector - none of which
// compiles once this template is instantiated. Rewritten with const element
// pointers and an end-pointer comparison; behavior of the intended lookup is
// unchanged.
template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const beg = vector.data();
    const typename VectorT::value_type* const end = beg + vector.size();
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        beg,
        end,
        value,
        comparator);
    if(it != end && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - beg;
    }
    return vector.size();
}
2046 
2048 // class VmaPoolAllocator
2049 
2050 /*
2051 Allocator for objects of type T using a list of arrays (pools) to speed up
2052 allocation. Number of elements that can be allocated is not bounded because
2053 allocator can create multiple blocks.
2054 */
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all item blocks at once.
    void Clear();
    // Returns raw storage for one T; the object is NOT constructed here.
    T* Alloc();
    // Returns an item previously obtained from Alloc() to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either a live T or, while free, a link in an intrusive
    // free list (index of the next free slot within the same block).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of Items plus the head of its free list
    // (UINT32_MAX when the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates a fresh block, threads its free list, appends it to
    // m_ItemBlocks and returns a reference to it.
    ItemBlock& CreateNewBlock();
};
2084 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // A block must be able to hold at least one item.
    VMA_ASSERT(itemsPerBlock > 0);
}
2093 
// Destructor releases all item blocks via Clear().
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
2099 
2100 template<typename T>
2101 void VmaPoolAllocator<T>::Clear()
2102 {
2103  for(size_t i = m_ItemBlocks.size(); i--; )
2104  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2105  m_ItemBlocks.clear();
2106 }
2107 
2108 template<typename T>
2109 T* VmaPoolAllocator<T>::Alloc()
2110 {
2111  for(size_t i = m_ItemBlocks.size(); i--; )
2112  {
2113  ItemBlock& block = m_ItemBlocks[i];
2114  // This block has some free items: Use first one.
2115  if(block.FirstFreeIndex != UINT32_MAX)
2116  {
2117  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2118  block.FirstFreeIndex = pItem->NextFreeIndex;
2119  return &pItem->Value;
2120  }
2121  }
2122 
2123  // No block has free item: Create new one and use it.
2124  ItemBlock& newBlock = CreateNewBlock();
2125  Item* const pItem = &newBlock.pItems[0];
2126  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2127  return &pItem->Value;
2128 }
2129 
2130 template<typename T>
2131 void VmaPoolAllocator<T>::Free(T* ptr)
2132 {
2133  // Search all memory blocks to find ptr.
2134  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2135  {
2136  ItemBlock& block = m_ItemBlocks[i];
2137 
2138  // Casting to union.
2139  Item* pItemPtr;
2140  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2141 
2142  // Check if pItemPtr is in address range of this block.
2143  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2144  {
2145  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2146  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2147  block.FirstFreeIndex = index;
2148  return;
2149  }
2150  }
2151  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2152 }
2153 
2154 template<typename T>
2155 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2156 {
2157  ItemBlock newBlock = {
2158  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2159 
2160  m_ItemBlocks.push_back(newBlock);
2161 
2162  // Setup singly-linked list of all free items in this block.
2163  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2164  newBlock.pItems[i].NextFreeIndex = i + 1;
2165  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2166  return m_ItemBlocks.back();
2167 }
2168 
2170 // class VmaRawList, VmaList
2171 
2172 #if VMA_USE_STL_LIST
2173 
2174 #define VmaList std::list
2175 
2176 #else // #if VMA_USE_STL_LIST
2177 
// Node of VmaRawList's doubly linked list; allocated from VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the first item.
    VmaListItem* pNext; // Null for the last item.
    T Value;
};
2185 
2186 // Doubly linked list.
// Doubly linked list.
// Low-level list operating directly on VmaListItem nodes; VmaList wraps it
// with STL-style iterators. Nodes come from an internal pool allocator.
// Not copyable.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // NOTE(review): Front()/Back() are only meaningful when !IsEmpty();
    // after the list empties these pointers may be stale until the next push.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push*/Insert* return the new node; its Value is uninitialized in the
    // overloads that take no value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool of list nodes.
    ItemType* m_pFront; // First item, or null when empty.
    ItemType* m_pBack;  // Last item, or null when empty.
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
2233 
// Constructs an empty list. List nodes are pool-allocated in blocks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
2243 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor frees the underlying blocks wholesale.
}
2250 
2251 template<typename T>
2252 void VmaRawList<T>::Clear()
2253 {
2254  if(IsEmpty() == false)
2255  {
2256  ItemType* pItem = m_pBack;
2257  while(pItem != VMA_NULL)
2258  {
2259  ItemType* const pPrevItem = pItem->pPrev;
2260  m_ItemAllocator.Free(pItem);
2261  pItem = pPrevItem;
2262  }
2263  m_pFront = VMA_NULL;
2264  m_pBack = VMA_NULL;
2265  m_Count = 0;
2266  }
2267 }
2268 
2269 template<typename T>
2270 VmaListItem<T>* VmaRawList<T>::PushBack()
2271 {
2272  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2273  pNewItem->pNext = VMA_NULL;
2274  if(IsEmpty())
2275  {
2276  pNewItem->pPrev = VMA_NULL;
2277  m_pFront = pNewItem;
2278  m_pBack = pNewItem;
2279  m_Count = 1;
2280  }
2281  else
2282  {
2283  pNewItem->pPrev = m_pBack;
2284  m_pBack->pNext = pNewItem;
2285  m_pBack = pNewItem;
2286  ++m_Count;
2287  }
2288  return pNewItem;
2289 }
2290 
2291 template<typename T>
2292 VmaListItem<T>* VmaRawList<T>::PushFront()
2293 {
2294  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2295  pNewItem->pPrev = VMA_NULL;
2296  if(IsEmpty())
2297  {
2298  pNewItem->pNext = VMA_NULL;
2299  m_pFront = pNewItem;
2300  m_pBack = pNewItem;
2301  m_Count = 1;
2302  }
2303  else
2304  {
2305  pNewItem->pNext = m_pFront;
2306  m_pFront->pPrev = pNewItem;
2307  m_pFront = pNewItem;
2308  ++m_Count;
2309  }
2310  return pNewItem;
2311 }
2312 
2313 template<typename T>
2314 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2315 {
2316  ItemType* const pNewItem = PushBack();
2317  pNewItem->Value = value;
2318  return pNewItem;
2319 }
2320 
2321 template<typename T>
2322 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2323 {
2324  ItemType* const pNewItem = PushFront();
2325  pNewItem->Value = value;
2326  return pNewItem;
2327 }
2328 
2329 template<typename T>
2330 void VmaRawList<T>::PopBack()
2331 {
2332  VMA_HEAVY_ASSERT(m_Count > 0);
2333  ItemType* const pBackItem = m_pBack;
2334  ItemType* const pPrevItem = pBackItem->pPrev;
2335  if(pPrevItem != VMA_NULL)
2336  {
2337  pPrevItem->pNext = VMA_NULL;
2338  }
2339  m_pBack = pPrevItem;
2340  m_ItemAllocator.Free(pBackItem);
2341  --m_Count;
2342 }
2343 
2344 template<typename T>
2345 void VmaRawList<T>::PopFront()
2346 {
2347  VMA_HEAVY_ASSERT(m_Count > 0);
2348  ItemType* const pFrontItem = m_pFront;
2349  ItemType* const pNextItem = pFrontItem->pNext;
2350  if(pNextItem != VMA_NULL)
2351  {
2352  pNextItem->pPrev = VMA_NULL;
2353  }
2354  m_pFront = pNextItem;
2355  m_ItemAllocator.Free(pFrontItem);
2356  --m_Count;
2357 }
2358 
2359 template<typename T>
2360 void VmaRawList<T>::Remove(ItemType* pItem)
2361 {
2362  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2363  VMA_HEAVY_ASSERT(m_Count > 0);
2364 
2365  if(pItem->pPrev != VMA_NULL)
2366  {
2367  pItem->pPrev->pNext = pItem->pNext;
2368  }
2369  else
2370  {
2371  VMA_HEAVY_ASSERT(m_pFront == pItem);
2372  m_pFront = pItem->pNext;
2373  }
2374 
2375  if(pItem->pNext != VMA_NULL)
2376  {
2377  pItem->pNext->pPrev = pItem->pPrev;
2378  }
2379  else
2380  {
2381  VMA_HEAVY_ASSERT(m_pBack == pItem);
2382  m_pBack = pItem->pPrev;
2383  }
2384 
2385  m_ItemAllocator.Free(pItem);
2386  --m_Count;
2387 }
2388 
2389 template<typename T>
2390 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2391 {
2392  if(pItem != VMA_NULL)
2393  {
2394  ItemType* const prevItem = pItem->pPrev;
2395  ItemType* const newItem = m_ItemAllocator.Alloc();
2396  newItem->pPrev = prevItem;
2397  newItem->pNext = pItem;
2398  pItem->pPrev = newItem;
2399  if(prevItem != VMA_NULL)
2400  {
2401  prevItem->pNext = newItem;
2402  }
2403  else
2404  {
2405  VMA_HEAVY_ASSERT(m_pFront == pItem);
2406  m_pFront = newItem;
2407  }
2408  ++m_Count;
2409  return newItem;
2410  }
2411  else
2412  return PushBack();
2413 }
2414 
2415 template<typename T>
2416 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2417 {
2418  if(pItem != VMA_NULL)
2419  {
2420  ItemType* const nextItem = pItem->pNext;
2421  ItemType* const newItem = m_ItemAllocator.Alloc();
2422  newItem->pNext = nextItem;
2423  newItem->pPrev = pItem;
2424  pItem->pNext = newItem;
2425  if(nextItem != VMA_NULL)
2426  {
2427  nextItem->pPrev = newItem;
2428  }
2429  else
2430  {
2431  VMA_HEAVY_ASSERT(m_pBack == pItem);
2432  m_pBack = newItem;
2433  }
2434  ++m_Count;
2435  return newItem;
2436  }
2437  else
2438  return PushFront();
2439 }
2440 
2441 template<typename T>
2442 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2443 {
2444  ItemType* const newItem = InsertBefore(pItem);
2445  newItem->Value = value;
2446  return newItem;
2447 }
2448 
2449 template<typename T>
2450 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2451 {
2452  ItemType* const newItem = InsertAfter(pItem);
2453  newItem->Value = value;
2454  return newItem;
2455 }
2456 
2457 template<typename T, typename AllocatorT>
2458 class VmaList
2459 {
2460 public:
2461  class iterator
2462  {
2463  public:
2464  iterator() :
2465  m_pList(VMA_NULL),
2466  m_pItem(VMA_NULL)
2467  {
2468  }
2469 
2470  T& operator*() const
2471  {
2472  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2473  return m_pItem->Value;
2474  }
2475  T* operator->() const
2476  {
2477  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2478  return &m_pItem->Value;
2479  }
2480 
2481  iterator& operator++()
2482  {
2483  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2484  m_pItem = m_pItem->pNext;
2485  return *this;
2486  }
2487  iterator& operator--()
2488  {
2489  if(m_pItem != VMA_NULL)
2490  {
2491  m_pItem = m_pItem->pPrev;
2492  }
2493  else
2494  {
2495  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
2496  m_pItem = m_pList->Back();
2497  }
2498  return *this;
2499  }
2500 
2501  iterator operator++(int)
2502  {
2503  iterator result = *this;
2504  ++*this;
2505  return result;
2506  }
2507  iterator operator--(int)
2508  {
2509  iterator result = *this;
2510  --*this;
2511  return result;
2512  }
2513 
2514  bool operator==(const iterator& rhs) const
2515  {
2516  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2517  return m_pItem == rhs.m_pItem;
2518  }
2519  bool operator!=(const iterator& rhs) const
2520  {
2521  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2522  return m_pItem != rhs.m_pItem;
2523  }
2524 
2525  private:
2526  VmaRawList<T>* m_pList;
2527  VmaListItem<T>* m_pItem;
2528 
2529  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2530  m_pList(pList),
2531  m_pItem(pItem)
2532  {
2533  }
2534 
2535  friend class VmaList<T, AllocatorT>;
2536  };
2537 
2538  class const_iterator
2539  {
2540  public:
2541  const_iterator() :
2542  m_pList(VMA_NULL),
2543  m_pItem(VMA_NULL)
2544  {
2545  }
2546 
2547  const_iterator(const iterator& src) :
2548  m_pList(src.m_pList),
2549  m_pItem(src.m_pItem)
2550  {
2551  }
2552 
2553  const T& operator*() const
2554  {
2555  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2556  return m_pItem->Value;
2557  }
2558  const T* operator->() const
2559  {
2560  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2561  return &m_pItem->Value;
2562  }
2563 
2564  const_iterator& operator++()
2565  {
2566  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2567  m_pItem = m_pItem->pNext;
2568  return *this;
2569  }
2570  const_iterator& operator--()
2571  {
2572  if(m_pItem != VMA_NULL)
2573  {
2574  m_pItem = m_pItem->pPrev;
2575  }
2576  else
2577  {
2578  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2579  m_pItem = m_pList->Back();
2580  }
2581  return *this;
2582  }
2583 
2584  const_iterator operator++(int)
2585  {
2586  const_iterator result = *this;
2587  ++*this;
2588  return result;
2589  }
2590  const_iterator operator--(int)
2591  {
2592  const_iterator result = *this;
2593  --*this;
2594  return result;
2595  }
2596 
2597  bool operator==(const const_iterator& rhs) const
2598  {
2599  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2600  return m_pItem == rhs.m_pItem;
2601  }
2602  bool operator!=(const const_iterator& rhs) const
2603  {
2604  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2605  return m_pItem != rhs.m_pItem;
2606  }
2607 
2608  private:
2609  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2610  m_pList(pList),
2611  m_pItem(pItem)
2612  {
2613  }
2614 
2615  const VmaRawList<T>* m_pList;
2616  const VmaListItem<T>* m_pItem;
2617 
2618  friend class VmaList<T, AllocatorT>;
2619  };
2620 
2621  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2622 
2623  bool empty() const { return m_RawList.IsEmpty(); }
2624  size_t size() const { return m_RawList.GetCount(); }
2625 
2626  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2627  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2628 
2629  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2630  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2631 
2632  void clear() { m_RawList.Clear(); }
2633  void push_back(const T& value) { m_RawList.PushBack(value); }
2634  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2635  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2636 
2637 private:
2638  VmaRawList<T> m_RawList;
2639 };
2640 
2641 #endif // #if VMA_USE_STL_LIST
2642 
2644 // class VmaMap
2645 
2646 // Unused in this version.
2647 #if 0
2648 
2649 #if VMA_USE_STL_UNORDERED_MAP
2650 
2651 #define VmaPair std::pair
2652 
2653 #define VMA_MAP_TYPE(KeyT, ValueT) \
2654  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2655 
2656 #else // #if VMA_USE_STL_UNORDERED_MAP
2657 
// Minimal POD-friendly replacement for std::pair, used by VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
2667 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Despite the name, entries are kept in a flat vector sorted by key
(see insert/find below), so lookup is a binary search.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Iterators are raw pointers into m_Vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
2690 
2691 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2692 
// Orders VmaPairs by their first member. The second overload allows
// comparing a pair directly against a key for binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
2705 
// Inserts pair at the position that keeps m_Vector sorted by key.
// Duplicate keys are not rejected here.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
2716 
// Binary-searches for key; returns iterator to the matching pair, or end()
// when absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
2734 
// Removes the element it points to; it must be a valid iterator from find().
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
2740 
2741 #endif // #if VMA_USE_STL_UNORDERED_MAP
2742 
2743 #endif // #if 0
2744 
2746 
2747 class VmaDeviceMemoryBlock;
2748 
// Selects which of the two block vectors an allocation belongs to:
// persistently mapped blocks vs. unmapped blocks (see
// VmaAllocationCreateFlagsToBlockVectorType).
enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT // Number of real values above; used for array sizing.
};
2755 
2756 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
2757 {
2758  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
2759  VMA_BLOCK_VECTOR_TYPE_MAPPED :
2760  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
2761 }
2762 
/*
A single allocation made by the library. It is either a sub-range of a
VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a dedicated VkDeviceMemory
object (ALLOCATION_TYPE_OWN); the anonymous union at the bottom holds the
variant-specific state. m_LastUseFrameIndex is atomic because lost-allocation
bookkeeping is updated concurrently (see CompareExchangeLastUseFrameIndex).
*/
struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_OWN,
    };

    // Constructs an uninitialized allocation; one of the Init* methods
    // below must be called before use.
    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    // Initializes this object as a suballocation inside `block`.
    // hPool is null when the block belongs to general (non-custom-pool) memory.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an already-lost block allocation: no block,
    // no offset; m_LastUseFrameIndex must already equal VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves an existing block allocation to another block/offset
    // (used by defragmentation).
    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    // Initializes this object as owning a dedicated VkDeviceMemory.
    // pMappedData is the persistent mapping (when persistentMap is true).
    void InitOwnAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_OWN;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_OwnAllocation.m_hMemory = hMemory;
        m_OwnAllocation.m_PersistentMap = persistentMap;
        m_OwnAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    // (Re)maps the whole dedicated memory if it was created persistently
    // mapped; no-op (VK_SUCCESS) otherwise. ALLOCATION_TYPE_OWN only.
    VkResult OwnAllocMapPersistentlyMappedMemory(VkDevice hDevice)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        if(m_OwnAllocation.m_PersistentMap)
        {
            return vkMapMemory(hDevice, m_OwnAllocation.m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_OwnAllocation.m_pMappedData);
        }
        return VK_SUCCESS;
    }
    // Unmaps the dedicated memory if currently mapped. ALLOCATION_TYPE_OWN only.
    void OwnAllocUnmapPersistentlyMappedMemory(VkDevice hDevice)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        if(m_OwnAllocation.m_pMappedData)
        {
            VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
            vkUnmapMemory(hDevice, m_OwnAllocation.m_hMemory);
            m_OwnAllocation.m_pMappedData = VMA_NULL;
        }
    }

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; may fail spuriously
    // (compare_exchange_weak), callers are expected to loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a one-block,
    // one-allocation heap with no unused ranges. ALLOCATION_TYPE_OWN only.
    void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        outInfo.BlockCount = 1;
        outInfo.AllocationCount = 1;
        outInfo.UnusedRangeCount = 0;
        outInfo.UsedBytes = m_Size;
        outInfo.UnusedBytes = 0;
        outInfo.AllocationSizeMin = outInfo.AllocationSizeMax = m_Size;
        outInfo.UnusedRangeSizeMin = UINT64_MAX; // "No unused range" sentinel.
        outInfo.UnusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct OwnAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        OwnAllocation m_OwnAllocation;
    };
};
2954 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region from the start of the block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // Presumably null for free regions - verify against block code.
    VmaSuballocationType type;
};

typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
2971 
2972 /*
2973 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
2974 
2975 If canMakeOtherLost was false:
2976 - item points to a FREE suballocation.
2977 - itemsToMakeLostCount is 0.
2978 
2979 If canMakeOtherLost was true:
2980 - item points to first of sequence of suballocations, which are either FREE,
2981  or point to VmaAllocations that can become lost.
2982 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
2983  the requested allocation to succeed.
2984 */
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Proposed offset of the new allocation within the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item; // First affected suballocation (see comment above).
    size_t itemsToMakeLostCount; // Number of allocations to make lost for this request to succeed.

    // Heuristic cost of fulfilling this request: bytes of live allocations
    // sacrificed plus a fixed per-lost-allocation penalty.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
2998 
2999 /*
3000 Represents a single block of device memory (VkDeviceMemory ) with all the
3001 data about its regions (aka suballocations, VmaAllocation), assigned and free.
3002 
3003 Thread-safety: This class must be externally synchronized.
3004 */
class VmaDeviceMemoryBlock
{
public:
    uint32_t m_MemoryTypeIndex;
    VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    VkDeviceMemory m_hMemory; // The Vulkan memory object; released by Destroy().
    VkDeviceSize m_Size;
    bool m_PersistentMap;     // Presumably true when the block stays mapped - see Init().
    void* m_pMappedData;
    uint32_t m_FreeCount;     // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes of all free suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must have been called first to release m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        bool persistentMap,
        void* pMappedData);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Tries to find a place for suballocation with given parameters inside this allocation.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    // Makes lost the allocations listed in pAllocationRequest; returns false
    // on failure (signature implies fallibility - confirm semantics at definition).
    bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);

    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Returns true if this allocation is empty - contains only single free suballocation.
    bool IsEmpty() const;

    // Makes actual allocation based on request. Request must already be checked
    // and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

private:
    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;

    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);

    bool ValidateFreeSuballocationList() const;
};
3109 
// Comparator ordering raw pointers by address; usable as CmpLess for the
// sorted-vector helpers above.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return lhs < rhs;
    }
};
3117 
3118 class VmaDefragmentator;
3119 
3120 /*
3121 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3122 Vulkan memory type.
3123 
3124 Synchronized internally with a mutex.
3125 */
struct VmaBlockVector
{
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE blockVectorType,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks so the vector meets its minimum.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    // Allocates from an existing block or a newly created one.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void UnmapPersistentlyMappedMemory();
    VkResult MapPersistentlyMappedMemory();

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Lazily creates (then reuses) the defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VkDevice hDevice,
        const VkAllocationCallbacks* pAllocationCallbacks,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex; // Guards mutable state below (class is internally synchronized).
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VmaDefragmentator* m_pDefragmentator; // Null until EnsureDefragmentator() is called.

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
3219 
// Implementation of a custom memory pool (VmaPool handle): a thin wrapper
// that owns a single VmaBlockVector configured from VmaPoolCreateInfo.
struct VmaPool_T
{
public:
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
3237 
3238 class VmaDefragmentator
3239 {
3240  const VkDevice m_hDevice;
3241  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3242  VmaBlockVector* const m_pBlockVector;
3243  uint32_t m_CurrentFrameIndex;
3244  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3245  VkDeviceSize m_BytesMoved;
3246  uint32_t m_AllocationsMoved;
3247 
3248  struct AllocationInfo
3249  {
3250  VmaAllocation m_hAllocation;
3251  VkBool32* m_pChanged;
3252 
3253  AllocationInfo() :
3254  m_hAllocation(VK_NULL_HANDLE),
3255  m_pChanged(VMA_NULL)
3256  {
3257  }
3258  };
3259 
3260  struct AllocationInfoSizeGreater
3261  {
3262  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3263  {
3264  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3265  }
3266  };
3267 
3268  // Used between AddAllocation and Defragment.
3269  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3270 
3271  struct BlockInfo
3272  {
3273  VmaDeviceMemoryBlock* m_pBlock;
3274  bool m_HasNonMovableAllocations;
3275  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3276 
3277  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3278  m_pBlock(VMA_NULL),
3279  m_HasNonMovableAllocations(true),
3280  m_Allocations(pAllocationCallbacks),
3281  m_pMappedDataForDefragmentation(VMA_NULL)
3282  {
3283  }
3284 
3285  void CalcHasNonMovableAllocations()
3286  {
3287  const size_t blockAllocCount =
3288  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3289  const size_t defragmentAllocCount = m_Allocations.size();
3290  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3291  }
3292 
3293  void SortAllocationsBySizeDescecnding()
3294  {
3295  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3296  }
3297 
3298  VkResult EnsureMapping(VkDevice hDevice, void** ppMappedData)
3299  {
3300  // It has already been mapped for defragmentation.
3301  if(m_pMappedDataForDefragmentation)
3302  {
3303  *ppMappedData = m_pMappedDataForDefragmentation;
3304  return VK_SUCCESS;
3305  }
3306 
3307  // It is persistently mapped.
3308  if(m_pBlock->m_PersistentMap)
3309  {
3310  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
3311  *ppMappedData = m_pBlock->m_pMappedData;
3312  return VK_SUCCESS;
3313  }
3314 
3315  // Map on first usage.
3316  VkResult res = vkMapMemory(hDevice, m_pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedDataForDefragmentation);
3317  *ppMappedData = m_pMappedDataForDefragmentation;
3318  return res;
3319  }
3320 
3321  void Unmap(VkDevice hDevice)
3322  {
3323  if(m_pMappedDataForDefragmentation != VMA_NULL)
3324  {
3325  vkUnmapMemory(hDevice, m_pBlock->m_hMemory);
3326  }
3327  }
3328 
3329  private:
3330  // Not null if mapped for defragmentation only, not persistently mapped.
3331  void* m_pMappedDataForDefragmentation;
3332  };
3333 
3334  struct BlockPointerLess
3335  {
3336  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3337  {
3338  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3339  }
3340  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3341  {
3342  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3343  }
3344  };
3345 
3346  // 1. Blocks with some non-movable allocations go first.
3347  // 2. Blocks with smaller sumFreeSize go first.
3348  struct BlockInfoCompareMoveDestination
3349  {
3350  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3351  {
3352  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3353  {
3354  return true;
3355  }
3356  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3357  {
3358  return false;
3359  }
3360  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3361  {
3362  return true;
3363  }
3364  return false;
3365  }
3366  };
3367 
3368  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3369  BlockInfoVector m_Blocks;
3370 
3371  VkResult DefragmentRound(
3372  VkDeviceSize maxBytesToMove,
3373  uint32_t maxAllocationsToMove);
3374 
3375  static bool MoveMakesSense(
3376  size_t dstBlockIndex, VkDeviceSize dstOffset,
3377  size_t srcBlockIndex, VkDeviceSize srcOffset);
3378 
3379 public:
3380  VmaDefragmentator(
3381  VkDevice hDevice,
3382  const VkAllocationCallbacks* pAllocationCallbacks,
3383  VmaBlockVector* pBlockVector,
3384  uint32_t currentFrameIndex);
3385 
3386  ~VmaDefragmentator();
3387 
3388  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3389  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3390 
3391  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3392 
3393  VkResult Defragment(
3394  VkDeviceSize maxBytesToMove,
3395  uint32_t maxAllocationsToMove);
3396 };
3397 
// Main allocator object.
struct VmaAllocator_T
{
    // When false, internal mutexes are bypassed (externally-synchronized use).
    bool m_UseMutex;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    // Counter to allow nested calls to these functions.
    uint32_t m_UnmapPersistentlyMappedMemoryCounter;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified at creation, else null
    // (meaning: use default CPU allocation).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }

    // Device bufferImageGranularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void UnmapPersistentlyMappedMemory();
    VkResult MapPersistentlyMappedMemory();

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Wrappers over vkAllocateMemory/vkFreeMemory that also account for
    // heap size limits and fire the device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;
    VkDeviceSize m_PreferredSmallHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateOwnMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Own Memory. Returns true if found and freed.
    void FreeOwnMemory(VmaAllocation allocation);
};
3524 
3526 // Memory allocation #2 after VmaAllocator_T definition
3527 
3528 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3529 {
3530  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3531 }
3532 
3533 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3534 {
3535  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3536 }
3537 
3538 template<typename T>
3539 static T* VmaAllocate(VmaAllocator hAllocator)
3540 {
3541  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3542 }
3543 
3544 template<typename T>
3545 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3546 {
3547  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3548 }
3549 
3550 template<typename T>
3551 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3552 {
3553  if(ptr != VMA_NULL)
3554  {
3555  ptr->~T();
3556  VmaFree(hAllocator, ptr);
3557  }
3558 }
3559 
3560 template<typename T>
3561 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3562 {
3563  if(ptr != VMA_NULL)
3564  {
3565  for(size_t i = count; i--; )
3566  ptr[i].~T();
3567  VmaFree(hAllocator, ptr);
3568  }
3569 }
3570 
3572 // VmaStringBuilder
3573 
3574 #if VMA_STATS_STRING_ENABLED
3575 
// Minimal append-only string buffer used to build statistics strings.
// Note: the buffer is NOT NUL-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
3593 
3594 void VmaStringBuilder::Add(const char* pStr)
3595 {
3596  const size_t strLen = strlen(pStr);
3597  if(strLen > 0)
3598  {
3599  const size_t oldCount = m_Data.size();
3600  m_Data.resize(oldCount + strLen);
3601  memcpy(m_Data.data() + oldCount, pStr, strLen);
3602  }
3603 }
3604 
3605 void VmaStringBuilder::AddNumber(uint32_t num)
3606 {
3607  char buf[11];
3608  VmaUint32ToStr(buf, sizeof(buf), num);
3609  Add(buf);
3610 }
3611 
3612 void VmaStringBuilder::AddNumber(uint64_t num)
3613 {
3614  char buf[21];
3615  VmaUint64ToStr(buf, sizeof(buf), num);
3616  Add(buf);
3617 }
3618 
3619 void VmaStringBuilder::AddPointer(const void* ptr)
3620 {
3621  char buf[21];
3622  VmaPtrToStr(buf, sizeof(buf), ptr);
3623  Add(buf);
3624 }
3625 
3626 #endif // #if VMA_STATS_STRING_ENABLED
3627 
3629 // VmaJsonWriter
3630 
3631 #if VMA_STATS_STRING_ENABLED
3632 
// Streaming JSON writer over a VmaStringBuilder. Callers must balance
// Begin*/End* calls; inside objects, values alternate key (string) / value.
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value. Alternatively use
    // BeginString/ContinueString/EndString to build one incrementally.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far; inside an object, even counts
        // correspond to keys and odd counts to values.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
3679 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
3681 
// The writer appends to the caller-owned string builder `sb`;
// pAllocationCallbacks back the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
3688 
VmaJsonWriter::~VmaJsonWriter()
{
    // Every BeginString/BeginObject/BeginArray must have been closed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
3694 
3695 void VmaJsonWriter::BeginObject(bool singleLine)
3696 {
3697  VMA_ASSERT(!m_InsideString);
3698 
3699  BeginValue(false);
3700  m_SB.Add('{');
3701 
3702  StackItem item;
3703  item.type = COLLECTION_TYPE_OBJECT;
3704  item.valueCount = 0;
3705  item.singleLineMode = singleLine;
3706  m_Stack.push_back(item);
3707 }
3708 
3709 void VmaJsonWriter::EndObject()
3710 {
3711  VMA_ASSERT(!m_InsideString);
3712 
3713  WriteIndent(true);
3714  m_SB.Add('}');
3715 
3716  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3717  m_Stack.pop_back();
3718 }
3719 
3720 void VmaJsonWriter::BeginArray(bool singleLine)
3721 {
3722  VMA_ASSERT(!m_InsideString);
3723 
3724  BeginValue(false);
3725  m_SB.Add('[');
3726 
3727  StackItem item;
3728  item.type = COLLECTION_TYPE_ARRAY;
3729  item.valueCount = 0;
3730  item.singleLineMode = singleLine;
3731  m_Stack.push_back(item);
3732 }
3733 
3734 void VmaJsonWriter::EndArray()
3735 {
3736  VMA_ASSERT(!m_InsideString);
3737 
3738  WriteIndent(true);
3739  m_SB.Add(']');
3740 
3741  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3742  m_Stack.pop_back();
3743 }
3744 
3745 void VmaJsonWriter::WriteString(const char* pStr)
3746 {
3747  BeginString(pStr);
3748  EndString();
3749 }
3750 
3751 void VmaJsonWriter::BeginString(const char* pStr)
3752 {
3753  VMA_ASSERT(!m_InsideString);
3754 
3755  BeginValue(true);
3756  m_SB.Add('"');
3757  m_InsideString = true;
3758  if(pStr != VMA_NULL && pStr[0] != '\0')
3759  {
3760  ContinueString(pStr);
3761  }
3762 }
3763 
3764 void VmaJsonWriter::ContinueString(const char* pStr)
3765 {
3766  VMA_ASSERT(m_InsideString);
3767 
3768  const size_t strLen = strlen(pStr);
3769  for(size_t i = 0; i < strLen; ++i)
3770  {
3771  char ch = pStr[i];
3772  if(ch == '\'')
3773  {
3774  m_SB.Add("\\\\");
3775  }
3776  else if(ch == '"')
3777  {
3778  m_SB.Add("\\\"");
3779  }
3780  else if(ch >= 32)
3781  {
3782  m_SB.Add(ch);
3783  }
3784  else switch(ch)
3785  {
3786  case '\n':
3787  m_SB.Add("\\n");
3788  break;
3789  case '\r':
3790  m_SB.Add("\\r");
3791  break;
3792  case '\t':
3793  m_SB.Add("\\t");
3794  break;
3795  default:
3796  VMA_ASSERT(0 && "Character not currently supported.");
3797  break;
3798  }
3799  }
3800 }
3801 
3802 void VmaJsonWriter::ContinueString(uint32_t n)
3803 {
3804  VMA_ASSERT(m_InsideString);
3805  m_SB.AddNumber(n);
3806 }
3807 
3808 void VmaJsonWriter::ContinueString(uint64_t n)
3809 {
3810  VMA_ASSERT(m_InsideString);
3811  m_SB.AddNumber(n);
3812 }
3813 
3814 void VmaJsonWriter::EndString(const char* pStr)
3815 {
3816  VMA_ASSERT(m_InsideString);
3817  if(pStr != VMA_NULL && pStr[0] != '\0')
3818  {
3819  ContinueString(pStr);
3820  }
3821  m_SB.Add('"');
3822  m_InsideString = false;
3823 }
3824 
3825 void VmaJsonWriter::WriteNumber(uint32_t n)
3826 {
3827  VMA_ASSERT(!m_InsideString);
3828  BeginValue(false);
3829  m_SB.AddNumber(n);
3830 }
3831 
3832 void VmaJsonWriter::WriteNumber(uint64_t n)
3833 {
3834  VMA_ASSERT(!m_InsideString);
3835  BeginValue(false);
3836  m_SB.AddNumber(n);
3837 }
3838 
3839 void VmaJsonWriter::WriteBool(bool b)
3840 {
3841  VMA_ASSERT(!m_InsideString);
3842  BeginValue(false);
3843  m_SB.Add(b ? "true" : "false");
3844 }
3845 
3846 void VmaJsonWriter::WriteNull()
3847 {
3848  VMA_ASSERT(!m_InsideString);
3849  BeginValue(false);
3850  m_SB.Add("null");
3851 }
3852 
3853 void VmaJsonWriter::BeginValue(bool isString)
3854 {
3855  if(!m_Stack.empty())
3856  {
3857  StackItem& currItem = m_Stack.back();
3858  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3859  currItem.valueCount % 2 == 0)
3860  {
3861  VMA_ASSERT(isString);
3862  }
3863 
3864  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3865  currItem.valueCount % 2 != 0)
3866  {
3867  m_SB.Add(": ");
3868  }
3869  else if(currItem.valueCount > 0)
3870  {
3871  m_SB.Add(", ");
3872  WriteIndent();
3873  }
3874  else
3875  {
3876  WriteIndent();
3877  }
3878  ++currItem.valueCount;
3879  }
3880 }
3881 
3882 void VmaJsonWriter::WriteIndent(bool oneLess)
3883 {
3884  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3885  {
3886  m_SB.AddNewLine();
3887 
3888  size_t count = m_Stack.size();
3889  if(count > 0 && oneLess)
3890  {
3891  --count;
3892  }
3893  for(size_t i = 0; i < count; ++i)
3894  {
3895  m_SB.Add(INDENT);
3896  }
3897  }
3898 }
3899 
3900 #endif // #if VMA_STATS_STRING_ENABLED
3901 
3903 
3904 VkDeviceSize VmaAllocation_T::GetOffset() const
3905 {
3906  switch(m_Type)
3907  {
3908  case ALLOCATION_TYPE_BLOCK:
3909  return m_BlockAllocation.m_Offset;
3910  case ALLOCATION_TYPE_OWN:
3911  return 0;
3912  default:
3913  VMA_ASSERT(0);
3914  return 0;
3915  }
3916 }
3917 
3918 VkDeviceMemory VmaAllocation_T::GetMemory() const
3919 {
3920  switch(m_Type)
3921  {
3922  case ALLOCATION_TYPE_BLOCK:
3923  return m_BlockAllocation.m_Block->m_hMemory;
3924  case ALLOCATION_TYPE_OWN:
3925  return m_OwnAllocation.m_hMemory;
3926  default:
3927  VMA_ASSERT(0);
3928  return VK_NULL_HANDLE;
3929  }
3930 }
3931 
3932 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3933 {
3934  switch(m_Type)
3935  {
3936  case ALLOCATION_TYPE_BLOCK:
3937  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3938  case ALLOCATION_TYPE_OWN:
3939  return m_OwnAllocation.m_MemoryTypeIndex;
3940  default:
3941  VMA_ASSERT(0);
3942  return UINT32_MAX;
3943  }
3944 }
3945 
3946 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3947 {
3948  switch(m_Type)
3949  {
3950  case ALLOCATION_TYPE_BLOCK:
3951  return m_BlockAllocation.m_Block->m_BlockVectorType;
3952  case ALLOCATION_TYPE_OWN:
3953  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3954  default:
3955  VMA_ASSERT(0);
3956  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3957  }
3958 }
3959 
3960 void* VmaAllocation_T::GetMappedData() const
3961 {
3962  switch(m_Type)
3963  {
3964  case ALLOCATION_TYPE_BLOCK:
3965  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3966  {
3967  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3968  }
3969  else
3970  {
3971  return VMA_NULL;
3972  }
3973  break;
3974  case ALLOCATION_TYPE_OWN:
3975  return m_OwnAllocation.m_pMappedData;
3976  default:
3977  VMA_ASSERT(0);
3978  return VMA_NULL;
3979  }
3980 }
3981 
3982 bool VmaAllocation_T::CanBecomeLost() const
3983 {
3984  switch(m_Type)
3985  {
3986  case ALLOCATION_TYPE_BLOCK:
3987  return m_BlockAllocation.m_CanBecomeLost;
3988  case ALLOCATION_TYPE_OWN:
3989  return false;
3990  default:
3991  VMA_ASSERT(0);
3992  return false;
3993  }
3994 }
3995 
// Returns the pool this allocation belongs to.
// Valid only for block allocations; own allocations have no pool.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
4001 
/*
Tries to atomically mark this allocation as lost; returns true on success.
Fails (returns false) when the allocation was used within the last
frameInUseCount frames relative to currentFrameIndex, i.e. may still be in
use by the GPU.
*/
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-and-swap retry loop on the atomic last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - should not happen given CanBecomeLost() above.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Too recently used - refuse to make it lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread updated the index - re-evaluate
            // with the refreshed value (loaded by CompareExchange).
        }
    }
}
4033 
4034 #if VMA_STATS_STRING_ENABLED
4035 
// Correspond to values of enum VmaSuballocationType.
// Used only for the statistics string (PrintDetailedMap).
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
4045 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range, since
// the distribution is trivial otherwise.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.BlockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.AllocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.UnusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.UsedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.UnusedBytes);

    if(stat.AllocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.AllocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.AllocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.AllocationSizeMax);
        json.EndObject();
    }

    if(stat.UnusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.UnusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.UnusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.UnusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
4093 
4094 #endif // #if VMA_STATS_STRING_ENABLED
4095 
// Orders suballocation-list iterators by suballocation size; the second
// overload allows binary search against a plain size value.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
4111 
// Constructs an uninitialized block (no VkDeviceMemory yet);
// call Init() before use.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_MemoryTypeIndex(UINT32_MAX),
    m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    m_hMemory(VK_NULL_HANDLE),
    m_Size(0),
    m_PersistentMap(false),
    m_pMappedData(VMA_NULL),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
4125 
4126 void VmaDeviceMemoryBlock::Init(
4127  uint32_t newMemoryTypeIndex,
4128  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4129  VkDeviceMemory newMemory,
4130  VkDeviceSize newSize,
4131  bool persistentMap,
4132  void* pMappedData)
4133 {
4134  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4135 
4136  m_MemoryTypeIndex = newMemoryTypeIndex;
4137  m_BlockVectorType = newBlockVectorType;
4138  m_hMemory = newMemory;
4139  m_Size = newSize;
4140  m_PersistentMap = persistentMap;
4141  m_pMappedData = pMappedData;
4142  m_FreeCount = 1;
4143  m_SumFreeSize = newSize;
4144 
4145  m_Suballocations.clear();
4146  m_FreeSuballocationsBySize.clear();
4147 
4148  VmaSuballocation suballoc = {};
4149  suballoc.offset = 0;
4150  suballoc.size = newSize;
4151  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4152  suballoc.hAllocation = VK_NULL_HANDLE;
4153 
4154  m_Suballocations.push_back(suballoc);
4155  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4156  --suballocItem;
4157  m_FreeSuballocationsBySize.push_back(suballocItem);
4158 }
4159 
// Unmaps (if mapped) and releases this block's VkDeviceMemory.
// The block must be empty - all suballocations freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    if(m_pMappedData != VMA_NULL)
    {
        vkUnmapMemory(allocator->m_hDevice, m_hMemory);
        m_pMappedData = VMA_NULL;
    }

    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
}
4176 
// Consistency check over the block's metadata: suballocations must be
// contiguous, no two adjacent free ranges, free counters/sums must match,
// and m_FreeSuballocationsBySize must contain exactly the registered free
// ranges sorted by size ascending. Returns false on any violation.
bool VmaDeviceMemoryBlock::Validate() const
{
    if((m_hMemory == VK_NULL_HANDLE) ||
        (m_Size == 0) ||
        m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculates from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visisted suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }
        prevFree = currFree;

        // A free range must have no allocation handle and vice versa.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges of at least the threshold size get registered.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }
        }

        calculatedOffset += subAlloc.size;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    return
        (calculatedOffset == m_Size) &&
        (calculatedSumFreeSize == m_SumFreeSize) &&
        (calculatedFreeCount == m_FreeCount);
}
4268 
4269 /*
4270 How many suitable free suballocations to analyze before choosing best one.
4271 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4272  be chosen.
4273 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4274  suballocations will be analized and best one will be chosen.
4275 - Any other value is also acceptable.
4276 */
4277 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4278 
/*
Searches this block for a place where an allocation of allocSize bytes, aligned
to allocAlignment and of the given allocType, could be made.

Strategy:
1. Search the sorted free list m_FreeSuballocationsBySize without disturbing
   existing allocations (best-fit via binary search if VMA_BEST_FIT, otherwise
   starting from the biggest free suballocations).
2. If that fails and canMakeOtherLost is true, brute-force scan all
   suballocations, allowing existing allocations to become lost, and keep the
   candidate with the lowest cost as reported by CalcCost().

Returns true and fills *pAllocationRequest (offset, item, lost-count and size
sums) on success; returns false if no suitable place exists.
*/
bool VmaDeviceMemoryBlock::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // Phase 1: efficiently search freeSuballocationsBySize (sorted by size ascending).
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // A large-enough suballocation may still fail CheckAllocation
            // (alignment, granularity), so keep trying bigger ones.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Worst-fit: search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    // Phase 2: allow making other allocations lost.
    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE acts as an "infinite cost" sentinel so the first
        // successful candidate always wins the CalcCost() comparison below.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Only free suballocations or ones whose allocation can become lost
            // are viable starting points.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the cheapest candidate found so far.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize still at the sentinel means no candidate was found.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
4407 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
walking forward from pAllocationRequest->item.

As each allocation becomes lost, FreeSuballocation() may merge it with adjacent
free suballocations; the returned (possibly merged) iterator replaces
pAllocationRequest->item so the walk stays valid.

Returns true when all required allocations were made lost; false if any of them
refused MakeLost() (e.g. still in use within frameInUseCount frames).
*/
bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over an already-free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; adopt the merged iterator.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
4436 
/*
Makes lost every allocation in this block that is eligible (CanBecomeLost() and
MakeLost() succeeds for the given frame parameters). Freed suballocations are
merged in place via FreeSuballocation(), whose returned iterator keeps the loop
valid. Returns the number of allocations that became lost.
*/
uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // Re-assign: the item may have been merged with its neighbors.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
4454 
/*
Checks whether an allocation of allocSize bytes, aligned to allocAlignment and
of the given allocType, could be placed starting at the suballocation pointed
to by suballocItem.

Two modes:
- canMakeOtherLost == false: suballocItem must be a free suballocation and the
  request must fit entirely inside it.
- canMakeOtherLost == true: the request may span several consecutive
  suballocations; used ones are counted in *itemsToMakeLostCount provided they
  can become lost (CanBecomeLost() and last use older than frameInUseCount
  frames relative to currentFrameIndex).

In both modes bufferImageGranularity conflicts with neighboring suballocations
are checked per the Vulkan spec: linear and non-linear resources on the same
"page" of bufferImageGranularity bytes must not overlap.

On success returns true and fills:
- *pOffset              - final, aligned offset for the prospective allocation,
- *itemsToMakeLostCount - number of allocations that would have to become lost,
- *pSumFreeSize         - total size of free suballocations in the covered region,
- *pSumItemSize         - total size of allocations that would become lost.
*/
bool VmaDeviceMemoryBlock::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation contributes to the free sum if free,
        // or to the lost-item sum if its allocation can become lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning (not needed for the very first suballocation).
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation to the next granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check: region would run past the end of the block.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached,
        // accumulating free sizes / lost-item sizes and updating itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the request must fit entirely in this free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning (not needed for the very first suballocation).
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation to the next granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
4736 
4737 bool VmaDeviceMemoryBlock::IsEmpty() const
4738 {
4739  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4740 }
4741 
/*
Commits an allocation previously found by CreateAllocationRequest.

Converts the free suballocation request.item into a used one of allocSize bytes
at request.offset, and inserts new free suballocations for any leftover space
before (paddingBegin) and after (paddingEnd) the allocation. Updates
m_FreeCount and m_SumFreeSize accordingly.
*/
void VmaDeviceMemoryBlock::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before mutating size, since the sorted
    // vector is searched by the item's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, one possibly re-added per padding.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
4805 
/*
Marks the given suballocation as free, merges it with adjacent free
suballocations if possible, and maintains m_FreeCount, m_SumFreeSize and the
sorted m_FreeSuballocationsBySize vector.

Returns an iterator to the resulting free suballocation (the previous item if a
merge-with-previous happened, otherwise the item itself).
*/
VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered before merging, because merging changes
    // their sizes and the sorted vector is searched by size.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
4857 
4858 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4859 {
4860  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4861  suballocItem != m_Suballocations.end();
4862  ++suballocItem)
4863  {
4864  VmaSuballocation& suballoc = *suballocItem;
4865  if(suballoc.hAllocation == allocation)
4866  {
4867  FreeSuballocation(suballocItem);
4868  VMA_HEAVY_ASSERT(Validate());
4869  return;
4870  }
4871  }
4872  VMA_ASSERT(0 && "Not found!");
4873 }
4874 
4875 #if VMA_STATS_STRING_ENABLED
4876 
4877 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4878 {
4879  json.BeginObject();
4880 
4881  json.WriteString("TotalBytes");
4882  json.WriteNumber(m_Size);
4883 
4884  json.WriteString("UnusedBytes");
4885  json.WriteNumber(m_SumFreeSize);
4886 
4887  json.WriteString("Allocations");
4888  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4889 
4890  json.WriteString("UnusedRanges");
4891  json.WriteNumber(m_FreeCount);
4892 
4893  json.WriteString("Suballocations");
4894  json.BeginArray();
4895  size_t i = 0;
4896  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4897  suballocItem != m_Suballocations.cend();
4898  ++suballocItem, ++i)
4899  {
4900  json.BeginObject(true);
4901 
4902  json.WriteString("Type");
4903  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4904 
4905  json.WriteString("Size");
4906  json.WriteNumber(suballocItem->size);
4907 
4908  json.WriteString("Offset");
4909  json.WriteNumber(suballocItem->offset);
4910 
4911  json.EndObject();
4912  }
4913  json.EndArray();
4914 
4915  json.EndObject();
4916 }
4917 
4918 #endif // #if VMA_STATS_STRING_ENABLED
4919 
4920 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4921 {
4922  VMA_ASSERT(item != m_Suballocations.end());
4923  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4924 
4925  VmaSuballocationList::iterator nextItem = item;
4926  ++nextItem;
4927  VMA_ASSERT(nextItem != m_Suballocations.end());
4928  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4929 
4930  item->size += nextItem->size;
4931  --m_FreeCount;
4932  m_Suballocations.erase(nextItem);
4933 }
4934 
/*
Registers a free suballocation in m_FreeSuballocationsBySize, keeping that
vector sorted by size ascending. Suballocations smaller than
VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are intentionally not tracked
there (they are too small to be worth searching).
*/
void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        if(m_FreeSuballocationsBySize.empty())
        {
            m_FreeSuballocationsBySize.push_back(item);
        }
        else
        {
            // Insert at the position that keeps the vector sorted by size.
            VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
        }
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
4958 
4959 
/*
Removes a free suballocation from m_FreeSuballocationsBySize.

Binary-searches to the first entry with the item's size, then scans linearly
through the run of equal-sized entries to find the exact iterator (several free
suballocations can share the same size). Asserts if the item is not found.
No-op for suballocations below VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER,
which are never registered.
*/
void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Still inside the run of equal sizes; past it the item cannot exist.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
4992 
4993 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
4994 {
4995  VkDeviceSize lastSize = 0;
4996  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
4997  {
4998  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
4999 
5000  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5001  {
5002  VMA_ASSERT(0);
5003  return false;
5004  }
5005  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5006  {
5007  VMA_ASSERT(0);
5008  return false;
5009  }
5010  if(it->size < lastSize)
5011  {
5012  VMA_ASSERT(0);
5013  return false;
5014  }
5015 
5016  lastSize = it->size;
5017  }
5018  return true;
5019 }
5020 
5021 static void InitStatInfo(VmaStatInfo& outInfo)
5022 {
5023  memset(&outInfo, 0, sizeof(outInfo));
5024  outInfo.AllocationSizeMin = UINT64_MAX;
5025  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5026 }
5027 
5028 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
5029 {
5030  outInfo.BlockCount = 1;
5031 
5032  const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
5033  outInfo.AllocationCount = rangeCount - block.m_FreeCount;
5034  outInfo.UnusedRangeCount = block.m_FreeCount;
5035 
5036  outInfo.UnusedBytes = block.m_SumFreeSize;
5037  outInfo.UsedBytes = block.m_Size - outInfo.UnusedBytes;
5038 
5039  outInfo.AllocationSizeMin = UINT64_MAX;
5040  outInfo.AllocationSizeMax = 0;
5041  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5042  outInfo.UnusedRangeSizeMax = 0;
5043 
5044  for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
5045  suballocItem != block.m_Suballocations.cend();
5046  ++suballocItem)
5047  {
5048  const VmaSuballocation& suballoc = *suballocItem;
5049  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5050  {
5051  outInfo.AllocationSizeMin = VMA_MIN(outInfo.AllocationSizeMin, suballoc.size);
5052  outInfo.AllocationSizeMax = VMA_MAX(outInfo.AllocationSizeMax, suballoc.size);
5053  }
5054  else
5055  {
5056  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
5057  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
5058  }
5059  }
5060 }
5061 
5062 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5063 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5064 {
5065  inoutInfo.BlockCount += srcInfo.BlockCount;
5066  inoutInfo.AllocationCount += srcInfo.AllocationCount;
5067  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
5068  inoutInfo.UsedBytes += srcInfo.UsedBytes;
5069  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
5070  inoutInfo.AllocationSizeMin = VMA_MIN(inoutInfo.AllocationSizeMin, srcInfo.AllocationSizeMin);
5071  inoutInfo.AllocationSizeMax = VMA_MAX(inoutInfo.AllocationSizeMax, srcInfo.AllocationSizeMax);
5072  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
5073  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
5074 }
5075 
5076 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5077 {
5078  inoutInfo.AllocationSizeAvg = (inoutInfo.AllocationCount > 0) ?
5079  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.AllocationCount) : 0;
5080  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
5081  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
5082 }
5083 
/*
Constructs a custom pool by forwarding createInfo to the internal block vector,
translating pool flags into block-vector parameters:
- PERSISTENT_MAP_BIT selects the MAPPED block vector type,
- IGNORE_BUFFER_IMAGE_GRANULARITY_BIT forces granularity 1 (no conflict checks).
*/
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
            VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
5100 
// Intentionally empty: m_BlockVector releases its blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
5104 
5105 #if VMA_STATS_STRING_ENABLED
5106 
5107 #endif // #if VMA_STATS_STRING_ENABLED
5108 
/*
Constructs an (initially empty) vector of device memory blocks for one memory
type. Blocks themselves are created lazily (or via CreateMinBlocks()).
isCustomPool distinguishes user-created pools from the allocator's default
per-memory-type vectors (affects block-size fallback on allocation failure).
*/
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VMA_BLOCK_VECTOR_TYPE blockVectorType,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_BlockVectorType(blockVectorType),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
5133 
// Destroys all owned blocks (device memory and the block objects themselves).
// The defragmentator must have been destroyed by the owner before this point.
VmaBlockVector::~VmaBlockVector()
{
    VMA_ASSERT(m_pDefragmentator == VMA_NULL);

    // Destroy blocks in reverse order of creation.
    for(size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}
5144 
5145 VkResult VmaBlockVector::CreateMinBlocks()
5146 {
5147  for(size_t i = 0; i < m_MinBlockCount; ++i)
5148  {
5149  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5150  if(res != VK_SUCCESS)
5151  {
5152  return res;
5153  }
5154  }
5155  return VK_SUCCESS;
5156 }
5157 
5158 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5159 {
5160  pStats->size = 0;
5161  pStats->unusedSize = 0;
5162  pStats->allocationCount = 0;
5163  pStats->unusedRangeCount = 0;
5164 
5165  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5166 
5167  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5168  {
5169  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5170  VMA_ASSERT(pBlock);
5171  VMA_HEAVY_ASSERT(pBlock->Validate());
5172 
5173  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5174 
5175  pStats->size += pBlock->m_Size;
5176  pStats->unusedSize += pBlock->m_SumFreeSize;
5177  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5178  pStats->unusedRangeCount += pBlock->m_FreeCount;
5179  }
5180 }
5181 
5182 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5183 
5184 VkResult VmaBlockVector::Allocate(
5185  VmaPool hCurrentPool,
5186  uint32_t currentFrameIndex,
5187  const VkMemoryRequirements& vkMemReq,
5188  const VmaAllocationCreateInfo& createInfo,
5189  VmaSuballocationType suballocType,
5190  VmaAllocation* pAllocation)
5191 {
5192  // Validate flags.
5193  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5194  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5195  {
5196  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5197  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5198  }
5199 
5200  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5201 
5202  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5203  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5204  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5205  {
5206  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5207  VMA_ASSERT(pCurrBlock);
5208  VmaAllocationRequest currRequest = {};
5209  if(pCurrBlock->CreateAllocationRequest(
5210  currentFrameIndex,
5211  m_FrameInUseCount,
5212  m_BufferImageGranularity,
5213  vkMemReq.size,
5214  vkMemReq.alignment,
5215  suballocType,
5216  false, // canMakeOtherLost
5217  &currRequest))
5218  {
5219  // Allocate from pCurrBlock.
5220  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5221 
5222  // We no longer have an empty Allocation.
5223  if(pCurrBlock->IsEmpty())
5224  {
5225  m_HasEmptyBlock = false;
5226  }
5227 
5228  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5229  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5230  (*pAllocation)->InitBlockAllocation(
5231  hCurrentPool,
5232  pCurrBlock,
5233  currRequest.offset,
5234  vkMemReq.alignment,
5235  vkMemReq.size,
5236  suballocType,
5237  createInfo.pUserData,
5238  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5239  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5240  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5241  return VK_SUCCESS;
5242  }
5243  }
5244 
5245  const bool canCreateNewBlock =
5246  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5247  (m_Blocks.size() < m_MaxBlockCount);
5248 
5249  // 2. Try to create new block.
5250  if(canCreateNewBlock)
5251  {
5252  // 2.1. Start with full preferredBlockSize.
5253  VkDeviceSize blockSize = m_PreferredBlockSize;
5254  size_t newBlockIndex = 0;
5255  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5256  // Allocating blocks of other sizes is allowed only in default pools.
5257  // In custom pools block size is fixed.
5258  if(res < 0 && m_IsCustomPool == false)
5259  {
5260  // 2.2. Try half the size.
5261  blockSize /= 2;
5262  if(blockSize >= vkMemReq.size)
5263  {
5264  res = CreateBlock(blockSize, &newBlockIndex);
5265  if(res < 0)
5266  {
5267  // 2.3. Try quarter the size.
5268  blockSize /= 2;
5269  if(blockSize >= vkMemReq.size)
5270  {
5271  res = CreateBlock(blockSize, &newBlockIndex);
5272  }
5273  }
5274  }
5275  }
5276  if(res == VK_SUCCESS)
5277  {
5278  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5279  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5280 
5281  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
5282  VmaAllocationRequest allocRequest = {};
5283  allocRequest.item = pBlock->m_Suballocations.begin();
5284  allocRequest.offset = 0;
5285  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5286  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5287  (*pAllocation)->InitBlockAllocation(
5288  hCurrentPool,
5289  pBlock,
5290  allocRequest.offset,
5291  vkMemReq.alignment,
5292  vkMemReq.size,
5293  suballocType,
5294  createInfo.pUserData,
5295  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5296  VMA_HEAVY_ASSERT(pBlock->Validate());
5297  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
5298 
5299  return VK_SUCCESS;
5300  }
5301  }
5302 
5303  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5304 
5305  // 3. Try to allocate from existing blocks with making other allocations lost.
5306  if(canMakeOtherLost)
5307  {
5308  uint32_t tryIndex = 0;
5309  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5310  {
5311  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5312  VmaAllocationRequest bestRequest = {};
5313  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5314 
5315  // 1. Search existing allocations.
5316  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5317  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5318  {
5319  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5320  VMA_ASSERT(pCurrBlock);
5321  VmaAllocationRequest currRequest = {};
5322  if(pCurrBlock->CreateAllocationRequest(
5323  currentFrameIndex,
5324  m_FrameInUseCount,
5325  m_BufferImageGranularity,
5326  vkMemReq.size,
5327  vkMemReq.alignment,
5328  suballocType,
5329  canMakeOtherLost,
5330  &currRequest))
5331  {
5332  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5333  if(pBestRequestBlock == VMA_NULL ||
5334  currRequestCost < bestRequestCost)
5335  {
5336  pBestRequestBlock = pCurrBlock;
5337  bestRequest = currRequest;
5338  bestRequestCost = currRequestCost;
5339 
5340  if(bestRequestCost == 0)
5341  {
5342  break;
5343  }
5344  }
5345  }
5346  }
5347 
5348  if(pBestRequestBlock != VMA_NULL)
5349  {
5350  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5351  currentFrameIndex,
5352  m_FrameInUseCount,
5353  &bestRequest))
5354  {
5355  // We no longer have an empty Allocation.
5356  if(pBestRequestBlock->IsEmpty())
5357  {
5358  m_HasEmptyBlock = false;
5359  }
5360  // Allocate from this pBlock.
5361  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5362  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5363  (*pAllocation)->InitBlockAllocation(
5364  hCurrentPool,
5365  pBestRequestBlock,
5366  bestRequest.offset,
5367  vkMemReq.alignment,
5368  vkMemReq.size,
5369  suballocType,
5370  createInfo.pUserData,
5371  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5372  VMA_HEAVY_ASSERT(pBlock->Validate());
5373  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5374  return VK_SUCCESS;
5375  }
5376  // else: Some allocations must have been touched while we are here. Next try.
5377  }
5378  else
5379  {
5380  // Could not find place in any of the blocks - break outer loop.
5381  break;
5382  }
5383  }
5384  /* Maximum number of tries exceeded - a very unlike event when many other
5385  threads are simultaneously touching allocations making it impossible to make
5386  lost at the same time as we try to allocate. */
5387  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5388  {
5389  return VK_ERROR_TOO_MANY_OBJECTS;
5390  }
5391  }
5392 
5393  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5394 }
5395 
5396 void VmaBlockVector::Free(
5397  VmaAllocation hAllocation)
5398 {
5399  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5400 
5401  // Scope for lock.
5402  {
5403  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5404 
5405  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5406 
5407  pBlock->Free(hAllocation);
5408  VMA_HEAVY_ASSERT(pBlock->Validate());
5409 
5410  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
5411 
5412  // pBlock became empty after this deallocation.
5413  if(pBlock->IsEmpty())
5414  {
5415  // Already has empty Allocation. We don't want to have two, so delete this one.
5416  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5417  {
5418  pBlockToDelete = pBlock;
5419  Remove(pBlock);
5420  }
5421  // We now have first empty Allocation.
5422  else
5423  {
5424  m_HasEmptyBlock = true;
5425  }
5426  }
5427  // Must be called after srcBlockIndex is used, because later it may become invalid!
5428  IncrementallySortBlocks();
5429  }
5430 
5431  // Destruction of a free Allocation. Deferred until this point, outside of mutex
5432  // lock, for performance reason.
5433  if(pBlockToDelete != VMA_NULL)
5434  {
5435  VMA_DEBUG_LOG(" Deleted empty allocation");
5436  pBlockToDelete->Destroy(m_hAllocator);
5437  vma_delete(m_hAllocator, pBlockToDelete);
5438  }
5439 }
5440 
5441 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5442 {
5443  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5444  {
5445  if(m_Blocks[blockIndex] == pBlock)
5446  {
5447  VmaVectorRemove(m_Blocks, blockIndex);
5448  return;
5449  }
5450  }
5451  VMA_ASSERT(0);
5452 }
5453 
5454 void VmaBlockVector::IncrementallySortBlocks()
5455 {
5456  // Bubble sort only until first swap.
5457  for(size_t i = 1; i < m_Blocks.size(); ++i)
5458  {
5459  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5460  {
5461  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5462  return;
5463  }
5464  }
5465 }
5466 
// Allocates a new VkDeviceMemory of blockSize from m_MemoryTypeIndex, maps it
// persistently when this vector is of the mapped type, wraps it in a new
// VmaDeviceMemoryBlock and appends it to m_Blocks. On success, *pNewBlockIndex
// (if not null) receives the index of the new block. Returns the Vulkan result
// of the failing call otherwise.
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;
    const VkDevice hDevice = m_hAllocator->m_hDevice;
    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Map memory if needed. Mapping is skipped while persistently mapped memory
    // is globally unmapped (m_UnmapPersistentlyMappedMemoryCounter > 0).
    void* pMappedData = VMA_NULL;
    const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    {
        res = vkMapMemory(hDevice, mem, 0, VK_WHOLE_SIZE, 0, &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            // Roll back the device memory allocation on mapping failure.
            m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
            return res;
        }
    }

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_MemoryTypeIndex,
        (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
        mem,
        allocInfo.allocationSize,
        persistentMap,
        pMappedData);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
5514 
5515 #if VMA_STATS_STRING_ENABLED
5516 
// Serializes this block vector as a JSON object into `json`, under the mutex.
// Custom pools emit their full configuration (memory type, block size, block
// count limits, frame-in-use count); default pools emit only the preferred
// block size. In both cases a "Blocks" array with per-block detail follows.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
        {
            json.WriteString("Mapped");
            json.WriteBool(true);
        }

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber(m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber(m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber(m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
5575 
5576 #endif // #if VMA_STATS_STRING_ENABLED
5577 
5578 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5579 {
5580  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5581 
5582  for(size_t i = m_Blocks.size(); i--; )
5583  {
5584  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5585  if(pBlock->m_pMappedData != VMA_NULL)
5586  {
5587  VMA_ASSERT(pBlock->m_PersistentMap != false);
5588  vkUnmapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5589  pBlock->m_pMappedData = VMA_NULL;
5590  }
5591  }
5592 }
5593 
5594 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5595 {
5596  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5597 
5598  VkResult finalResult = VK_SUCCESS;
5599  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5600  {
5601  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5602  if(pBlock->m_PersistentMap)
5603  {
5604  VMA_ASSERT(pBlock->m_pMappedData == nullptr);
5605  VkResult localResult = vkMapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &pBlock->m_pMappedData);
5606  if(localResult != VK_SUCCESS)
5607  {
5608  finalResult = localResult;
5609  }
5610  }
5611  }
5612  return finalResult;
5613 }
5614 
5615 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5616  VkDevice hDevice,
5617  const VkAllocationCallbacks* pAllocationCallbacks,
5618  uint32_t currentFrameIndex)
5619 {
5620  if(m_pDefragmentator == VMA_NULL)
5621  {
5622  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5623  hDevice,
5624  pAllocationCallbacks,
5625  this,
5626  currentFrameIndex);
5627  }
5628 
5629  return m_pDefragmentator;
5630 }
5631 
// Runs the defragmentator (created earlier via EnsureDefragmentator), then
// accumulates move statistics into *pDefragmentationStats, decrements the
// caller's in/out budgets maxBytesToMove / maxAllocationsToMove, and frees
// blocks that became empty (keeping at least m_MinBlockCount of them).
// Returns VK_SUCCESS immediately when no defragmentator exists.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // The budgets are in/out parameters: reduce them by what was consumed.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove does not
    // invalidate the indices still to be visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Size;
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Keep this empty block to satisfy m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
5688 
5689 void VmaBlockVector::DestroyDefragmentator()
5690 {
5691  if(m_pDefragmentator != VMA_NULL)
5692  {
5693  vma_delete(m_hAllocator, m_pDefragmentator);
5694  m_pDefragmentator = VMA_NULL;
5695  }
5696 }
5697 
5698 void VmaBlockVector::MakePoolAllocationsLost(
5699  uint32_t currentFrameIndex,
5700  size_t* pLostAllocationCount)
5701 {
5702  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5703 
5704  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5705  {
5706  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5707  VMA_ASSERT(pBlock);
5708  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5709  }
5710 }
5711 
5712 void VmaBlockVector::AddStats(VmaStats* pStats)
5713 {
5714  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5715  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5716 
5717  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5718 
5719  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5720  {
5721  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5722  VMA_ASSERT(pBlock);
5723  VMA_HEAVY_ASSERT(pBlock->Validate());
5724  VmaStatInfo allocationStatInfo;
5725  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5726  VmaAddStatInfo(pStats->total, allocationStatInfo);
5727  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5728  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5729  }
5730 }
5731 
5733 // VmaDefragmentator members definition
5734 
// Constructs a defragmentator bound to one block vector. Move counters start
// at zero; the allocation and block-info containers use the caller-supplied
// Vulkan allocation callbacks via VmaStlAllocator.
VmaDefragmentator::VmaDefragmentator(
    VkDevice hDevice,
    const VkAllocationCallbacks* pAllocationCallbacks,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hDevice(hDevice),
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(pAllocationCallbacks)),
    m_Blocks(VmaStlAllocator<BlockInfo*>(pAllocationCallbacks))
{
}
5750 
5751 VmaDefragmentator::~VmaDefragmentator()
5752 {
5753  for(size_t i = m_Blocks.size(); i--; )
5754  {
5755  vma_delete(m_pAllocationCallbacks, m_Blocks[i]);
5756  }
5757 }
5758 
5759 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5760 {
5761  AllocationInfo allocInfo;
5762  allocInfo.m_hAllocation = hAlloc;
5763  allocInfo.m_pChanged = pChanged;
5764  m_Allocations.push_back(allocInfo);
5765 }
5766 
// Performs one round of defragmentation. Allocations are visited from the most
// "source" block (last in m_Blocks) to the most "destination" one, and each is
// moved to an earlier block, or a lower offset of the same block, when
// MoveMakesSense approves. Returns VK_SUCCESS when no further moves are
// possible, or VK_INCOMPLETE as soon as a move would exceed maxBytesToMove or
// maxAllocationsToMove.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX is a sentinel meaning "restart from the end of
    // the current block's allocation list" — the while loop below resolves it.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (lazily, cached in BlockInfo) so the data
                // can be copied through host-visible pointers.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hDevice, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hDevice, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Commit the move: register in destination, release in source,
                // and repoint the allocation at its new block/offset.
                pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);

                allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
5897 
// Main defragmentation entry point. Builds per-block bookkeeping, distributes
// the registered allocations to their blocks, sorts blocks from most
// "destination" to most "source", runs up to two DefragmentRound passes within
// the given budgets, then unmaps any blocks that were mapped for the copies.
// Returns the result of the last round (VK_SUCCESS, VK_INCOMPLETE, or an error).
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_pAllocationCallbacks, BlockInfo)(m_pAllocationCallbacks);
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so each allocation's block can be
    // located below with a binary search.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hDevice);
    }

    return result;
}
5965 
5966 bool VmaDefragmentator::MoveMakesSense(
5967  size_t dstBlockIndex, VkDeviceSize dstOffset,
5968  size_t srcBlockIndex, VkDeviceSize srcOffset)
5969 {
5970  if(dstBlockIndex < srcBlockIndex)
5971  {
5972  return true;
5973  }
5974  if(dstBlockIndex > srcBlockIndex)
5975  {
5976  return false;
5977  }
5978  if(dstOffset < srcOffset)
5979  {
5980  return true;
5981  }
5982  return false;
5983 }
5984 
5986 // VmaAllocator_T
5987 
// Constructs the allocator from pCreateInfo: queries physical device and
// memory properties, applies optional per-heap size limits, and creates one
// default VmaBlockVector plus one own-allocation list per (memory type,
// block vector type) pair.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_UnmapPersistentlyMappedMemoryCounter(0),
    m_PreferredLargeHeapBlockSize(0),
    m_PreferredSmallHeapBlockSize(0),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));

    // VK_WHOLE_SIZE means "no limit" for a heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);

    // Fall back to library defaults when the user passes 0.
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
        pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);

    // Optional per-heap limits also clamp the reported heap sizes.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
        {
            m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
                this,
                memTypeIndex,
                static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
                preferredBlockSize,
                0,
                SIZE_MAX,
                GetBufferImageGranularity(),
                pCreateInfo->frameInUseCount,
                false); // isCustomPool
            // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
            // because minBlockCount is 0.
            m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
        }
    }
}
6067 
6068 VmaAllocator_T::~VmaAllocator_T()
6069 {
6070  VMA_ASSERT(m_Pools.empty());
6071 
6072  for(size_t i = GetMemoryTypeCount(); i--; )
6073  {
6074  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6075  {
6076  vma_delete(this, m_pOwnAllocations[i][j]);
6077  vma_delete(this, m_pBlockVectors[i][j]);
6078  }
6079  }
6080 }
6081 
6082 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6083 {
6084  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6085  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6086  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6087  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6088 }
6089 
// Allocates memory of one specific memory type: either as a dedicated ("own")
// VkDeviceMemory, or as a suballocation from the default block vector for
// that type, falling back to own memory when block allocation fails.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation != VMA_NULL);
    VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);

    uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
    const bool ownMemory =
        (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
        VMA_DEBUG_ALWAYS_OWN_MEMORY ||
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
            vkMemReq.size > preferredBlockSize / 2);

    if(ownMemory)
    {
        // NEVER_ALLOCATE forbids creating new VkDeviceMemory, which own memory
        // always requires — the combination cannot succeed.
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateOwnMemory(
                vkMemReq.size,
                suballocType,
                memTypeIndex,
                (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
                createInfo.pUserData,
                pAllocation);
        }
    }
    else
    {
        // Try to suballocate from the default block vector first.
        VkResult res = blockVector->Allocate(
            VK_NULL_HANDLE, // hCurrentPool
            m_CurrentFrameIndex.load(),
            vkMemReq,
            createInfo,
            suballocType,
            pAllocation);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // Block allocation failed — fall back to own memory.
        res = AllocateOwnMemory(
            vkMemReq.size,
            suballocType,
            memTypeIndex,
            (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
            createInfo.pUserData,
            pAllocation);
        if(res == VK_SUCCESS)
        {
            // Succeeded: AllocateOwnMemory function already filled pMemory, nothing more to do here.
            VMA_DEBUG_LOG("    Allocated as OwnMemory");
            return VK_SUCCESS;
        }
        else
        {
            // Everything failed: Return error code.
            VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
            return res;
        }
    }
}
6165 
// Allocates a dedicated VkDeviceMemory of `size` bytes from `memTypeIndex`,
// optionally persistently maps it, wraps it in a VmaAllocation and registers
// it in the sorted m_pOwnAllocations list for that type. On failure the device
// memory is released and the Vulkan error is returned.
VkResult VmaAllocator_T::AllocateOwnMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Map only while persistently mapped memory is not globally unmapped.
    void* pMappedData = nullptr;
    if(map)
    {
        if(m_UnmapPersistentlyMappedMemoryCounter == 0)
        {
            res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
            if(res < 0)
            {
                VMA_DEBUG_LOG("    vkMapMemory FAILED");
                // Roll back the device memory allocation on mapping failure.
                FreeVulkanMemory(memTypeIndex, size, hMemory);
                return res;
            }
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);

    // Register it in m_pOwnAllocations.
    {
        VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
        VMA_ASSERT(pOwnAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
6219 
// Top-level allocation dispatcher. Validates flag combinations, routes
// pool allocations to the pool's block vector, and otherwise iterates over
// compatible memory types (via vmaFindMemoryTypeIndex), retrying with the
// next candidate type whenever allocation from one type fails.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // OWN_MEMORY requires creating new VkDeviceMemory, NEVER_ALLOCATE forbids it.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    // Pool allocations always come from the pool's blocks, never own memory.
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq,
            createInfo,
            suballocType,
            pAllocation);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}
6296 
6297 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6298 {
6299  VMA_ASSERT(allocation);
6300 
6301  if(allocation->CanBecomeLost() == false ||
6302  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6303  {
6304  switch(allocation->GetType())
6305  {
6306  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6307  {
6308  VmaBlockVector* pBlockVector = VMA_NULL;
6309  VmaPool hPool = allocation->GetPool();
6310  if(hPool != VK_NULL_HANDLE)
6311  {
6312  pBlockVector = &hPool->m_BlockVector;
6313  }
6314  else
6315  {
6316  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6317  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6318  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6319  }
6320  pBlockVector->Free(allocation);
6321  }
6322  break;
6323  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6324  FreeOwnMemory(allocation);
6325  break;
6326  default:
6327  VMA_ASSERT(0);
6328  }
6329  }
6330 
6331  vma_delete(this, allocation);
6332 }
6333 
6334 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6335 {
6336  // Initialize.
6337  InitStatInfo(pStats->total);
6338  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6339  InitStatInfo(pStats->memoryType[i]);
6340  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6341  InitStatInfo(pStats->memoryHeap[i]);
6342 
6343  // Process default pools.
6344  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6345  {
6346  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6347  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6348  {
6349  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6350  VMA_ASSERT(pBlockVector);
6351  pBlockVector->AddStats(pStats);
6352  }
6353  }
6354 
6355  // Process custom pools.
6356  {
6357  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6358  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6359  {
6360  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6361  }
6362  }
6363 
6364  // Process own allocations.
6365  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6366  {
6367  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6368  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6369  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6370  {
6371  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6372  VMA_ASSERT(pOwnAllocVector);
6373  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6374  {
6375  VmaStatInfo allocationStatInfo;
6376  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6377  VmaAddStatInfo(pStats->total, allocationStatInfo);
6378  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6379  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6380  }
6381  }
6382  }
6383 
6384  // Postprocess.
6385  VmaPostprocessCalcStatInfo(pStats->total);
6386  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6387  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6388  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6389  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6390 }
6391 
// PCI vendor ID of Advanced Micro Devices (4098 == 0x1002), compared against
// VkPhysicalDeviceProperties::vendorID below.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
6393 
/*
Temporarily unmaps all persistently mapped memory owned by this allocator.
Calls are reference-counted: only the outermost call does any work, and
MapPersistentlyMappedMemory() must be called the same number of times to undo it.
*/
void VmaAllocator_T::UnmapPersistentlyMappedMemory()
{
    // Only the first call of a nested sequence performs the unmapping.
    if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    {
        // The unmapping is applied only on AMD GPUs, and only to memory types
        // that are both HOST_VISIBLE and DEVICE_LOCAL.
        // NOTE(review): presumably a workaround for AMD driver behavior with
        // such memory — rationale not visible in this chunk; confirm upstream.
        if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
        {
            for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
            {
                const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
                if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                    (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                {
                    // Process OwnAllocations (dedicated memory), under the per-type mutex.
                    {
                        VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
                        AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
                        {
                            VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
                            hAlloc->OwnAllocUnmapPersistentlyMappedMemory(m_hDevice);
                        }
                    }

                    // Process normal Allocations (default mapped block vector).
                    {
                        VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        pBlockVector->UnmapPersistentlyMappedMemory();
                    }
                }
            }

            // Process custom pools, under the pools mutex.
            {
                VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
                for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
                {
                    m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
                }
            }
        }
    }
}
6436 
/*
Re-maps memory previously unmapped by UnmapPersistentlyMappedMemory().
Reference-counted counterpart: only the call that brings the counter back to
zero performs the mapping. Returns the last mapping failure, if any.
*/
VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
{
    VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    {
        VkResult finalResult = VK_SUCCESS;
        // Mirrors UnmapPersistentlyMappedMemory: only applies on AMD GPUs.
        if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
        {
            // Process custom pools, under the pools mutex.
            {
                VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
                for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
                {
                    m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
                }
            }

            // Re-map HOST_VISIBLE + DEVICE_LOCAL memory types (same filter as unmap).
            for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
            {
                const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
                if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                    (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                {
                    // Process OwnAllocations (dedicated memory), under the per-type mutex.
                    {
                        VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
                        AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
                        {
                            VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
                            hAlloc->OwnAllocMapPersistentlyMappedMemory(m_hDevice);
                        }
                    }

                    // Process normal Allocations; remember the failure but keep going,
                    // so as much memory as possible ends up mapped again.
                    {
                        VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
                        VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
                        if(localResult != VK_SUCCESS)
                        {
                            finalResult = localResult;
                        }
                    }
                }
            }
        }
        return finalResult;
    }
    else
        return VK_SUCCESS;
}
6488 
6489 VkResult VmaAllocator_T::Defragment(
6490  VmaAllocation* pAllocations,
6491  size_t allocationCount,
6492  VkBool32* pAllocationsChanged,
6493  const VmaDefragmentationInfo* pDefragmentationInfo,
6494  VmaDefragmentationStats* pDefragmentationStats)
6495 {
6496  if(pAllocationsChanged != VMA_NULL)
6497  {
6498  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
6499  }
6500  if(pDefragmentationStats != VMA_NULL)
6501  {
6502  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6503  }
6504 
6505  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6506  {
6507  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6508  return VK_ERROR_MEMORY_MAP_FAILED;
6509  }
6510 
6511  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6512 
6513  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6514 
6515  const size_t poolCount = m_Pools.size();
6516 
6517  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6518  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6519  {
6520  VmaAllocation hAlloc = pAllocations[allocIndex];
6521  VMA_ASSERT(hAlloc);
6522  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6523  // OwnAlloc cannot be defragmented.
6524  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6525  // Only HOST_VISIBLE memory types can be defragmented.
6526  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6527  // Lost allocation cannot be defragmented.
6528  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6529  {
6530  VmaBlockVector* pAllocBlockVector = nullptr;
6531 
6532  const VmaPool hAllocPool = hAlloc->GetPool();
6533  // This allocation belongs to custom pool.
6534  if(hAllocPool != VK_NULL_HANDLE)
6535  {
6536  pAllocBlockVector = &hAllocPool->GetBlockVector();
6537  }
6538  // This allocation belongs to general pool.
6539  else
6540  {
6541  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6542  }
6543 
6544  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(
6545  m_hDevice,
6546  GetAllocationCallbacks(),
6547  currentFrameIndex);
6548 
6549  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6550  &pAllocationsChanged[allocIndex] : VMA_NULL;
6551  pDefragmentator->AddAllocation(hAlloc, pChanged);
6552  }
6553  }
6554 
6555  VkResult result = VK_SUCCESS;
6556 
6557  // ======== Main processing.
6558 
6559  VkDeviceSize maxBytesToMove = SIZE_MAX;
6560  uint32_t maxAllocationsToMove = UINT32_MAX;
6561  if(pDefragmentationInfo != VMA_NULL)
6562  {
6563  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6564  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6565  }
6566 
6567  // Process standard memory.
6568  for(uint32_t memTypeIndex = 0;
6569  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6570  ++memTypeIndex)
6571  {
6572  // Only HOST_VISIBLE memory types can be defragmented.
6573  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6574  {
6575  for(uint32_t blockVectorType = 0;
6576  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6577  ++blockVectorType)
6578  {
6579  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6580  pDefragmentationStats,
6581  maxBytesToMove,
6582  maxAllocationsToMove);
6583  }
6584  }
6585  }
6586 
6587  // Process custom pools.
6588  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6589  {
6590  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6591  pDefragmentationStats,
6592  maxBytesToMove,
6593  maxAllocationsToMove);
6594  }
6595 
6596  // ======== Destroy defragmentators.
6597 
6598  // Process custom pools.
6599  for(size_t poolIndex = poolCount; poolIndex--; )
6600  {
6601  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6602  }
6603 
6604  // Process standard memory.
6605  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6606  {
6607  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6608  {
6609  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6610  {
6611  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6612  }
6613  }
6614  }
6615 
6616  return result;
6617 }
6618 
/*
Fills *pAllocationInfo with the current state of hAllocation. For allocations
that can become lost, this also "touches" the allocation by advancing its
last-use frame index to the current frame.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free loop racing against the allocation becoming lost:
        // repeatedly try to CAS the last-use frame index up to the current
        // frame, then report whichever state (lost / current) wins.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report empty info, keeping only size and user data.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report full info.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = hAllocation->GetMappedData();
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame; on CAS failure
                // localLastUseFrameIndex is reloaded and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    // We could use the same code here, but for performance reasons we don't need to use the hAllocation.LastUseFrameIndex atomic.
    else
    {
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
6671 
6672 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6673 {
6674  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6675 
6676  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6677 
6678  if(newCreateInfo.maxBlockCount == 0)
6679  {
6680  newCreateInfo.maxBlockCount = SIZE_MAX;
6681  }
6682  if(newCreateInfo.blockSize == 0)
6683  {
6684  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6685  }
6686 
6687  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6688 
6689  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6690  if(res != VK_SUCCESS)
6691  {
6692  vma_delete(this, *pPool);
6693  *pPool = VMA_NULL;
6694  return res;
6695  }
6696 
6697  // Add to m_Pools.
6698  {
6699  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6700  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6701  }
6702 
6703  return VK_SUCCESS;
6704 }
6705 
6706 void VmaAllocator_T::DestroyPool(VmaPool pool)
6707 {
6708  // Remove from m_Pools.
6709  {
6710  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6711  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6712  VMA_ASSERT(success && "Pool not found in Allocator.");
6713  }
6714 
6715  vma_delete(this, pool);
6716 }
6717 
// Fills *pPoolStats for a custom pool; all bookkeeping lives in the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
6722 
// Atomically publishes the application's current frame index, used by the
// lost-allocation machinery (see GetAllocationInfo / MakePoolAllocationsLost).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
6727 
// Marks eligible allocations in hPool as lost, relative to the current frame.
// Optional pLostAllocationCount receives how many were lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
6736 
// Creates a dummy allocation that is already in the lost state
// (its frame index is VMA_FRAME_INDEX_LOST and it owns no memory).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    (*pAllocation)->InitLost();
}
6742 
6743 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6744 {
6745  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6746 
6747  VkResult res;
6748  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6749  {
6750  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6751  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6752  {
6753  res = vkAllocateMemory(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6754  if(res == VK_SUCCESS)
6755  {
6756  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6757  }
6758  }
6759  else
6760  {
6761  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6762  }
6763  }
6764  else
6765  {
6766  res = vkAllocateMemory(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6767  }
6768 
6769  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6770  {
6771  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6772  }
6773 
6774  return res;
6775 }
6776 
6777 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6778 {
6779  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6780  {
6781  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6782  }
6783 
6784  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
6785 
6786  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6787  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6788  {
6789  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6790  m_HeapSizeLimit[heapIndex] += size;
6791  }
6792 }
6793 
6794 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6795 {
6796  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6797 
6798  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6799  {
6800  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6801  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6802  VMA_ASSERT(pOwnAllocations);
6803  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6804  VMA_ASSERT(success);
6805  }
6806 
6807  VkDeviceMemory hMemory = allocation->GetMemory();
6808 
6809  if(allocation->GetMappedData() != VMA_NULL)
6810  {
6811  vkUnmapMemory(m_hDevice, hMemory);
6812  }
6813 
6814  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
6815 
6816  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6817 }
6818 
6819 #if VMA_STATS_STRING_ENABLED
6820 
/*
Writes a detailed JSON map of this allocator's memory into `json`, as three
optional top-level sections: "OwnAllocations", "DefaultPools" and "Pools".
Sections are emitted lazily, only when they have content.
*/
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // --- Section 1: own (dedicated) allocations, per memory type. ---
    bool ownAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
        for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
        {
            AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
            VMA_ASSERT(pOwnAllocVector);
            if(pOwnAllocVector->empty() == false)
            {
                // Open the "OwnAllocations" object on first non-empty vector.
                if(ownAllocationsStarted == false)
                {
                    ownAllocationsStarted = true;
                    json.WriteString("OwnAllocations");
                    json.BeginObject();
                }

                // Key: "Type <index>" with " Mapped" suffix for the mapped vector.
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
                {
                    json.ContinueString(" Mapped");
                }
                json.EndString();

                json.BeginArray();

                // One compact object per own allocation: size + suballocation type.
                for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
                {
                    const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
                    json.BeginObject(true);

                    json.WriteString("Size");
                    json.WriteNumber(hAlloc->GetSize());

                    json.WriteString("Type");
                    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                    json.EndObject();
                }

                json.EndArray();
            }
        }
    }
    if(ownAllocationsStarted)
    {
        json.EndObject();
    }

    // --- Section 2: default block vectors, per memory type. ---
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
            {
                if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
                {
                    // Open the "DefaultPools" object on first non-empty vector.
                    if(allocationsStarted == false)
                    {
                        allocationsStarted = true;
                        json.WriteString("DefaultPools");
                        json.BeginObject();
                    }

                    json.BeginString("Type ");
                    json.ContinueString(memTypeIndex);
                    if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
                    {
                        json.ContinueString(" Mapped");
                    }
                    json.EndString();

                    m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
                }
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // --- Section 3: custom pools (guarded by the pools mutex). ---
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
6921 
6922 #endif // #if VMA_STATS_STRING_ENABLED
6923 
6924 static VkResult AllocateMemoryForImage(
6925  VmaAllocator allocator,
6926  VkImage image,
6927  const VmaAllocationCreateInfo* pAllocationCreateInfo,
6928  VmaSuballocationType suballocType,
6929  VmaAllocation* pAllocation)
6930 {
6931  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
6932 
6933  VkMemoryRequirements vkMemReq = {};
6934  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
6935 
6936  return allocator->AllocateMemory(
6937  vkMemReq,
6938  *pAllocationCreateInfo,
6939  suballocType,
6940  pAllocation);
6941 }
6942 
6944 // Public interface
6945 
// Creates the VmaAllocator object. The allocator itself is allocated using
// the caller-provided CPU allocation callbacks from pCreateInfo.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return VK_SUCCESS;
}
6955 
// Destroys the allocator. Null is a no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks onto the stack first: they are a member of the
        // object being deleted and must outlive the deletion that uses them.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
6966 
6968  VmaAllocator allocator,
6969  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
6970 {
6971  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
6972  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
6973 }
6974 
6976  VmaAllocator allocator,
6977  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
6978 {
6979  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
6980  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
6981 }
6982 
6984  VmaAllocator allocator,
6985  uint32_t memoryTypeIndex,
6986  VkMemoryPropertyFlags* pFlags)
6987 {
6988  VMA_ASSERT(allocator && pFlags);
6989  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
6990  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
6991 }
6992 
6994  VmaAllocator allocator,
6995  uint32_t frameIndex)
6996 {
6997  VMA_ASSERT(allocator);
6998  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
6999 
7000  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7001 
7002  allocator->SetCurrentFrameIndex(frameIndex);
7003 }
7004 
// Public entry point: computes aggregate statistics into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
7013 
7014 #if VMA_STATS_STRING_ENABLED
7015 
/*
Builds a JSON statistics string describing the allocator's state and returns
it in *ppStatsString. The returned buffer must be released with
vmaFreeStatsString(). When detailedMap is VK_TRUE, the full memory map
(PrintDetailedMap) is appended as well.
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer flushes into sb before it is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Grand total across all heaps and types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap statistics are emitted only when the heap has any blocks.
            if(stats.memoryHeap[heapIndex].BlockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested: every memory type belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode the property flags into human-readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].BlockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built string into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
7123 
7124 void vmaFreeStatsString(
7125  VmaAllocator allocator,
7126  char* pStatsString)
7127 {
7128  if(pStatsString != VMA_NULL)
7129  {
7130  VMA_ASSERT(allocator);
7131  size_t len = strlen(pStatsString);
7132  vma_delete_array(allocator, pStatsString, len + 1);
7133  }
7134 }
7135 
7136 #endif // #if VMA_STATS_STRING_ENABLED
7137 
7140 VkResult vmaFindMemoryTypeIndex(
7141  VmaAllocator allocator,
7142  uint32_t memoryTypeBits,
7143  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7144  uint32_t* pMemoryTypeIndex)
7145 {
7146  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7147  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7148  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7149 
7150  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7151  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7152  if(preferredFlags == 0)
7153  {
7154  preferredFlags = requiredFlags;
7155  }
7156  // preferredFlags, if not 0, must be a superset of requiredFlags.
7157  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7158 
7159  // Convert usage to requiredFlags and preferredFlags.
7160  switch(pAllocationCreateInfo->usage)
7161  {
7163  break;
7165  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7166  break;
7168  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7169  break;
7171  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7172  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7173  break;
7175  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7176  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7177  break;
7178  default:
7179  break;
7180  }
7181 
7182  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7183  {
7184  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7185  }
7186 
7187  *pMemoryTypeIndex = UINT32_MAX;
7188  uint32_t minCost = UINT32_MAX;
7189  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7190  memTypeIndex < allocator->GetMemoryTypeCount();
7191  ++memTypeIndex, memTypeBit <<= 1)
7192  {
7193  // This memory type is acceptable according to memoryTypeBits bitmask.
7194  if((memTypeBit & memoryTypeBits) != 0)
7195  {
7196  const VkMemoryPropertyFlags currFlags =
7197  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7198  // This memory type contains requiredFlags.
7199  if((requiredFlags & ~currFlags) == 0)
7200  {
7201  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7202  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7203  // Remember memory type with lowest cost.
7204  if(currCost < minCost)
7205  {
7206  *pMemoryTypeIndex = memTypeIndex;
7207  if(currCost == 0)
7208  {
7209  return VK_SUCCESS;
7210  }
7211  minCost = currCost;
7212  }
7213  }
7214  }
7215  }
7216  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7217 }
7218 
// Public entry point: creates a custom memory pool (see VmaAllocator_T::CreatePool).
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CreatePool(pCreateInfo, pPool);
}
7232 
// Public entry point: destroys a custom memory pool.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->DestroyPool(pool);
}
7245 
// Public entry point: fills *pPoolStats with statistics of a custom pool.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
7257 
7259  VmaAllocator allocator,
7260  VmaPool pool,
7261  size_t* pLostAllocationCount)
7262 {
7263  VMA_ASSERT(allocator && pool);
7264 
7265  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7266 
7267  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7268 }
7269 
7270 VkResult vmaAllocateMemory(
7271  VmaAllocator allocator,
7272  const VkMemoryRequirements* pVkMemoryRequirements,
7273  const VmaAllocationCreateInfo* pCreateInfo,
7274  VmaAllocation* pAllocation,
7275  VmaAllocationInfo* pAllocationInfo)
7276 {
7277  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7278 
7279  VMA_DEBUG_LOG("vmaAllocateMemory");
7280 
7281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7282 
7283  VkResult result = allocator->AllocateMemory(
7284  *pVkMemoryRequirements,
7285  *pCreateInfo,
7286  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7287  pAllocation);
7288 
7289  if(pAllocationInfo && result == VK_SUCCESS)
7290  {
7291  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7292  }
7293 
7294  return result;
7295 }
7296 
7298  VmaAllocator allocator,
7299  VkBuffer buffer,
7300  const VmaAllocationCreateInfo* pCreateInfo,
7301  VmaAllocation* pAllocation,
7302  VmaAllocationInfo* pAllocationInfo)
7303 {
7304  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7305 
7306  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7307 
7308  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7309 
7310  VkMemoryRequirements vkMemReq = {};
7311  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
7312 
7313  VkResult result = allocator->AllocateMemory(
7314  vkMemReq,
7315  *pCreateInfo,
7316  VMA_SUBALLOCATION_TYPE_BUFFER,
7317  pAllocation);
7318 
7319  if(pAllocationInfo && result == VK_SUCCESS)
7320  {
7321  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7322  }
7323 
7324  return result;
7325 }
7326 
7327 VkResult vmaAllocateMemoryForImage(
7328  VmaAllocator allocator,
7329  VkImage image,
7330  const VmaAllocationCreateInfo* pCreateInfo,
7331  VmaAllocation* pAllocation,
7332  VmaAllocationInfo* pAllocationInfo)
7333 {
7334  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7335 
7336  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7337 
7338  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7339 
7340  VkResult result = AllocateMemoryForImage(
7341  allocator,
7342  image,
7343  pCreateInfo,
7344  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7345  pAllocation);
7346 
7347  if(pAllocationInfo && result == VK_SUCCESS)
7348  {
7349  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7350  }
7351 
7352  return result;
7353 }
7354 
7355 void vmaFreeMemory(
7356  VmaAllocator allocator,
7357  VmaAllocation allocation)
7358 {
7359  VMA_ASSERT(allocator && allocation);
7360 
7361  VMA_DEBUG_LOG("vmaFreeMemory");
7362 
7363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7364 
7365  allocator->FreeMemory(allocation);
7366 }
7367 
7369  VmaAllocator allocator,
7370  VmaAllocation allocation,
7371  VmaAllocationInfo* pAllocationInfo)
7372 {
7373  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7374 
7375  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7376 
7377  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7378 }
7379 
7381  VmaAllocator allocator,
7382  VmaAllocation allocation,
7383  void* pUserData)
7384 {
7385  VMA_ASSERT(allocator && allocation);
7386 
7387  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7388 
7389  allocation->SetUserData(pUserData);
7390 }
7391 
7393  VmaAllocator allocator,
7394  VmaAllocation* pAllocation)
7395 {
7396  VMA_ASSERT(allocator && pAllocation);
7397 
7398  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7399 
7400  allocator->CreateLostAllocation(pAllocation);
7401 }
7402 
7403 VkResult vmaMapMemory(
7404  VmaAllocator allocator,
7405  VmaAllocation allocation,
7406  void** ppData)
7407 {
7408  VMA_ASSERT(allocator && allocation && ppData);
7409 
7410  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7411 
7412  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
7413  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7414 }
7415 
7416 void vmaUnmapMemory(
7417  VmaAllocator allocator,
7418  VmaAllocation allocation)
7419 {
7420  VMA_ASSERT(allocator && allocation);
7421 
7422  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7423 
7424  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
7425 }
7426 
7427 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7428 {
7429  VMA_ASSERT(allocator);
7430 
7431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7432 
7433  allocator->UnmapPersistentlyMappedMemory();
7434 }
7435 
7436 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7437 {
7438  VMA_ASSERT(allocator);
7439 
7440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7441 
7442  return allocator->MapPersistentlyMappedMemory();
7443 }
7444 
7445 VkResult vmaDefragment(
7446  VmaAllocator allocator,
7447  VmaAllocation* pAllocations,
7448  size_t allocationCount,
7449  VkBool32* pAllocationsChanged,
7450  const VmaDefragmentationInfo *pDefragmentationInfo,
7451  VmaDefragmentationStats* pDefragmentationStats)
7452 {
7453  VMA_ASSERT(allocator && pAllocations);
7454 
7455  VMA_DEBUG_LOG("vmaDefragment");
7456 
7457  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7458 
7459  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7460 }
7461 
7462 VkResult vmaCreateBuffer(
7463  VmaAllocator allocator,
7464  const VkBufferCreateInfo* pBufferCreateInfo,
7465  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7466  VkBuffer* pBuffer,
7467  VmaAllocation* pAllocation,
7468  VmaAllocationInfo* pAllocationInfo)
7469 {
7470  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7471 
7472  VMA_DEBUG_LOG("vmaCreateBuffer");
7473 
7474  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7475 
7476  *pBuffer = VK_NULL_HANDLE;
7477  *pAllocation = VK_NULL_HANDLE;
7478 
7479  // 1. Create VkBuffer.
7480  VkResult res = vkCreateBuffer(allocator->m_hDevice, pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
7481  if(res >= 0)
7482  {
7483  // 2. vkGetBufferMemoryRequirements.
7484  VkMemoryRequirements vkMemReq = {};
7485  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
7486 
7487  // 3. Allocate memory using allocator.
7488  res = allocator->AllocateMemory(
7489  vkMemReq,
7490  *pAllocationCreateInfo,
7491  VMA_SUBALLOCATION_TYPE_BUFFER,
7492  pAllocation);
7493  if(res >= 0)
7494  {
7495  // 3. Bind buffer with memory.
7496  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
7497  if(res >= 0)
7498  {
7499  // All steps succeeded.
7500  if(pAllocationInfo != VMA_NULL)
7501  {
7502  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7503  }
7504  return VK_SUCCESS;
7505  }
7506  allocator->FreeMemory(*pAllocation);
7507  *pAllocation = VK_NULL_HANDLE;
7508  return res;
7509  }
7510  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7511  *pBuffer = VK_NULL_HANDLE;
7512  return res;
7513  }
7514  return res;
7515 }
7516 
7517 void vmaDestroyBuffer(
7518  VmaAllocator allocator,
7519  VkBuffer buffer,
7520  VmaAllocation allocation)
7521 {
7522  if(buffer != VK_NULL_HANDLE)
7523  {
7524  VMA_ASSERT(allocator);
7525 
7526  VMA_DEBUG_LOG("vmaDestroyBuffer");
7527 
7528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7529 
7530  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7531 
7532  allocator->FreeMemory(allocation);
7533  }
7534 }
7535 
7536 VkResult vmaCreateImage(
7537  VmaAllocator allocator,
7538  const VkImageCreateInfo* pImageCreateInfo,
7539  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7540  VkImage* pImage,
7541  VmaAllocation* pAllocation,
7542  VmaAllocationInfo* pAllocationInfo)
7543 {
7544  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7545 
7546  VMA_DEBUG_LOG("vmaCreateImage");
7547 
7548  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7549 
7550  *pImage = VK_NULL_HANDLE;
7551  *pAllocation = VK_NULL_HANDLE;
7552 
7553  // 1. Create VkImage.
7554  VkResult res = vkCreateImage(allocator->m_hDevice, pImageCreateInfo, allocator->GetAllocationCallbacks(), pImage);
7555  if(res >= 0)
7556  {
7557  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7558  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7559  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7560 
7561  // 2. Allocate memory using allocator.
7562  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7563  if(res >= 0)
7564  {
7565  // 3. Bind image with memory.
7566  res = vkBindImageMemory(allocator->m_hDevice, *pImage, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
7567  if(res >= 0)
7568  {
7569  // All steps succeeded.
7570  if(pAllocationInfo != VMA_NULL)
7571  {
7572  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7573  }
7574  return VK_SUCCESS;
7575  }
7576  allocator->FreeMemory(*pAllocation);
7577  *pAllocation = VK_NULL_HANDLE;
7578  return res;
7579  }
7580  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7581  *pImage = VK_NULL_HANDLE;
7582  return res;
7583  }
7584  return res;
7585 }
7586 
7587 void vmaDestroyImage(
7588  VmaAllocator allocator,
7589  VkImage image,
7590  VmaAllocation allocation)
7591 {
7592  if(image != VK_NULL_HANDLE)
7593  {
7594  VMA_ASSERT(allocator);
7595 
7596  VMA_DEBUG_LOG("vmaDestroyImage");
7597 
7598  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7599 
7600  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7601 
7602  allocator->FreeMemory(allocation);
7603  }
7604 }
7605 
7606 #endif // #ifdef VMA_IMPLEMENTATION
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:473
Definition: vk_mem_alloc.h:790
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
uint32_t BlockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:574
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:641
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:911
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1061
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:842
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:690
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:723
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:436
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:485
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:792
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:467
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:482
VkFlags VmaAllocatorFlags
Definition: vk_mem_alloc.h:464
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1065
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:502
VmaStatInfo total
Definition: vk_mem_alloc.h:592
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1073
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:706
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1056
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:476
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:796
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:921
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:725
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:812
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:848
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:799
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
Definition: vk_mem_alloc.h:699
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1051
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VkDeviceSize AllocationSizeMax
Definition: vk_mem_alloc.h:583
Definition: vk_mem_alloc.h:770
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1069
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:588
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:679
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1071
VmaMemoryUsage
Definition: vk_mem_alloc.h:627
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:717
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:460
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
VmaAllocatorFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:455
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:571
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:807
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:447
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:451
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:802
VkDeviceSize UnusedRangeSizeMax
Definition: vk_mem_alloc.h:584
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:430
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:712
Definition: vk_mem_alloc.h:703
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:820
VkDeviceSize AllocationSizeMin
Definition: vk_mem_alloc.h:583
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:488
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:851
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:730
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:520
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:590
VkDeviceSize AllocationSizeAvg
Definition: vk_mem_alloc.h:583
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
uint32_t AllocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:576
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:449
Definition: vk_mem_alloc.h:697
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:834
VmaAllocatorFlags flags
Flags for created allocator. Use VmaAllocatorFlagBits enum.
Definition: vk_mem_alloc.h:470
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VkDeviceSize UsedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:580
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:932
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:658
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
Definition: vk_mem_alloc.h:479
uint32_t UnusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:578
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:839
Memory will be mapped on host. Could be used for transfer to/from device.
Definition: vk_mem_alloc.h:635
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VkDeviceSize UnusedRangeSizeAvg
Definition: vk_mem_alloc.h:584
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:916
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1067
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:701
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:761
void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistent...
Definition: vk_mem_alloc.h:927
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
No intended memory usage specified.
Definition: vk_mem_alloc.h:630
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
Definition: vk_mem_alloc.h:642
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:897
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:638
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:646
Definition: vk_mem_alloc.h:462
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:669
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:632
struct VmaStatInfo VmaStatInfo
VkDeviceSize UnusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:582
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:591
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:845
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:788
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
VkDeviceSize UnusedRangeSizeMin
Definition: vk_mem_alloc.h:584
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:902
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.