Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
429 #include <vulkan/vulkan.h>
430 
432 
436 VK_DEFINE_HANDLE(VmaAllocator)
437 
438 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
440  VmaAllocator allocator,
441  uint32_t memoryType,
442  VkDeviceMemory memory,
443  VkDeviceSize size);
445 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
446  VmaAllocator allocator,
447  uint32_t memoryType,
448  VkDeviceMemory memory,
449  VkDeviceSize size);
450 
456 typedef struct VmaDeviceMemoryCallbacks {
462 
464 typedef enum VmaAllocatorFlagBits {
470 
473 typedef VkFlags VmaAllocatorFlags;
474 
475 typedef struct VmaVulkanFunctions {
476  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
477  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
478  PFN_vkAllocateMemory vkAllocateMemory;
479  PFN_vkFreeMemory vkFreeMemory;
480  PFN_vkMapMemory vkMapMemory;
481  PFN_vkUnmapMemory vkUnmapMemory;
482  PFN_vkBindBufferMemory vkBindBufferMemory;
483  PFN_vkBindImageMemory vkBindImageMemory;
484  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
485  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
486  PFN_vkCreateBuffer vkCreateBuffer;
487  PFN_vkDestroyBuffer vkDestroyBuffer;
488  PFN_vkCreateImage vkCreateImage;
489  PFN_vkDestroyImage vkDestroyImage;
491 
494 {
498 
499  VkPhysicalDevice physicalDevice;
501 
502  VkDevice device;
504 
507 
510 
511  const VkAllocationCallbacks* pAllocationCallbacks;
513 
528  uint32_t frameInUseCount;
546  const VkDeviceSize* pHeapSizeLimit;
560 
562 VkResult vmaCreateAllocator(
563  const VmaAllocatorCreateInfo* pCreateInfo,
564  VmaAllocator* pAllocator);
565 
568  VmaAllocator allocator);
569 
575  VmaAllocator allocator,
576  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
577 
583  VmaAllocator allocator,
584  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
585 
593  VmaAllocator allocator,
594  uint32_t memoryTypeIndex,
595  VkMemoryPropertyFlags* pFlags);
596 
606  VmaAllocator allocator,
607  uint32_t frameIndex);
608 
609 typedef struct VmaStatInfo
610 {
612  uint32_t BlockCount;
614  uint32_t AllocationCount;
618  VkDeviceSize UsedBytes;
620  VkDeviceSize UnusedBytes;
621  VkDeviceSize AllocationSizeMin, AllocationSizeAvg, AllocationSizeMax;
622  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
623 } VmaStatInfo;
624 
626 typedef struct VmaStats
627 {
628  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
629  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
631 } VmaStats;
632 
634 void vmaCalculateStats(
635  VmaAllocator allocator,
636  VmaStats* pStats);
637 
638 #define VMA_STATS_STRING_ENABLED 1
639 
640 #if VMA_STATS_STRING_ENABLED
641 
643 
646  VmaAllocator allocator,
647  char** ppStatsString,
648  VkBool32 detailedMap);
649 
650 void vmaFreeStatsString(
651  VmaAllocator allocator,
652  char* pStatsString);
653 
654 #endif // #if VMA_STATS_STRING_ENABLED
655 
658 
663 VK_DEFINE_HANDLE(VmaPool)
664 
665 typedef enum VmaMemoryUsage
666 {
672 
675 
678 
682 
697 
736 
739 typedef VkFlags VmaAllocationCreateFlags;
740 
742 {
755  VkMemoryPropertyFlags requiredFlags;
761  VkMemoryPropertyFlags preferredFlags;
763  void* pUserData;
768  VmaPool pool;
770 
785 VkResult vmaFindMemoryTypeIndex(
786  VmaAllocator allocator,
787  uint32_t memoryTypeBits,
788  const VmaAllocationCreateInfo* pAllocationCreateInfo,
789  uint32_t* pMemoryTypeIndex);
790 
793 
798 typedef enum VmaPoolCreateFlagBits {
827 
830 typedef VkFlags VmaPoolCreateFlags;
831 
834 typedef struct VmaPoolCreateInfo {
837  uint32_t memoryTypeIndex;
845  VkDeviceSize blockSize;
872  uint32_t frameInUseCount;
874 
877 typedef struct VmaPoolStats {
880  VkDeviceSize size;
883  VkDeviceSize unusedSize;
890 } VmaPoolStats;
891 
898 VkResult vmaCreatePool(
899  VmaAllocator allocator,
900  const VmaPoolCreateInfo* pCreateInfo,
901  VmaPool* pPool);
902 
905 void vmaDestroyPool(
906  VmaAllocator allocator,
907  VmaPool pool);
908 
915 void vmaGetPoolStats(
916  VmaAllocator allocator,
917  VmaPool pool,
918  VmaPoolStats* pPoolStats);
919 
927  VmaAllocator allocator,
928  VmaPool pool,
929  size_t* pLostAllocationCount);
930 
931 VK_DEFINE_HANDLE(VmaAllocation)
932 
933 
935 typedef struct VmaAllocationInfo {
940  uint32_t memoryType;
949  VkDeviceMemory deviceMemory;
954  VkDeviceSize offset;
959  VkDeviceSize size;
965  void* pMappedData;
970  void* pUserData;
972 
983 VkResult vmaAllocateMemory(
984  VmaAllocator allocator,
985  const VkMemoryRequirements* pVkMemoryRequirements,
986  const VmaAllocationCreateInfo* pCreateInfo,
987  VmaAllocation* pAllocation,
988  VmaAllocationInfo* pAllocationInfo);
989 
997  VmaAllocator allocator,
998  VkBuffer buffer,
999  const VmaAllocationCreateInfo* pCreateInfo,
1000  VmaAllocation* pAllocation,
1001  VmaAllocationInfo* pAllocationInfo);
1002 
1004 VkResult vmaAllocateMemoryForImage(
1005  VmaAllocator allocator,
1006  VkImage image,
1007  const VmaAllocationCreateInfo* pCreateInfo,
1008  VmaAllocation* pAllocation,
1009  VmaAllocationInfo* pAllocationInfo);
1010 
1012 void vmaFreeMemory(
1013  VmaAllocator allocator,
1014  VmaAllocation allocation);
1015 
1018  VmaAllocator allocator,
1019  VmaAllocation allocation,
1020  VmaAllocationInfo* pAllocationInfo);
1021 
1024  VmaAllocator allocator,
1025  VmaAllocation allocation,
1026  void* pUserData);
1027 
1039  VmaAllocator allocator,
1040  VmaAllocation* pAllocation);
1041 
1050 VkResult vmaMapMemory(
1051  VmaAllocator allocator,
1052  VmaAllocation allocation,
1053  void** ppData);
1054 
1055 void vmaUnmapMemory(
1056  VmaAllocator allocator,
1057  VmaAllocation allocation);
1058 
1077 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
1078 
1086 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
1087 
1089 typedef struct VmaDefragmentationInfo {
1094  VkDeviceSize maxBytesToMove;
1101 
1103 typedef struct VmaDefragmentationStats {
1105  VkDeviceSize bytesMoved;
1107  VkDeviceSize bytesFreed;
1113 
1184 VkResult vmaDefragment(
1185  VmaAllocator allocator,
1186  VmaAllocation* pAllocations,
1187  size_t allocationCount,
1188  VkBool32* pAllocationsChanged,
1189  const VmaDefragmentationInfo *pDefragmentationInfo,
1190  VmaDefragmentationStats* pDefragmentationStats);
1191 
1194 
1217 VkResult vmaCreateBuffer(
1218  VmaAllocator allocator,
1219  const VkBufferCreateInfo* pBufferCreateInfo,
1220  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1221  VkBuffer* pBuffer,
1222  VmaAllocation* pAllocation,
1223  VmaAllocationInfo* pAllocationInfo);
1224 
1225 void vmaDestroyBuffer(
1226  VmaAllocator allocator,
1227  VkBuffer buffer,
1228  VmaAllocation allocation);
1229 
1231 VkResult vmaCreateImage(
1232  VmaAllocator allocator,
1233  const VkImageCreateInfo* pImageCreateInfo,
1234  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1235  VkImage* pImage,
1236  VmaAllocation* pAllocation,
1237  VmaAllocationInfo* pAllocationInfo);
1238 
1239 void vmaDestroyImage(
1240  VmaAllocator allocator,
1241  VkImage image,
1242  VmaAllocation allocation);
1243 
1246 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1247 
1248 // For Visual Studio IntelliSense.
1249 #ifdef __INTELLISENSE__
1250 #define VMA_IMPLEMENTATION
1251 #endif
1252 
1253 #ifdef VMA_IMPLEMENTATION
1254 #undef VMA_IMPLEMENTATION
1255 
1256 #include <cstdint>
1257 #include <cstdlib>
1258 #include <cstring>
1259 
1260 /*******************************************************************************
1261 CONFIGURATION SECTION
1262 
1263 Define some of these macros before each #include of this header or change them
1264 here if you need other then default behavior depending on your environment.
1265 */
1266 
1267 /*
1268 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1269 internally, like:
1270 
1271  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1272 
1273 Remove this macro if you are going to provide you own pointers to Vulkan
1274 functions via VmaAllocatorCreateInfo::pVulkanFunctions.
1275 */
1276 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1277 
1278 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1279 //#define VMA_USE_STL_CONTAINERS 1
1280 
1281 /* Set this macro to 1 to make the library including and using STL containers:
1282 std::pair, std::vector, std::list, std::unordered_map.
1283 
1284 Set it to 0 or undefined to make the library using its own implementation of
1285 the containers.
1286 */
1287 #if VMA_USE_STL_CONTAINERS
1288  #define VMA_USE_STL_VECTOR 1
1289  #define VMA_USE_STL_UNORDERED_MAP 1
1290  #define VMA_USE_STL_LIST 1
1291 #endif
1292 
1293 #if VMA_USE_STL_VECTOR
1294  #include <vector>
1295 #endif
1296 
1297 #if VMA_USE_STL_UNORDERED_MAP
1298  #include <unordered_map>
1299 #endif
1300 
1301 #if VMA_USE_STL_LIST
1302  #include <list>
1303 #endif
1304 
1305 /*
1306 Following headers are used in this CONFIGURATION section only, so feel free to
1307 remove them if not needed.
1308 */
1309 #include <cassert> // for assert
1310 #include <algorithm> // for min, max
1311 #include <mutex> // for std::mutex
1312 #include <atomic> // for std::atomic
1313 
1314 #if !defined(_WIN32)
1315  #include <malloc.h> // for aligned_alloc()
1316 #endif
1317 
1318 // Normal assert to check for programmer's errors, especially in Debug configuration.
1319 #ifndef VMA_ASSERT
1320  #ifdef _DEBUG
1321  #define VMA_ASSERT(expr) assert(expr)
1322  #else
1323  #define VMA_ASSERT(expr)
1324  #endif
1325 #endif
1326 
1327 // Assert that will be called very often, like inside data structures e.g. operator[].
1328 // Making it non-empty can make program slow.
1329 #ifndef VMA_HEAVY_ASSERT
1330  #ifdef _DEBUG
1331  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1332  #else
1333  #define VMA_HEAVY_ASSERT(expr)
1334  #endif
1335 #endif
1336 
1337 #ifndef VMA_NULL
1338  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1339  #define VMA_NULL nullptr
1340 #endif
1341 
1342 #ifndef VMA_ALIGN_OF
1343  #define VMA_ALIGN_OF(type) (__alignof(type))
1344 #endif
1345 
1346 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1347  #if defined(_WIN32)
1348  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1349  #else
1350  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1351  #endif
1352 #endif
1353 
1354 #ifndef VMA_SYSTEM_FREE
1355  #if defined(_WIN32)
1356  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1357  #else
1358  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1359  #endif
1360 #endif
1361 
1362 #ifndef VMA_MIN
1363  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1364 #endif
1365 
1366 #ifndef VMA_MAX
1367  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1368 #endif
1369 
1370 #ifndef VMA_SWAP
1371  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1372 #endif
1373 
1374 #ifndef VMA_SORT
1375  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1376 #endif
1377 
1378 #ifndef VMA_DEBUG_LOG
1379  #define VMA_DEBUG_LOG(format, ...)
1380  /*
1381  #define VMA_DEBUG_LOG(format, ...) do { \
1382  printf(format, __VA_ARGS__); \
1383  printf("\n"); \
1384  } while(false)
1385  */
1386 #endif
1387 
1388 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1389 #if VMA_STATS_STRING_ENABLED
1390  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1391  {
1392  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1393  }
1394  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1395  {
1396  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1397  }
1398  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1399  {
1400  snprintf(outStr, strLen, "%p", ptr);
1401  }
1402 #endif
1403 
1404 #ifndef VMA_MUTEX
1405  class VmaMutex
1406  {
1407  public:
1408  VmaMutex() { }
1409  ~VmaMutex() { }
1410  void Lock() { m_Mutex.lock(); }
1411  void Unlock() { m_Mutex.unlock(); }
1412  private:
1413  std::mutex m_Mutex;
1414  };
1415  #define VMA_MUTEX VmaMutex
1416 #endif
1417 
1418 /*
1419 If providing your own implementation, you need to implement a subset of std::atomic:
1420 
1421 - Constructor(uint32_t desired)
1422 - uint32_t load() const
1423 - void store(uint32_t desired)
1424 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1425 */
1426 #ifndef VMA_ATOMIC_UINT32
1427  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1428 #endif
1429 
1430 #ifndef VMA_BEST_FIT
1431 
1443  #define VMA_BEST_FIT (1)
1444 #endif
1445 
1446 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
1447 
1451  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
1452 #endif
1453 
1454 #ifndef VMA_DEBUG_ALIGNMENT
1455 
1459  #define VMA_DEBUG_ALIGNMENT (1)
1460 #endif
1461 
1462 #ifndef VMA_DEBUG_MARGIN
1463 
1467  #define VMA_DEBUG_MARGIN (0)
1468 #endif
1469 
1470 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1471 
1475  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1476 #endif
1477 
1478 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1479 
1483  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1484 #endif
1485 
1486 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1487  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
1489 #endif
1490 
1491 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1492  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
1494 #endif
1495 
1496 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
1497  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
1499 #endif
1500 
1501 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1502 
1503 /*******************************************************************************
1504 END OF CONFIGURATION
1505 */
1506 
1507 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1508  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1509 
1510 // Returns number of bits set to 1 in (v).
1511 static inline uint32_t CountBitsSet(uint32_t v)
1512 {
1513  uint32_t c = v - ((v >> 1) & 0x55555555);
1514  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
1515  c = ((c >> 4) + c) & 0x0F0F0F0F;
1516  c = ((c >> 8) + c) & 0x00FF00FF;
1517  c = ((c >> 16) + c) & 0x0000FFFF;
1518  return c;
1519 }
1520 
1521 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
1522 // Use types like uint32_t, uint64_t as T.
1523 template <typename T>
1524 static inline T VmaAlignUp(T val, T align)
1525 {
1526  return (val + align - 1) / align * align;
1527 }
1528 
1529 // Division with mathematical rounding to nearest number.
1530 template <typename T>
1531 inline T VmaRoundDiv(T x, T y)
1532 {
1533  return (x + (y / (T)2)) / y;
1534 }
1535 
1536 #ifndef VMA_SORT
1537 
1538 template<typename Iterator, typename Compare>
1539 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
1540 {
1541  Iterator centerValue = end; --centerValue;
1542  Iterator insertIndex = beg;
1543  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
1544  {
1545  if(cmp(*memTypeIndex, *centerValue))
1546  {
1547  if(insertIndex != memTypeIndex)
1548  {
1549  VMA_SWAP(*memTypeIndex, *insertIndex);
1550  }
1551  ++insertIndex;
1552  }
1553  }
1554  if(insertIndex != centerValue)
1555  {
1556  VMA_SWAP(*insertIndex, *centerValue);
1557  }
1558  return insertIndex;
1559 }
1560 
1561 template<typename Iterator, typename Compare>
1562 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1563 {
1564  if(beg < end)
1565  {
1566  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1567  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1568  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1569  }
1570 }
1571 
1572 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1573 
1574 #endif // #ifndef VMA_SORT
1575 
1576 /*
1577 Returns true if two memory blocks occupy overlapping pages.
1578 ResourceA must be in less memory offset than ResourceB.
1579 
1580 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1581 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1582 */
1583 static inline bool VmaBlocksOnSamePage(
1584  VkDeviceSize resourceAOffset,
1585  VkDeviceSize resourceASize,
1586  VkDeviceSize resourceBOffset,
1587  VkDeviceSize pageSize)
1588 {
1589  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1590  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1591  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1592  VkDeviceSize resourceBStart = resourceBOffset;
1593  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1594  return resourceAEndPage == resourceBStartPage;
1595 }
1596 
1597 enum VmaSuballocationType
1598 {
1599  VMA_SUBALLOCATION_TYPE_FREE = 0,
1600  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1601  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1602  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1603  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1604  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1605  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1606 };
1607 
1608 /*
1609 Returns true if given suballocation types could conflict and must respect
1610 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
1611 or linear image and another one is optimal image. If type is unknown, behave
1612 conservatively.
1613 */
1614 static inline bool VmaIsBufferImageGranularityConflict(
1615  VmaSuballocationType suballocType1,
1616  VmaSuballocationType suballocType2)
1617 {
1618  if(suballocType1 > suballocType2)
1619  {
1620  VMA_SWAP(suballocType1, suballocType2);
1621  }
1622 
1623  switch(suballocType1)
1624  {
1625  case VMA_SUBALLOCATION_TYPE_FREE:
1626  return false;
1627  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1628  return true;
1629  case VMA_SUBALLOCATION_TYPE_BUFFER:
1630  return
1631  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1632  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1633  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1634  return
1635  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1636  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1637  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1638  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1639  return
1640  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1641  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1642  return false;
1643  default:
1644  VMA_ASSERT(0);
1645  return true;
1646  }
1647 }
1648 
1649 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1650 struct VmaMutexLock
1651 {
1652 public:
1653  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1654  m_pMutex(useMutex ? &mutex : VMA_NULL)
1655  {
1656  if(m_pMutex)
1657  {
1658  m_pMutex->Lock();
1659  }
1660  }
1661 
1662  ~VmaMutexLock()
1663  {
1664  if(m_pMutex)
1665  {
1666  m_pMutex->Unlock();
1667  }
1668  }
1669 
1670 private:
1671  VMA_MUTEX* m_pMutex;
1672 };
1673 
1674 #if VMA_DEBUG_GLOBAL_MUTEX
1675  static VMA_MUTEX gDebugGlobalMutex;
1676  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
1677 #else
1678  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1679 #endif
1680 
1681 // Minimum size of a free suballocation to register it in the free suballocation collection.
1682 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1683 
1684 /*
1685 Performs binary search and returns iterator to first element that is greater or
1686 equal to (key), according to comparison (cmp).
1687 
1688 Cmp should return true if first argument is less than second argument.
1689 
1690 Returned value is the found element, if present in the collection or place where
1691 new element with value (key) should be inserted.
1692 */
1693 template <typename IterT, typename KeyT, typename CmpT>
1694 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1695 {
1696  size_t down = 0, up = (end - beg);
1697  while(down < up)
1698  {
1699  const size_t mid = (down + up) / 2;
1700  if(cmp(*(beg+mid), key))
1701  {
1702  down = mid + 1;
1703  }
1704  else
1705  {
1706  up = mid;
1707  }
1708  }
1709  return beg + down;
1710 }
1711 
1713 // Memory allocation
1714 
1715 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1716 {
1717  if((pAllocationCallbacks != VMA_NULL) &&
1718  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1719  {
1720  return (*pAllocationCallbacks->pfnAllocation)(
1721  pAllocationCallbacks->pUserData,
1722  size,
1723  alignment,
1724  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1725  }
1726  else
1727  {
1728  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1729  }
1730 }
1731 
1732 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1733 {
1734  if((pAllocationCallbacks != VMA_NULL) &&
1735  (pAllocationCallbacks->pfnFree != VMA_NULL))
1736  {
1737  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1738  }
1739  else
1740  {
1741  VMA_SYSTEM_FREE(ptr);
1742  }
1743 }
1744 
1745 template<typename T>
1746 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1747 {
1748  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1749 }
1750 
1751 template<typename T>
1752 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1753 {
1754  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1755 }
1756 
1757 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
1758 
1759 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
1760 
1761 template<typename T>
1762 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
1763 {
1764  ptr->~T();
1765  VmaFree(pAllocationCallbacks, ptr);
1766 }
1767 
1768 template<typename T>
1769 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
1770 {
1771  if(ptr != VMA_NULL)
1772  {
1773  for(size_t i = count; i--; )
1774  {
1775  ptr[i].~T();
1776  }
1777  VmaFree(pAllocationCallbacks, ptr);
1778  }
1779 }
1780 
1781 // STL-compatible allocator.
1782 template<typename T>
1783 class VmaStlAllocator
1784 {
1785 public:
1786  const VkAllocationCallbacks* const m_pCallbacks;
1787  typedef T value_type;
1788 
1789  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
1790  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
1791 
1792  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
1793  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
1794 
1795  template<typename U>
1796  bool operator==(const VmaStlAllocator<U>& rhs) const
1797  {
1798  return m_pCallbacks == rhs.m_pCallbacks;
1799  }
1800  template<typename U>
1801  bool operator!=(const VmaStlAllocator<U>& rhs) const
1802  {
1803  return m_pCallbacks != rhs.m_pCallbacks;
1804  }
1805 
1806  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
1807 };
1808 
1809 #if VMA_USE_STL_VECTOR
1810 
1811 #define VmaVector std::vector
1812 
1813 template<typename T, typename allocatorT>
1814 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
1815 {
1816  vec.insert(vec.begin() + index, item);
1817 }
1818 
1819 template<typename T, typename allocatorT>
1820 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
1821 {
1822  vec.erase(vec.begin() + index);
1823 }
1824 
1825 #else // #if VMA_USE_STL_VECTOR
1826 
1827 /* Class with interface compatible with subset of std::vector.
1828 T must be POD because constructors and destructors are not called and memcpy is
1829 used for these objects. */
1830 template<typename T, typename AllocatorT>
1831 class VmaVector
1832 {
1833 public:
1834  typedef T value_type;
1835 
1836  VmaVector(const AllocatorT& allocator) :
1837  m_Allocator(allocator),
1838  m_pArray(VMA_NULL),
1839  m_Count(0),
1840  m_Capacity(0)
1841  {
1842  }
1843 
1844  VmaVector(size_t count, const AllocatorT& allocator) :
1845  m_Allocator(allocator),
1846  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1847  m_Count(count),
1848  m_Capacity(count)
1849  {
1850  }
1851 
1852  VmaVector(const VmaVector<T, AllocatorT>& src) :
1853  m_Allocator(src.m_Allocator),
1854  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1855  m_Count(src.m_Count),
1856  m_Capacity(src.m_Count)
1857  {
1858  if(m_Count != 0)
1859  {
1860  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1861  }
1862  }
1863 
1864  ~VmaVector()
1865  {
1866  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1867  }
1868 
1869  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1870  {
1871  if(&rhs != this)
1872  {
1873  resize(rhs.m_Count);
1874  if(m_Count != 0)
1875  {
1876  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1877  }
1878  }
1879  return *this;
1880  }
1881 
1882  bool empty() const { return m_Count == 0; }
1883  size_t size() const { return m_Count; }
1884  T* data() { return m_pArray; }
1885  const T* data() const { return m_pArray; }
1886 
1887  T& operator[](size_t index)
1888  {
1889  VMA_HEAVY_ASSERT(index < m_Count);
1890  return m_pArray[index];
1891  }
1892  const T& operator[](size_t index) const
1893  {
1894  VMA_HEAVY_ASSERT(index < m_Count);
1895  return m_pArray[index];
1896  }
1897 
1898  T& front()
1899  {
1900  VMA_HEAVY_ASSERT(m_Count > 0);
1901  return m_pArray[0];
1902  }
1903  const T& front() const
1904  {
1905  VMA_HEAVY_ASSERT(m_Count > 0);
1906  return m_pArray[0];
1907  }
1908  T& back()
1909  {
1910  VMA_HEAVY_ASSERT(m_Count > 0);
1911  return m_pArray[m_Count - 1];
1912  }
1913  const T& back() const
1914  {
1915  VMA_HEAVY_ASSERT(m_Count > 0);
1916  return m_pArray[m_Count - 1];
1917  }
1918 
1919  void reserve(size_t newCapacity, bool freeMemory = false)
1920  {
1921  newCapacity = VMA_MAX(newCapacity, m_Count);
1922 
1923  if((newCapacity < m_Capacity) && !freeMemory)
1924  {
1925  newCapacity = m_Capacity;
1926  }
1927 
1928  if(newCapacity != m_Capacity)
1929  {
1930  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
1931  if(m_Count != 0)
1932  {
1933  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1934  }
1935  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1936  m_Capacity = newCapacity;
1937  m_pArray = newArray;
1938  }
1939  }
1940 
1941  void resize(size_t newCount, bool freeMemory = false)
1942  {
1943  size_t newCapacity = m_Capacity;
1944  if(newCount > m_Capacity)
1945  {
1946  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1947  }
1948  else if(freeMemory)
1949  {
1950  newCapacity = newCount;
1951  }
1952 
1953  if(newCapacity != m_Capacity)
1954  {
1955  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1956  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1957  if(elementsToCopy != 0)
1958  {
1959  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1960  }
1961  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1962  m_Capacity = newCapacity;
1963  m_pArray = newArray;
1964  }
1965 
1966  m_Count = newCount;
1967  }
1968 
1969  void clear(bool freeMemory = false)
1970  {
1971  resize(0, freeMemory);
1972  }
1973 
1974  void insert(size_t index, const T& src)
1975  {
1976  VMA_HEAVY_ASSERT(index <= m_Count);
1977  const size_t oldCount = size();
1978  resize(oldCount + 1);
1979  if(index < oldCount)
1980  {
1981  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1982  }
1983  m_pArray[index] = src;
1984  }
1985 
1986  void remove(size_t index)
1987  {
1988  VMA_HEAVY_ASSERT(index < m_Count);
1989  const size_t oldCount = size();
1990  if(index < oldCount - 1)
1991  {
1992  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1993  }
1994  resize(oldCount - 1);
1995  }
1996 
1997  void push_back(const T& src)
1998  {
1999  const size_t newIndex = size();
2000  resize(newIndex + 1);
2001  m_pArray[newIndex] = src;
2002  }
2003 
2004  void pop_back()
2005  {
2006  VMA_HEAVY_ASSERT(m_Count > 0);
2007  resize(size() - 1);
2008  }
2009 
2010  void push_front(const T& src)
2011  {
2012  insert(0, src);
2013  }
2014 
2015  void pop_front()
2016  {
2017  VMA_HEAVY_ASSERT(m_Count > 0);
2018  remove(0);
2019  }
2020 
2021  typedef T* iterator;
2022 
2023  iterator begin() { return m_pArray; }
2024  iterator end() { return m_pArray + m_Count; }
2025 
2026 private:
2027  AllocatorT m_Allocator;
2028  T* m_pArray;
2029  size_t m_Count;
2030  size_t m_Capacity;
2031 };
2032 
2033 template<typename T, typename allocatorT>
2034 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2035 {
2036  vec.insert(index, item);
2037 }
2038 
2039 template<typename T, typename allocatorT>
2040 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2041 {
2042  vec.remove(index);
2043 }
2044 
2045 #endif // #if VMA_USE_STL_VECTOR
2046 
2047 template<typename CmpLess, typename VectorT>
2048 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2049 {
2050  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2051  vector.data(),
2052  vector.data() + vector.size(),
2053  value,
2054  CmpLess()) - vector.data();
2055  VmaVectorInsert(vector, indexToInsert, value);
2056  return indexToInsert;
2057 }
2058 
2059 template<typename CmpLess, typename VectorT>
2060 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2061 {
2062  CmpLess comparator;
2063  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2064  vector.begin(),
2065  vector.end(),
2066  value,
2067  comparator);
2068  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2069  {
2070  size_t indexToRemove = it - vector.begin();
2071  VmaVectorRemove(vector, indexToRemove);
2072  return true;
2073  }
2074  return false;
2075 }
2076 
2077 template<typename CmpLess, typename VectorT>
2078 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2079 {
2080  CmpLess comparator;
2081  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2082  vector.data(),
2083  vector.data() + vector.size(),
2084  value,
2085  comparator);
2086  if(it != vector.size() && !comparator(*it, value) && !comparator(value, *it))
2087  {
2088  return it - vector.begin();
2089  }
2090  else
2091  {
2092  return vector.size();
2093  }
2094 }
2095 
2097 // class VmaPoolAllocator
2098 
2099 /*
2100 Allocator for objects of type T using a list of arrays (pools) to speed up
2101 allocation. Number of elements that can be allocated is not bounded because
2102 allocator can create multiple blocks.
2103 */
2104 template<typename T>
2105 class VmaPoolAllocator
2106 {
2107 public:
2108  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2109  ~VmaPoolAllocator();
2110  void Clear();
2111  T* Alloc();
2112  void Free(T* ptr);
2113 
2114 private:
2115  union Item
2116  {
2117  uint32_t NextFreeIndex;
2118  T Value;
2119  };
2120 
2121  struct ItemBlock
2122  {
2123  Item* pItems;
2124  uint32_t FirstFreeIndex;
2125  };
2126 
2127  const VkAllocationCallbacks* m_pAllocationCallbacks;
2128  size_t m_ItemsPerBlock;
2129  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2130 
2131  ItemBlock& CreateNewBlock();
2132 };
2133 
2134 template<typename T>
2135 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2136  m_pAllocationCallbacks(pAllocationCallbacks),
2137  m_ItemsPerBlock(itemsPerBlock),
2138  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2139 {
2140  VMA_ASSERT(itemsPerBlock > 0);
2141 }
2142 
2143 template<typename T>
2144 VmaPoolAllocator<T>::~VmaPoolAllocator()
2145 {
2146  Clear();
2147 }
2148 
2149 template<typename T>
2150 void VmaPoolAllocator<T>::Clear()
2151 {
2152  for(size_t i = m_ItemBlocks.size(); i--; )
2153  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2154  m_ItemBlocks.clear();
2155 }
2156 
2157 template<typename T>
2158 T* VmaPoolAllocator<T>::Alloc()
2159 {
2160  for(size_t i = m_ItemBlocks.size(); i--; )
2161  {
2162  ItemBlock& block = m_ItemBlocks[i];
2163  // This block has some free items: Use first one.
2164  if(block.FirstFreeIndex != UINT32_MAX)
2165  {
2166  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2167  block.FirstFreeIndex = pItem->NextFreeIndex;
2168  return &pItem->Value;
2169  }
2170  }
2171 
2172  // No block has free item: Create new one and use it.
2173  ItemBlock& newBlock = CreateNewBlock();
2174  Item* const pItem = &newBlock.pItems[0];
2175  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2176  return &pItem->Value;
2177 }
2178 
2179 template<typename T>
2180 void VmaPoolAllocator<T>::Free(T* ptr)
2181 {
2182  // Search all memory blocks to find ptr.
2183  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2184  {
2185  ItemBlock& block = m_ItemBlocks[i];
2186 
2187  // Casting to union.
2188  Item* pItemPtr;
2189  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2190 
2191  // Check if pItemPtr is in address range of this block.
2192  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2193  {
2194  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2195  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2196  block.FirstFreeIndex = index;
2197  return;
2198  }
2199  }
2200  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2201 }
2202 
2203 template<typename T>
2204 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2205 {
2206  ItemBlock newBlock = {
2207  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2208 
2209  m_ItemBlocks.push_back(newBlock);
2210 
2211  // Setup singly-linked list of all free items in this block.
2212  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2213  newBlock.pItems[i].NextFreeIndex = i + 1;
2214  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2215  return m_ItemBlocks.back();
2216 }
2217 
2219 // class VmaRawList, VmaList
2220 
2221 #if VMA_USE_STL_LIST
2222 
2223 #define VmaList std::list
2224 
2225 #else // #if VMA_USE_STL_LIST
2226 
2227 template<typename T>
2228 struct VmaListItem
2229 {
2230  VmaListItem* pPrev;
2231  VmaListItem* pNext;
2232  T Value;
2233 };
2234 
2235 // Doubly linked list.
2236 template<typename T>
2237 class VmaRawList
2238 {
2239 public:
2240  typedef VmaListItem<T> ItemType;
2241 
2242  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2243  ~VmaRawList();
2244  void Clear();
2245 
2246  size_t GetCount() const { return m_Count; }
2247  bool IsEmpty() const { return m_Count == 0; }
2248 
2249  ItemType* Front() { return m_pFront; }
2250  const ItemType* Front() const { return m_pFront; }
2251  ItemType* Back() { return m_pBack; }
2252  const ItemType* Back() const { return m_pBack; }
2253 
2254  ItemType* PushBack();
2255  ItemType* PushFront();
2256  ItemType* PushBack(const T& value);
2257  ItemType* PushFront(const T& value);
2258  void PopBack();
2259  void PopFront();
2260 
2261  // Item can be null - it means PushBack.
2262  ItemType* InsertBefore(ItemType* pItem);
2263  // Item can be null - it means PushFront.
2264  ItemType* InsertAfter(ItemType* pItem);
2265 
2266  ItemType* InsertBefore(ItemType* pItem, const T& value);
2267  ItemType* InsertAfter(ItemType* pItem, const T& value);
2268 
2269  void Remove(ItemType* pItem);
2270 
2271 private:
2272  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2273  VmaPoolAllocator<ItemType> m_ItemAllocator;
2274  ItemType* m_pFront;
2275  ItemType* m_pBack;
2276  size_t m_Count;
2277 
2278  // Declared not defined, to block copy constructor and assignment operator.
2279  VmaRawList(const VmaRawList<T>& src);
2280  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2281 };
2282 
2283 template<typename T>
2284 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2285  m_pAllocationCallbacks(pAllocationCallbacks),
2286  m_ItemAllocator(pAllocationCallbacks, 128),
2287  m_pFront(VMA_NULL),
2288  m_pBack(VMA_NULL),
2289  m_Count(0)
2290 {
2291 }
2292 
2293 template<typename T>
2294 VmaRawList<T>::~VmaRawList()
2295 {
2296  // Intentionally not calling Clear, because that would be unnecessary
2297  // computations to return all items to m_ItemAllocator as free.
2298 }
2299 
2300 template<typename T>
2301 void VmaRawList<T>::Clear()
2302 {
2303  if(IsEmpty() == false)
2304  {
2305  ItemType* pItem = m_pBack;
2306  while(pItem != VMA_NULL)
2307  {
2308  ItemType* const pPrevItem = pItem->pPrev;
2309  m_ItemAllocator.Free(pItem);
2310  pItem = pPrevItem;
2311  }
2312  m_pFront = VMA_NULL;
2313  m_pBack = VMA_NULL;
2314  m_Count = 0;
2315  }
2316 }
2317 
2318 template<typename T>
2319 VmaListItem<T>* VmaRawList<T>::PushBack()
2320 {
2321  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2322  pNewItem->pNext = VMA_NULL;
2323  if(IsEmpty())
2324  {
2325  pNewItem->pPrev = VMA_NULL;
2326  m_pFront = pNewItem;
2327  m_pBack = pNewItem;
2328  m_Count = 1;
2329  }
2330  else
2331  {
2332  pNewItem->pPrev = m_pBack;
2333  m_pBack->pNext = pNewItem;
2334  m_pBack = pNewItem;
2335  ++m_Count;
2336  }
2337  return pNewItem;
2338 }
2339 
2340 template<typename T>
2341 VmaListItem<T>* VmaRawList<T>::PushFront()
2342 {
2343  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2344  pNewItem->pPrev = VMA_NULL;
2345  if(IsEmpty())
2346  {
2347  pNewItem->pNext = VMA_NULL;
2348  m_pFront = pNewItem;
2349  m_pBack = pNewItem;
2350  m_Count = 1;
2351  }
2352  else
2353  {
2354  pNewItem->pNext = m_pFront;
2355  m_pFront->pPrev = pNewItem;
2356  m_pFront = pNewItem;
2357  ++m_Count;
2358  }
2359  return pNewItem;
2360 }
2361 
2362 template<typename T>
2363 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2364 {
2365  ItemType* const pNewItem = PushBack();
2366  pNewItem->Value = value;
2367  return pNewItem;
2368 }
2369 
2370 template<typename T>
2371 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2372 {
2373  ItemType* const pNewItem = PushFront();
2374  pNewItem->Value = value;
2375  return pNewItem;
2376 }
2377 
2378 template<typename T>
2379 void VmaRawList<T>::PopBack()
2380 {
2381  VMA_HEAVY_ASSERT(m_Count > 0);
2382  ItemType* const pBackItem = m_pBack;
2383  ItemType* const pPrevItem = pBackItem->pPrev;
2384  if(pPrevItem != VMA_NULL)
2385  {
2386  pPrevItem->pNext = VMA_NULL;
2387  }
2388  m_pBack = pPrevItem;
2389  m_ItemAllocator.Free(pBackItem);
2390  --m_Count;
2391 }
2392 
2393 template<typename T>
2394 void VmaRawList<T>::PopFront()
2395 {
2396  VMA_HEAVY_ASSERT(m_Count > 0);
2397  ItemType* const pFrontItem = m_pFront;
2398  ItemType* const pNextItem = pFrontItem->pNext;
2399  if(pNextItem != VMA_NULL)
2400  {
2401  pNextItem->pPrev = VMA_NULL;
2402  }
2403  m_pFront = pNextItem;
2404  m_ItemAllocator.Free(pFrontItem);
2405  --m_Count;
2406 }
2407 
2408 template<typename T>
2409 void VmaRawList<T>::Remove(ItemType* pItem)
2410 {
2411  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2412  VMA_HEAVY_ASSERT(m_Count > 0);
2413 
2414  if(pItem->pPrev != VMA_NULL)
2415  {
2416  pItem->pPrev->pNext = pItem->pNext;
2417  }
2418  else
2419  {
2420  VMA_HEAVY_ASSERT(m_pFront == pItem);
2421  m_pFront = pItem->pNext;
2422  }
2423 
2424  if(pItem->pNext != VMA_NULL)
2425  {
2426  pItem->pNext->pPrev = pItem->pPrev;
2427  }
2428  else
2429  {
2430  VMA_HEAVY_ASSERT(m_pBack == pItem);
2431  m_pBack = pItem->pPrev;
2432  }
2433 
2434  m_ItemAllocator.Free(pItem);
2435  --m_Count;
2436 }
2437 
2438 template<typename T>
2439 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2440 {
2441  if(pItem != VMA_NULL)
2442  {
2443  ItemType* const prevItem = pItem->pPrev;
2444  ItemType* const newItem = m_ItemAllocator.Alloc();
2445  newItem->pPrev = prevItem;
2446  newItem->pNext = pItem;
2447  pItem->pPrev = newItem;
2448  if(prevItem != VMA_NULL)
2449  {
2450  prevItem->pNext = newItem;
2451  }
2452  else
2453  {
2454  VMA_HEAVY_ASSERT(m_pFront == pItem);
2455  m_pFront = newItem;
2456  }
2457  ++m_Count;
2458  return newItem;
2459  }
2460  else
2461  return PushBack();
2462 }
2463 
2464 template<typename T>
2465 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2466 {
2467  if(pItem != VMA_NULL)
2468  {
2469  ItemType* const nextItem = pItem->pNext;
2470  ItemType* const newItem = m_ItemAllocator.Alloc();
2471  newItem->pNext = nextItem;
2472  newItem->pPrev = pItem;
2473  pItem->pNext = newItem;
2474  if(nextItem != VMA_NULL)
2475  {
2476  nextItem->pPrev = newItem;
2477  }
2478  else
2479  {
2480  VMA_HEAVY_ASSERT(m_pBack == pItem);
2481  m_pBack = newItem;
2482  }
2483  ++m_Count;
2484  return newItem;
2485  }
2486  else
2487  return PushFront();
2488 }
2489 
2490 template<typename T>
2491 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2492 {
2493  ItemType* const newItem = InsertBefore(pItem);
2494  newItem->Value = value;
2495  return newItem;
2496 }
2497 
2498 template<typename T>
2499 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2500 {
2501  ItemType* const newItem = InsertAfter(pItem);
2502  newItem->Value = value;
2503  return newItem;
2504 }
2505 
2506 template<typename T, typename AllocatorT>
2507 class VmaList
2508 {
2509 public:
2510  class iterator
2511  {
2512  public:
2513  iterator() :
2514  m_pList(VMA_NULL),
2515  m_pItem(VMA_NULL)
2516  {
2517  }
2518 
2519  T& operator*() const
2520  {
2521  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2522  return m_pItem->Value;
2523  }
2524  T* operator->() const
2525  {
2526  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2527  return &m_pItem->Value;
2528  }
2529 
2530  iterator& operator++()
2531  {
2532  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2533  m_pItem = m_pItem->pNext;
2534  return *this;
2535  }
2536  iterator& operator--()
2537  {
2538  if(m_pItem != VMA_NULL)
2539  {
2540  m_pItem = m_pItem->pPrev;
2541  }
2542  else
2543  {
2544  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
2545  m_pItem = m_pList->Back();
2546  }
2547  return *this;
2548  }
2549 
2550  iterator operator++(int)
2551  {
2552  iterator result = *this;
2553  ++*this;
2554  return result;
2555  }
2556  iterator operator--(int)
2557  {
2558  iterator result = *this;
2559  --*this;
2560  return result;
2561  }
2562 
2563  bool operator==(const iterator& rhs) const
2564  {
2565  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2566  return m_pItem == rhs.m_pItem;
2567  }
2568  bool operator!=(const iterator& rhs) const
2569  {
2570  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2571  return m_pItem != rhs.m_pItem;
2572  }
2573 
2574  private:
2575  VmaRawList<T>* m_pList;
2576  VmaListItem<T>* m_pItem;
2577 
2578  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2579  m_pList(pList),
2580  m_pItem(pItem)
2581  {
2582  }
2583 
2584  friend class VmaList<T, AllocatorT>;
2585  };
2586 
2587  class const_iterator
2588  {
2589  public:
2590  const_iterator() :
2591  m_pList(VMA_NULL),
2592  m_pItem(VMA_NULL)
2593  {
2594  }
2595 
2596  const_iterator(const iterator& src) :
2597  m_pList(src.m_pList),
2598  m_pItem(src.m_pItem)
2599  {
2600  }
2601 
2602  const T& operator*() const
2603  {
2604  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2605  return m_pItem->Value;
2606  }
2607  const T* operator->() const
2608  {
2609  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2610  return &m_pItem->Value;
2611  }
2612 
2613  const_iterator& operator++()
2614  {
2615  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2616  m_pItem = m_pItem->pNext;
2617  return *this;
2618  }
2619  const_iterator& operator--()
2620  {
2621  if(m_pItem != VMA_NULL)
2622  {
2623  m_pItem = m_pItem->pPrev;
2624  }
2625  else
2626  {
2627  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2628  m_pItem = m_pList->Back();
2629  }
2630  return *this;
2631  }
2632 
2633  const_iterator operator++(int)
2634  {
2635  const_iterator result = *this;
2636  ++*this;
2637  return result;
2638  }
2639  const_iterator operator--(int)
2640  {
2641  const_iterator result = *this;
2642  --*this;
2643  return result;
2644  }
2645 
2646  bool operator==(const const_iterator& rhs) const
2647  {
2648  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2649  return m_pItem == rhs.m_pItem;
2650  }
2651  bool operator!=(const const_iterator& rhs) const
2652  {
2653  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2654  return m_pItem != rhs.m_pItem;
2655  }
2656 
2657  private:
2658  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2659  m_pList(pList),
2660  m_pItem(pItem)
2661  {
2662  }
2663 
2664  const VmaRawList<T>* m_pList;
2665  const VmaListItem<T>* m_pItem;
2666 
2667  friend class VmaList<T, AllocatorT>;
2668  };
2669 
2670  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2671 
2672  bool empty() const { return m_RawList.IsEmpty(); }
2673  size_t size() const { return m_RawList.GetCount(); }
2674 
2675  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2676  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2677 
2678  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2679  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2680 
2681  void clear() { m_RawList.Clear(); }
2682  void push_back(const T& value) { m_RawList.PushBack(value); }
2683  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2684  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2685 
2686 private:
2687  VmaRawList<T> m_RawList;
2688 };
2689 
2690 #endif // #if VMA_USE_STL_LIST
2691 
2693 // class VmaMap
2694 
2695 // Unused in this version.
2696 #if 0
2697 
2698 #if VMA_USE_STL_UNORDERED_MAP
2699 
2700 #define VmaPair std::pair
2701 
2702 #define VMA_MAP_TYPE(KeyT, ValueT) \
2703  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2704 
2705 #else // #if VMA_USE_STL_UNORDERED_MAP
2706 
2707 template<typename T1, typename T2>
2708 struct VmaPair
2709 {
2710  T1 first;
2711  T2 second;
2712 
2713  VmaPair() : first(), second() { }
2714  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2715 };
2716 
2717 /* Class compatible with subset of interface of std::unordered_map.
2718 KeyT, ValueT must be POD because they will be stored in VmaVector.
2719 */
2720 template<typename KeyT, typename ValueT>
2721 class VmaMap
2722 {
2723 public:
2724  typedef VmaPair<KeyT, ValueT> PairType;
2725  typedef PairType* iterator;
2726 
2727  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2728 
2729  iterator begin() { return m_Vector.begin(); }
2730  iterator end() { return m_Vector.end(); }
2731 
2732  void insert(const PairType& pair);
2733  iterator find(const KeyT& key);
2734  void erase(iterator it);
2735 
2736 private:
2737  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2738 };
2739 
2740 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2741 
2742 template<typename FirstT, typename SecondT>
2743 struct VmaPairFirstLess
2744 {
2745  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2746  {
2747  return lhs.first < rhs.first;
2748  }
2749  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2750  {
2751  return lhs.first < rhsFirst;
2752  }
2753 };
2754 
2755 template<typename KeyT, typename ValueT>
2756 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
2757 {
2758  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2759  m_Vector.data(),
2760  m_Vector.data() + m_Vector.size(),
2761  pair,
2762  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
2763  VmaVectorInsert(m_Vector, indexToInsert, pair);
2764 }
2765 
2766 template<typename KeyT, typename ValueT>
2767 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
2768 {
2769  PairType* it = VmaBinaryFindFirstNotLess(
2770  m_Vector.data(),
2771  m_Vector.data() + m_Vector.size(),
2772  key,
2773  VmaPairFirstLess<KeyT, ValueT>());
2774  if((it != m_Vector.end()) && (it->first == key))
2775  {
2776  return it;
2777  }
2778  else
2779  {
2780  return m_Vector.end();
2781  }
2782 }
2783 
2784 template<typename KeyT, typename ValueT>
2785 void VmaMap<KeyT, ValueT>::erase(iterator it)
2786 {
2787  VmaVectorRemove(m_Vector, it - m_Vector.begin());
2788 }
2789 
2790 #endif // #if VMA_USE_STL_UNORDERED_MAP
2791 
2792 #endif // #if 0
2793 
2795 
2796 class VmaDeviceMemoryBlock;
2797 
2798 enum VMA_BLOCK_VECTOR_TYPE
2799 {
2800  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
2801  VMA_BLOCK_VECTOR_TYPE_MAPPED,
2802  VMA_BLOCK_VECTOR_TYPE_COUNT
2803 };
2804 
2805 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
2806 {
2807  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
2808  VMA_BLOCK_VECTOR_TYPE_MAPPED :
2809  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
2810 }
2811 
2812 struct VmaAllocation_T
2813 {
2814 public:
2815  enum ALLOCATION_TYPE
2816  {
2817  ALLOCATION_TYPE_NONE,
2818  ALLOCATION_TYPE_BLOCK,
2819  ALLOCATION_TYPE_OWN,
2820  };
2821 
2822  VmaAllocation_T(uint32_t currentFrameIndex) :
2823  m_Alignment(1),
2824  m_Size(0),
2825  m_pUserData(VMA_NULL),
2826  m_Type(ALLOCATION_TYPE_NONE),
2827  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
2828  m_LastUseFrameIndex(currentFrameIndex)
2829  {
2830  }
2831 
2832  void InitBlockAllocation(
2833  VmaPool hPool,
2834  VmaDeviceMemoryBlock* block,
2835  VkDeviceSize offset,
2836  VkDeviceSize alignment,
2837  VkDeviceSize size,
2838  VmaSuballocationType suballocationType,
2839  void* pUserData,
2840  bool canBecomeLost)
2841  {
2842  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2843  VMA_ASSERT(block != VMA_NULL);
2844  m_Type = ALLOCATION_TYPE_BLOCK;
2845  m_Alignment = alignment;
2846  m_Size = size;
2847  m_pUserData = pUserData;
2848  m_SuballocationType = suballocationType;
2849  m_BlockAllocation.m_hPool = hPool;
2850  m_BlockAllocation.m_Block = block;
2851  m_BlockAllocation.m_Offset = offset;
2852  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
2853  }
2854 
2855  void InitLost()
2856  {
2857  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2858  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
2859  m_Type = ALLOCATION_TYPE_BLOCK;
2860  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
2861  m_BlockAllocation.m_Block = VMA_NULL;
2862  m_BlockAllocation.m_Offset = 0;
2863  m_BlockAllocation.m_CanBecomeLost = true;
2864  }
2865 
2866  void ChangeBlockAllocation(
2867  VmaDeviceMemoryBlock* block,
2868  VkDeviceSize offset)
2869  {
2870  VMA_ASSERT(block != VMA_NULL);
2871  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2872  m_BlockAllocation.m_Block = block;
2873  m_BlockAllocation.m_Offset = offset;
2874  }
2875 
2876  void InitOwnAllocation(
2877  uint32_t memoryTypeIndex,
2878  VkDeviceMemory hMemory,
2879  VmaSuballocationType suballocationType,
2880  bool persistentMap,
2881  void* pMappedData,
2882  VkDeviceSize size,
2883  void* pUserData)
2884  {
2885  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2886  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
2887  m_Type = ALLOCATION_TYPE_OWN;
2888  m_Alignment = 0;
2889  m_Size = size;
2890  m_pUserData = pUserData;
2891  m_SuballocationType = suballocationType;
2892  m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
2893  m_OwnAllocation.m_hMemory = hMemory;
2894  m_OwnAllocation.m_PersistentMap = persistentMap;
2895  m_OwnAllocation.m_pMappedData = pMappedData;
2896  }
2897 
2898  ALLOCATION_TYPE GetType() const { return m_Type; }
2899  VkDeviceSize GetAlignment() const { return m_Alignment; }
2900  VkDeviceSize GetSize() const { return m_Size; }
2901  void* GetUserData() const { return m_pUserData; }
2902  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
2903  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
2904 
2905  VmaDeviceMemoryBlock* GetBlock() const
2906  {
2907  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2908  return m_BlockAllocation.m_Block;
2909  }
2910  VkDeviceSize GetOffset() const;
2911  VkDeviceMemory GetMemory() const;
2912  uint32_t GetMemoryTypeIndex() const;
2913  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
2914  void* GetMappedData() const;
2915  bool CanBecomeLost() const;
2916  VmaPool GetPool() const;
2917 
2918  VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
2919  void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
2920 
2921  uint32_t GetLastUseFrameIndex() const
2922  {
2923  return m_LastUseFrameIndex.load();
2924  }
2925  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
2926  {
2927  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
2928  }
2929  /*
2930  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
2931  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
2932  - Else, returns false.
2933 
2934  If hAllocation is already lost, assert - you should not call it then.
2935  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
2936  */
2937  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
2938 
2939  void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
2940  {
2941  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2942  outInfo.BlockCount = 1;
2943  outInfo.AllocationCount = 1;
2944  outInfo.UnusedRangeCount = 0;
2945  outInfo.UsedBytes = m_Size;
2946  outInfo.UnusedBytes = 0;
2947  outInfo.AllocationSizeMin = outInfo.AllocationSizeMax = m_Size;
2948  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2949  outInfo.UnusedRangeSizeMax = 0;
2950  }
2951 
2952 private:
2953  VkDeviceSize m_Alignment;
2954  VkDeviceSize m_Size;
2955  void* m_pUserData;
2956  ALLOCATION_TYPE m_Type;
2957  VmaSuballocationType m_SuballocationType;
2958  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
2959 
2960  // Allocation out of VmaDeviceMemoryBlock.
2961  struct BlockAllocation
2962  {
2963  VmaPool m_hPool; // Null if belongs to general memory.
2964  VmaDeviceMemoryBlock* m_Block;
2965  VkDeviceSize m_Offset;
2966  bool m_CanBecomeLost;
2967  };
2968 
2969  // Allocation for an object that has its own private VkDeviceMemory.
2970  struct OwnAllocation
2971  {
2972  uint32_t m_MemoryTypeIndex;
2973  VkDeviceMemory m_hMemory;
2974  bool m_PersistentMap;
2975  void* m_pMappedData;
2976  };
2977 
2978  union
2979  {
2980  // Allocation out of VmaDeviceMemoryBlock.
2981  BlockAllocation m_BlockAllocation;
2982  // Allocation for an object that has its own private VkDeviceMemory.
2983  OwnAllocation m_OwnAllocation;
2984  };
2985 };
2986 
2987 /*
2988 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
2989 allocated memory block or free.
2990 */
2991 struct VmaSuballocation
2992 {
2993  VkDeviceSize offset;
2994  VkDeviceSize size;
2995  VmaAllocation hAllocation;
2996  VmaSuballocationType type;
2997 };
2998 
2999 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3000 
3001 // Cost of one additional allocation lost, as equivalent in bytes.
3002 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3003 
3004 /*
3005 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3006 
3007 If canMakeOtherLost was false:
3008 - item points to a FREE suballocation.
3009 - itemsToMakeLostCount is 0.
3010 
3011 If canMakeOtherLost was true:
3012 - item points to first of sequence of suballocations, which are either FREE,
3013  or point to VmaAllocations that can become lost.
3014 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3015  the requested allocation to succeed.
3016 */
3017 struct VmaAllocationRequest
3018 {
3019  VkDeviceSize offset;
3020  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3021  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3022  VmaSuballocationList::iterator item;
3023  size_t itemsToMakeLostCount;
3024 
3025  VkDeviceSize CalcCost() const
3026  {
3027  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3028  }
3029 };
3030 
3031 /*
3032 Represents a single block of device memory (VkDeviceMemory ) with all the
3033 data about its regions (aka suballocations, VmaAllocation), assigned and free.
3034 
3035 Thread-safety: This class must be externally synchronized.
3036 */
3037 class VmaDeviceMemoryBlock
3038 {
3039 public:
3040  uint32_t m_MemoryTypeIndex;
3041  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3042  VkDeviceMemory m_hMemory;
3043  VkDeviceSize m_Size;
3044  bool m_PersistentMap;
3045  void* m_pMappedData;
3046  uint32_t m_FreeCount;
3047  VkDeviceSize m_SumFreeSize;
3048  VmaSuballocationList m_Suballocations;
3049  // Suballocations that are free and have size greater than certain threshold.
3050  // Sorted by size, ascending.
3051  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3052 
3053  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3054 
3055  ~VmaDeviceMemoryBlock()
3056  {
3057  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3058  }
3059 
3060  // Always call after construction.
3061  void Init(
3062  uint32_t newMemoryTypeIndex,
3063  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3064  VkDeviceMemory newMemory,
3065  VkDeviceSize newSize,
3066  bool persistentMap,
3067  void* pMappedData);
3068  // Always call before destruction.
3069  void Destroy(VmaAllocator allocator);
3070 
3071  // Validates all data structures inside this object. If not valid, returns false.
3072  bool Validate() const;
3073 
3074  // Tries to find a place for suballocation with given parameters inside this allocation.
3075  // If succeeded, fills pAllocationRequest and returns true.
3076  // If failed, returns false.
3077  bool CreateAllocationRequest(
3078  uint32_t currentFrameIndex,
3079  uint32_t frameInUseCount,
3080  VkDeviceSize bufferImageGranularity,
3081  VkDeviceSize allocSize,
3082  VkDeviceSize allocAlignment,
3083  VmaSuballocationType allocType,
3084  bool canMakeOtherLost,
3085  VmaAllocationRequest* pAllocationRequest);
3086 
3087  bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);
3088 
3089  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3090 
3091  // Returns true if this allocation is empty - contains only single free suballocation.
3092  bool IsEmpty() const;
3093 
3094  // Makes actual allocation based on request. Request must already be checked
3095  // and valid.
3096  void Alloc(
3097  const VmaAllocationRequest& request,
3098  VmaSuballocationType type,
3099  VkDeviceSize allocSize,
3100  VmaAllocation hAllocation);
3101 
3102  // Frees suballocation assigned to given memory region.
3103  void Free(const VmaAllocation allocation);
3104 
3105 #if VMA_STATS_STRING_ENABLED
3106  void PrintDetailedMap(class VmaJsonWriter& json) const;
3107 #endif
3108 
3109 private:
3110  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
3111  // If yes, fills pOffset and returns true. If no, returns false.
3112  bool CheckAllocation(
3113  uint32_t currentFrameIndex,
3114  uint32_t frameInUseCount,
3115  VkDeviceSize bufferImageGranularity,
3116  VkDeviceSize allocSize,
3117  VkDeviceSize allocAlignment,
3118  VmaSuballocationType allocType,
3119  VmaSuballocationList::const_iterator suballocItem,
3120  bool canMakeOtherLost,
3121  VkDeviceSize* pOffset,
3122  size_t* itemsToMakeLostCount,
3123  VkDeviceSize* pSumFreeSize,
3124  VkDeviceSize* pSumItemSize) const;
3125 
3126  // Given free suballocation, it merges it with following one, which must also be free.
3127  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3128  // Releases given suballocation, making it free.
3129  // Merges it with adjacent free suballocations if applicable.
3130  // Returns iterator to new free suballocation at this place.
3131  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3132  // Given free suballocation, it inserts it into sorted list of
3133  // m_FreeSuballocationsBySize if it's suitable.
3134  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3135  // Given free suballocation, it removes it from sorted list of
3136  // m_FreeSuballocationsBySize if it's suitable.
3137  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3138 
3139  bool ValidateFreeSuballocationList() const;
3140 };
3141 
3142 struct VmaPointerLess
3143 {
3144  bool operator()(const void* lhs, const void* rhs) const
3145  {
3146  return lhs < rhs;
3147  }
3148 };
3149 
3150 class VmaDefragmentator;
3151 
3152 /*
3153 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3154 Vulkan memory type.
3155 
3156 Synchronized internally with a mutex.
3157 */
3158 struct VmaBlockVector
3159 {
3160  VmaBlockVector(
3161  VmaAllocator hAllocator,
3162  uint32_t memoryTypeIndex,
3163  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3164  VkDeviceSize preferredBlockSize,
3165  size_t minBlockCount,
3166  size_t maxBlockCount,
3167  VkDeviceSize bufferImageGranularity,
3168  uint32_t frameInUseCount,
3169  bool isCustomPool);
3170  ~VmaBlockVector();
3171 
3172  VkResult CreateMinBlocks();
3173 
3174  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3175  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3176  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3177  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3178  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3179 
3180  void GetPoolStats(VmaPoolStats* pStats);
3181 
3182  bool IsEmpty() const { return m_Blocks.empty(); }
3183 
3184  VkResult Allocate(
3185  VmaPool hCurrentPool,
3186  uint32_t currentFrameIndex,
3187  const VkMemoryRequirements& vkMemReq,
3188  const VmaAllocationCreateInfo& createInfo,
3189  VmaSuballocationType suballocType,
3190  VmaAllocation* pAllocation);
3191 
3192  void Free(
3193  VmaAllocation hAllocation);
3194 
3195  // Adds statistics of this BlockVector to pStats.
3196  void AddStats(VmaStats* pStats);
3197 
3198 #if VMA_STATS_STRING_ENABLED
3199  void PrintDetailedMap(class VmaJsonWriter& json);
3200 #endif
3201 
3202  void UnmapPersistentlyMappedMemory();
3203  VkResult MapPersistentlyMappedMemory();
3204 
3205  void MakePoolAllocationsLost(
3206  uint32_t currentFrameIndex,
3207  size_t* pLostAllocationCount);
3208 
3209  VmaDefragmentator* EnsureDefragmentator(
3210  VmaAllocator hAllocator,
3211  uint32_t currentFrameIndex);
3212 
3213  VkResult Defragment(
3214  VmaDefragmentationStats* pDefragmentationStats,
3215  VkDeviceSize& maxBytesToMove,
3216  uint32_t& maxAllocationsToMove);
3217 
3218  void DestroyDefragmentator();
3219 
3220 private:
3221  friend class VmaDefragmentator;
3222 
3223  const VmaAllocator m_hAllocator;
3224  const uint32_t m_MemoryTypeIndex;
3225  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3226  const VkDeviceSize m_PreferredBlockSize;
3227  const size_t m_MinBlockCount;
3228  const size_t m_MaxBlockCount;
3229  const VkDeviceSize m_BufferImageGranularity;
3230  const uint32_t m_FrameInUseCount;
3231  const bool m_IsCustomPool;
3232  VMA_MUTEX m_Mutex;
3233  // Incrementally sorted by sumFreeSize, ascending.
3234  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3235  /* There can be at most one allocation that is completely empty - a
3236  hysteresis to avoid pessimistic case of alternating creation and destruction
3237  of a VkDeviceMemory. */
3238  bool m_HasEmptyBlock;
3239  VmaDefragmentator* m_pDefragmentator;
3240 
3241  // Finds and removes given block from vector.
3242  void Remove(VmaDeviceMemoryBlock* pBlock);
3243 
3244  // Performs single step in sorting m_Blocks. They may not be fully sorted
3245  // after this call.
3246  void IncrementallySortBlocks();
3247 
3248  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3249 };
3250 
3251 struct VmaPool_T
3252 {
3253 public:
3254  VmaBlockVector m_BlockVector;
3255 
3256  // Takes ownership.
3257  VmaPool_T(
3258  VmaAllocator hAllocator,
3259  const VmaPoolCreateInfo& createInfo);
3260  ~VmaPool_T();
3261 
3262  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3263 
3264 #if VMA_STATS_STRING_ENABLED
3265  //void PrintDetailedMap(class VmaStringBuilder& sb);
3266 #endif
3267 };
3268 
3269 class VmaDefragmentator
3270 {
3271  const VmaAllocator m_hAllocator;
3272  VmaBlockVector* const m_pBlockVector;
3273  uint32_t m_CurrentFrameIndex;
3274  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3275  VkDeviceSize m_BytesMoved;
3276  uint32_t m_AllocationsMoved;
3277 
3278  struct AllocationInfo
3279  {
3280  VmaAllocation m_hAllocation;
3281  VkBool32* m_pChanged;
3282 
3283  AllocationInfo() :
3284  m_hAllocation(VK_NULL_HANDLE),
3285  m_pChanged(VMA_NULL)
3286  {
3287  }
3288  };
3289 
3290  struct AllocationInfoSizeGreater
3291  {
3292  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3293  {
3294  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3295  }
3296  };
3297 
3298  // Used between AddAllocation and Defragment.
3299  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3300 
3301  struct BlockInfo
3302  {
3303  VmaDeviceMemoryBlock* m_pBlock;
3304  bool m_HasNonMovableAllocations;
3305  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3306 
3307  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3308  m_pBlock(VMA_NULL),
3309  m_HasNonMovableAllocations(true),
3310  m_Allocations(pAllocationCallbacks),
3311  m_pMappedDataForDefragmentation(VMA_NULL)
3312  {
3313  }
3314 
3315  void CalcHasNonMovableAllocations()
3316  {
3317  const size_t blockAllocCount =
3318  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3319  const size_t defragmentAllocCount = m_Allocations.size();
3320  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3321  }
3322 
3323  void SortAllocationsBySizeDescecnding()
3324  {
3325  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3326  }
3327 
3328  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3329  void Unmap(VmaAllocator hAllocator);
3330 
3331  private:
3332  // Not null if mapped for defragmentation only, not persistently mapped.
3333  void* m_pMappedDataForDefragmentation;
3334  };
3335 
3336  struct BlockPointerLess
3337  {
3338  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3339  {
3340  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3341  }
3342  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3343  {
3344  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3345  }
3346  };
3347 
3348  // 1. Blocks with some non-movable allocations go first.
3349  // 2. Blocks with smaller sumFreeSize go first.
3350  struct BlockInfoCompareMoveDestination
3351  {
3352  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3353  {
3354  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3355  {
3356  return true;
3357  }
3358  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3359  {
3360  return false;
3361  }
3362  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3363  {
3364  return true;
3365  }
3366  return false;
3367  }
3368  };
3369 
3370  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3371  BlockInfoVector m_Blocks;
3372 
3373  VkResult DefragmentRound(
3374  VkDeviceSize maxBytesToMove,
3375  uint32_t maxAllocationsToMove);
3376 
3377  static bool MoveMakesSense(
3378  size_t dstBlockIndex, VkDeviceSize dstOffset,
3379  size_t srcBlockIndex, VkDeviceSize srcOffset);
3380 
3381 public:
3382  VmaDefragmentator(
3383  VmaAllocator hAllocator,
3384  VmaBlockVector* pBlockVector,
3385  uint32_t currentFrameIndex);
3386 
3387  ~VmaDefragmentator();
3388 
3389  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3390  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3391 
3392  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3393 
3394  VkResult Defragment(
3395  VkDeviceSize maxBytesToMove,
3396  uint32_t maxAllocationsToMove);
3397 };
3398 
3399 // Main allocator object.
3400 struct VmaAllocator_T
3401 {
3402  bool m_UseMutex;
3403  VkDevice m_hDevice;
3404  bool m_AllocationCallbacksSpecified;
3405  VkAllocationCallbacks m_AllocationCallbacks;
3406  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3407  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3408  // Counter to allow nested calls to these functions.
3409  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3410 
3411  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
3412  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3413  VMA_MUTEX m_HeapSizeLimitMutex;
3414 
3415  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3416  VkPhysicalDeviceMemoryProperties m_MemProps;
3417 
3418  // Default pools.
3419  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3420 
3421  // Each vector is sorted by memory (handle value).
3422  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3423  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3424  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3425 
3426  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3427  ~VmaAllocator_T();
3428 
3429  const VkAllocationCallbacks* GetAllocationCallbacks() const
3430  {
3431  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3432  }
3433  const VmaVulkanFunctions& GetVulkanFunctions() const
3434  {
3435  return m_VulkanFunctions;
3436  }
3437 
3438  VkDeviceSize GetBufferImageGranularity() const
3439  {
3440  return VMA_MAX(
3441  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3442  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3443  }
3444 
3445  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3446  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3447 
3448  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3449  {
3450  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3451  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3452  }
3453 
3454  // Main allocation function.
3455  VkResult AllocateMemory(
3456  const VkMemoryRequirements& vkMemReq,
3457  const VmaAllocationCreateInfo& createInfo,
3458  VmaSuballocationType suballocType,
3459  VmaAllocation* pAllocation);
3460 
3461  // Main deallocation function.
3462  void FreeMemory(const VmaAllocation allocation);
3463 
3464  void CalculateStats(VmaStats* pStats);
3465 
3466 #if VMA_STATS_STRING_ENABLED
3467  void PrintDetailedMap(class VmaJsonWriter& json);
3468 #endif
3469 
3470  void UnmapPersistentlyMappedMemory();
3471  VkResult MapPersistentlyMappedMemory();
3472 
3473  VkResult Defragment(
3474  VmaAllocation* pAllocations,
3475  size_t allocationCount,
3476  VkBool32* pAllocationsChanged,
3477  const VmaDefragmentationInfo* pDefragmentationInfo,
3478  VmaDefragmentationStats* pDefragmentationStats);
3479 
3480  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3481 
3482  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3483  void DestroyPool(VmaPool pool);
3484  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3485 
3486  void SetCurrentFrameIndex(uint32_t frameIndex);
3487 
3488  void MakePoolAllocationsLost(
3489  VmaPool hPool,
3490  size_t* pLostAllocationCount);
3491 
3492  void CreateLostAllocation(VmaAllocation* pAllocation);
3493 
3494  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3495  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3496 
3497 private:
3498  VkDeviceSize m_PreferredLargeHeapBlockSize;
3499  VkDeviceSize m_PreferredSmallHeapBlockSize;
3500 
3501  VkPhysicalDevice m_PhysicalDevice;
3502  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3503 
3504  VMA_MUTEX m_PoolsMutex;
3505  // Protected by m_PoolsMutex. Sorted by pointer value.
3506  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3507 
3508  VmaVulkanFunctions m_VulkanFunctions;
3509 
3510  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3511 
3512  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3513 
3514  VkResult AllocateMemoryOfType(
3515  const VkMemoryRequirements& vkMemReq,
3516  const VmaAllocationCreateInfo& createInfo,
3517  uint32_t memTypeIndex,
3518  VmaSuballocationType suballocType,
3519  VmaAllocation* pAllocation);
3520 
3521  // Allocates and registers new VkDeviceMemory specifically for single allocation.
3522  VkResult AllocateOwnMemory(
3523  VkDeviceSize size,
3524  VmaSuballocationType suballocType,
3525  uint32_t memTypeIndex,
3526  bool map,
3527  void* pUserData,
3528  VmaAllocation* pAllocation);
3529 
3530  // Tries to free pMemory as Own Memory. Returns true if found and freed.
3531  void FreeOwnMemory(VmaAllocation allocation);
3532 };
3533 
3535 // Memory allocation #2 after VmaAllocator_T definition
3536 
3537 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3538 {
3539  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3540 }
3541 
3542 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3543 {
3544  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3545 }
3546 
3547 template<typename T>
3548 static T* VmaAllocate(VmaAllocator hAllocator)
3549 {
3550  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3551 }
3552 
3553 template<typename T>
3554 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3555 {
3556  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3557 }
3558 
3559 template<typename T>
3560 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3561 {
3562  if(ptr != VMA_NULL)
3563  {
3564  ptr->~T();
3565  VmaFree(hAllocator, ptr);
3566  }
3567 }
3568 
3569 template<typename T>
3570 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3571 {
3572  if(ptr != VMA_NULL)
3573  {
3574  for(size_t i = count; i--; )
3575  ptr[i].~T();
3576  VmaFree(hAllocator, ptr);
3577  }
3578 }
3579 
3581 // VmaStringBuilder
3582 
3583 #if VMA_STATS_STRING_ENABLED
3584 
3585 class VmaStringBuilder
3586 {
3587 public:
3588  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3589  size_t GetLength() const { return m_Data.size(); }
3590  const char* GetData() const { return m_Data.data(); }
3591 
3592  void Add(char ch) { m_Data.push_back(ch); }
3593  void Add(const char* pStr);
3594  void AddNewLine() { Add('\n'); }
3595  void AddNumber(uint32_t num);
3596  void AddNumber(uint64_t num);
3597  void AddPointer(const void* ptr);
3598 
3599 private:
3600  VmaVector< char, VmaStlAllocator<char> > m_Data;
3601 };
3602 
3603 void VmaStringBuilder::Add(const char* pStr)
3604 {
3605  const size_t strLen = strlen(pStr);
3606  if(strLen > 0)
3607  {
3608  const size_t oldCount = m_Data.size();
3609  m_Data.resize(oldCount + strLen);
3610  memcpy(m_Data.data() + oldCount, pStr, strLen);
3611  }
3612 }
3613 
3614 void VmaStringBuilder::AddNumber(uint32_t num)
3615 {
3616  char buf[11];
3617  VmaUint32ToStr(buf, sizeof(buf), num);
3618  Add(buf);
3619 }
3620 
3621 void VmaStringBuilder::AddNumber(uint64_t num)
3622 {
3623  char buf[21];
3624  VmaUint64ToStr(buf, sizeof(buf), num);
3625  Add(buf);
3626 }
3627 
3628 void VmaStringBuilder::AddPointer(const void* ptr)
3629 {
3630  char buf[21];
3631  VmaPtrToStr(buf, sizeof(buf), ptr);
3632  Add(buf);
3633 }
3634 
3635 #endif // #if VMA_STATS_STRING_ENABLED
3636 
3638 // VmaJsonWriter
3639 
3640 #if VMA_STATS_STRING_ENABLED
3641 
3642 class VmaJsonWriter
3643 {
3644 public:
3645  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3646  ~VmaJsonWriter();
3647 
3648  void BeginObject(bool singleLine = false);
3649  void EndObject();
3650 
3651  void BeginArray(bool singleLine = false);
3652  void EndArray();
3653 
3654  void WriteString(const char* pStr);
3655  void BeginString(const char* pStr = VMA_NULL);
3656  void ContinueString(const char* pStr);
3657  void ContinueString(uint32_t n);
3658  void ContinueString(uint64_t n);
3659  void EndString(const char* pStr = VMA_NULL);
3660 
3661  void WriteNumber(uint32_t n);
3662  void WriteNumber(uint64_t n);
3663  void WriteBool(bool b);
3664  void WriteNull();
3665 
3666 private:
3667  static const char* const INDENT;
3668 
3669  enum COLLECTION_TYPE
3670  {
3671  COLLECTION_TYPE_OBJECT,
3672  COLLECTION_TYPE_ARRAY,
3673  };
3674  struct StackItem
3675  {
3676  COLLECTION_TYPE type;
3677  uint32_t valueCount;
3678  bool singleLineMode;
3679  };
3680 
3681  VmaStringBuilder& m_SB;
3682  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3683  bool m_InsideString;
3684 
3685  void BeginValue(bool isString);
3686  void WriteIndent(bool oneLess = false);
3687 };
3688 
3689 const char* const VmaJsonWriter::INDENT = " ";
3690 
3691 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3692  m_SB(sb),
3693  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3694  m_InsideString(false)
3695 {
3696 }
3697 
3698 VmaJsonWriter::~VmaJsonWriter()
3699 {
3700  VMA_ASSERT(!m_InsideString);
3701  VMA_ASSERT(m_Stack.empty());
3702 }
3703 
3704 void VmaJsonWriter::BeginObject(bool singleLine)
3705 {
3706  VMA_ASSERT(!m_InsideString);
3707 
3708  BeginValue(false);
3709  m_SB.Add('{');
3710 
3711  StackItem item;
3712  item.type = COLLECTION_TYPE_OBJECT;
3713  item.valueCount = 0;
3714  item.singleLineMode = singleLine;
3715  m_Stack.push_back(item);
3716 }
3717 
3718 void VmaJsonWriter::EndObject()
3719 {
3720  VMA_ASSERT(!m_InsideString);
3721 
3722  WriteIndent(true);
3723  m_SB.Add('}');
3724 
3725  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3726  m_Stack.pop_back();
3727 }
3728 
3729 void VmaJsonWriter::BeginArray(bool singleLine)
3730 {
3731  VMA_ASSERT(!m_InsideString);
3732 
3733  BeginValue(false);
3734  m_SB.Add('[');
3735 
3736  StackItem item;
3737  item.type = COLLECTION_TYPE_ARRAY;
3738  item.valueCount = 0;
3739  item.singleLineMode = singleLine;
3740  m_Stack.push_back(item);
3741 }
3742 
3743 void VmaJsonWriter::EndArray()
3744 {
3745  VMA_ASSERT(!m_InsideString);
3746 
3747  WriteIndent(true);
3748  m_SB.Add(']');
3749 
3750  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3751  m_Stack.pop_back();
3752 }
3753 
3754 void VmaJsonWriter::WriteString(const char* pStr)
3755 {
3756  BeginString(pStr);
3757  EndString();
3758 }
3759 
3760 void VmaJsonWriter::BeginString(const char* pStr)
3761 {
3762  VMA_ASSERT(!m_InsideString);
3763 
3764  BeginValue(true);
3765  m_SB.Add('"');
3766  m_InsideString = true;
3767  if(pStr != VMA_NULL && pStr[0] != '\0')
3768  {
3769  ContinueString(pStr);
3770  }
3771 }
3772 
3773 void VmaJsonWriter::ContinueString(const char* pStr)
3774 {
3775  VMA_ASSERT(m_InsideString);
3776 
3777  const size_t strLen = strlen(pStr);
3778  for(size_t i = 0; i < strLen; ++i)
3779  {
3780  char ch = pStr[i];
3781  if(ch == '\'')
3782  {
3783  m_SB.Add("\\\\");
3784  }
3785  else if(ch == '"')
3786  {
3787  m_SB.Add("\\\"");
3788  }
3789  else if(ch >= 32)
3790  {
3791  m_SB.Add(ch);
3792  }
3793  else switch(ch)
3794  {
3795  case '\n':
3796  m_SB.Add("\\n");
3797  break;
3798  case '\r':
3799  m_SB.Add("\\r");
3800  break;
3801  case '\t':
3802  m_SB.Add("\\t");
3803  break;
3804  default:
3805  VMA_ASSERT(0 && "Character not currently supported.");
3806  break;
3807  }
3808  }
3809 }
3810 
3811 void VmaJsonWriter::ContinueString(uint32_t n)
3812 {
3813  VMA_ASSERT(m_InsideString);
3814  m_SB.AddNumber(n);
3815 }
3816 
3817 void VmaJsonWriter::ContinueString(uint64_t n)
3818 {
3819  VMA_ASSERT(m_InsideString);
3820  m_SB.AddNumber(n);
3821 }
3822 
3823 void VmaJsonWriter::EndString(const char* pStr)
3824 {
3825  VMA_ASSERT(m_InsideString);
3826  if(pStr != VMA_NULL && pStr[0] != '\0')
3827  {
3828  ContinueString(pStr);
3829  }
3830  m_SB.Add('"');
3831  m_InsideString = false;
3832 }
3833 
3834 void VmaJsonWriter::WriteNumber(uint32_t n)
3835 {
3836  VMA_ASSERT(!m_InsideString);
3837  BeginValue(false);
3838  m_SB.AddNumber(n);
3839 }
3840 
3841 void VmaJsonWriter::WriteNumber(uint64_t n)
3842 {
3843  VMA_ASSERT(!m_InsideString);
3844  BeginValue(false);
3845  m_SB.AddNumber(n);
3846 }
3847 
3848 void VmaJsonWriter::WriteBool(bool b)
3849 {
3850  VMA_ASSERT(!m_InsideString);
3851  BeginValue(false);
3852  m_SB.Add(b ? "true" : "false");
3853 }
3854 
3855 void VmaJsonWriter::WriteNull()
3856 {
3857  VMA_ASSERT(!m_InsideString);
3858  BeginValue(false);
3859  m_SB.Add("null");
3860 }
3861 
3862 void VmaJsonWriter::BeginValue(bool isString)
3863 {
3864  if(!m_Stack.empty())
3865  {
3866  StackItem& currItem = m_Stack.back();
3867  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3868  currItem.valueCount % 2 == 0)
3869  {
3870  VMA_ASSERT(isString);
3871  }
3872 
3873  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3874  currItem.valueCount % 2 != 0)
3875  {
3876  m_SB.Add(": ");
3877  }
3878  else if(currItem.valueCount > 0)
3879  {
3880  m_SB.Add(", ");
3881  WriteIndent();
3882  }
3883  else
3884  {
3885  WriteIndent();
3886  }
3887  ++currItem.valueCount;
3888  }
3889 }
3890 
3891 void VmaJsonWriter::WriteIndent(bool oneLess)
3892 {
3893  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3894  {
3895  m_SB.AddNewLine();
3896 
3897  size_t count = m_Stack.size();
3898  if(count > 0 && oneLess)
3899  {
3900  --count;
3901  }
3902  for(size_t i = 0; i < count; ++i)
3903  {
3904  m_SB.Add(INDENT);
3905  }
3906  }
3907 }
3908 
3909 #endif // #if VMA_STATS_STRING_ENABLED
3910 
3912 
3913 VkDeviceSize VmaAllocation_T::GetOffset() const
3914 {
3915  switch(m_Type)
3916  {
3917  case ALLOCATION_TYPE_BLOCK:
3918  return m_BlockAllocation.m_Offset;
3919  case ALLOCATION_TYPE_OWN:
3920  return 0;
3921  default:
3922  VMA_ASSERT(0);
3923  return 0;
3924  }
3925 }
3926 
3927 VkDeviceMemory VmaAllocation_T::GetMemory() const
3928 {
3929  switch(m_Type)
3930  {
3931  case ALLOCATION_TYPE_BLOCK:
3932  return m_BlockAllocation.m_Block->m_hMemory;
3933  case ALLOCATION_TYPE_OWN:
3934  return m_OwnAllocation.m_hMemory;
3935  default:
3936  VMA_ASSERT(0);
3937  return VK_NULL_HANDLE;
3938  }
3939 }
3940 
3941 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3942 {
3943  switch(m_Type)
3944  {
3945  case ALLOCATION_TYPE_BLOCK:
3946  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3947  case ALLOCATION_TYPE_OWN:
3948  return m_OwnAllocation.m_MemoryTypeIndex;
3949  default:
3950  VMA_ASSERT(0);
3951  return UINT32_MAX;
3952  }
3953 }
3954 
3955 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3956 {
3957  switch(m_Type)
3958  {
3959  case ALLOCATION_TYPE_BLOCK:
3960  return m_BlockAllocation.m_Block->m_BlockVectorType;
3961  case ALLOCATION_TYPE_OWN:
3962  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3963  default:
3964  VMA_ASSERT(0);
3965  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3966  }
3967 }
3968 
3969 void* VmaAllocation_T::GetMappedData() const
3970 {
3971  switch(m_Type)
3972  {
3973  case ALLOCATION_TYPE_BLOCK:
3974  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3975  {
3976  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3977  }
3978  else
3979  {
3980  return VMA_NULL;
3981  }
3982  break;
3983  case ALLOCATION_TYPE_OWN:
3984  return m_OwnAllocation.m_pMappedData;
3985  default:
3986  VMA_ASSERT(0);
3987  return VMA_NULL;
3988  }
3989 }
3990 
3991 bool VmaAllocation_T::CanBecomeLost() const
3992 {
3993  switch(m_Type)
3994  {
3995  case ALLOCATION_TYPE_BLOCK:
3996  return m_BlockAllocation.m_CanBecomeLost;
3997  case ALLOCATION_TYPE_OWN:
3998  return false;
3999  default:
4000  VMA_ASSERT(0);
4001  return false;
4002  }
4003 }
4004 
4005 VmaPool VmaAllocation_T::GetPool() const
4006 {
4007  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4008  return m_BlockAllocation.m_hPool;
4009 }
4010 
4011 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4012 {
4013  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4014  if(m_OwnAllocation.m_PersistentMap)
4015  {
4016  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4017  hAllocator->m_hDevice,
4018  m_OwnAllocation.m_hMemory,
4019  0,
4020  VK_WHOLE_SIZE,
4021  0,
4022  &m_OwnAllocation.m_pMappedData);
4023  }
4024  return VK_SUCCESS;
4025 }
4026 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4027 {
4028  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4029  if(m_OwnAllocation.m_pMappedData)
4030  {
4031  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
4032  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
4033  m_OwnAllocation.m_pMappedData = VMA_NULL;
4034  }
4035 }
4036 
4037 
4038 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4039 {
4040  VMA_ASSERT(CanBecomeLost());
4041 
4042  /*
4043  Warning: This is a carefully designed algorithm.
4044  Do not modify unless you really know what you're doing :)
4045  */
4046  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4047  for(;;)
4048  {
4049  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4050  {
4051  VMA_ASSERT(0);
4052  return false;
4053  }
4054  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4055  {
4056  return false;
4057  }
4058  else // Last use time earlier than current time.
4059  {
4060  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4061  {
4062  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4063  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4064  return true;
4065  }
4066  }
4067  }
4068 }
4069 
4070 #if VMA_STATS_STRING_ENABLED
4071 
4072 // Correspond to values of enum VmaSuballocationType.
4073 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4074  "FREE",
4075  "UNKNOWN",
4076  "BUFFER",
4077  "IMAGE_UNKNOWN",
4078  "IMAGE_LINEAR",
4079  "IMAGE_OPTIMAL",
4080 };
4081 
4082 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4083 {
4084  json.BeginObject();
4085 
4086  json.WriteString("Blocks");
4087  json.WriteNumber(stat.BlockCount);
4088 
4089  json.WriteString("Allocations");
4090  json.WriteNumber(stat.AllocationCount);
4091 
4092  json.WriteString("UnusedRanges");
4093  json.WriteNumber(stat.UnusedRangeCount);
4094 
4095  json.WriteString("UsedBytes");
4096  json.WriteNumber(stat.UsedBytes);
4097 
4098  json.WriteString("UnusedBytes");
4099  json.WriteNumber(stat.UnusedBytes);
4100 
4101  if(stat.AllocationCount > 1)
4102  {
4103  json.WriteString("AllocationSize");
4104  json.BeginObject(true);
4105  json.WriteString("Min");
4106  json.WriteNumber(stat.AllocationSizeMin);
4107  json.WriteString("Avg");
4108  json.WriteNumber(stat.AllocationSizeAvg);
4109  json.WriteString("Max");
4110  json.WriteNumber(stat.AllocationSizeMax);
4111  json.EndObject();
4112  }
4113 
4114  if(stat.UnusedRangeCount > 1)
4115  {
4116  json.WriteString("UnusedRangeSize");
4117  json.BeginObject(true);
4118  json.WriteString("Min");
4119  json.WriteNumber(stat.UnusedRangeSizeMin);
4120  json.WriteString("Avg");
4121  json.WriteNumber(stat.UnusedRangeSizeAvg);
4122  json.WriteString("Max");
4123  json.WriteNumber(stat.UnusedRangeSizeMax);
4124  json.EndObject();
4125  }
4126 
4127  json.EndObject();
4128 }
4129 
4130 #endif // #if VMA_STATS_STRING_ENABLED
4131 
4132 struct VmaSuballocationItemSizeLess
4133 {
4134  bool operator()(
4135  const VmaSuballocationList::iterator lhs,
4136  const VmaSuballocationList::iterator rhs) const
4137  {
4138  return lhs->size < rhs->size;
4139  }
4140  bool operator()(
4141  const VmaSuballocationList::iterator lhs,
4142  VkDeviceSize rhsSize) const
4143  {
4144  return lhs->size < rhsSize;
4145  }
4146 };
4147 
4148 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
4149  m_MemoryTypeIndex(UINT32_MAX),
4150  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
4151  m_hMemory(VK_NULL_HANDLE),
4152  m_Size(0),
4153  m_PersistentMap(false),
4154  m_pMappedData(VMA_NULL),
4155  m_FreeCount(0),
4156  m_SumFreeSize(0),
4157  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4158  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4159 {
4160 }
4161 
4162 void VmaDeviceMemoryBlock::Init(
4163  uint32_t newMemoryTypeIndex,
4164  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4165  VkDeviceMemory newMemory,
4166  VkDeviceSize newSize,
4167  bool persistentMap,
4168  void* pMappedData)
4169 {
4170  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4171 
4172  m_MemoryTypeIndex = newMemoryTypeIndex;
4173  m_BlockVectorType = newBlockVectorType;
4174  m_hMemory = newMemory;
4175  m_Size = newSize;
4176  m_PersistentMap = persistentMap;
4177  m_pMappedData = pMappedData;
4178  m_FreeCount = 1;
4179  m_SumFreeSize = newSize;
4180 
4181  m_Suballocations.clear();
4182  m_FreeSuballocationsBySize.clear();
4183 
4184  VmaSuballocation suballoc = {};
4185  suballoc.offset = 0;
4186  suballoc.size = newSize;
4187  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4188  suballoc.hAllocation = VK_NULL_HANDLE;
4189 
4190  m_Suballocations.push_back(suballoc);
4191  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4192  --suballocItem;
4193  m_FreeSuballocationsBySize.push_back(suballocItem);
4194 }
4195 
4196 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
4197 {
4198  // This is the most important assert in the entire library.
4199  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
4200  VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
4201 
4202  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
4203  if(m_pMappedData != VMA_NULL)
4204  {
4205  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
4206  m_pMappedData = VMA_NULL;
4207  }
4208 
4209  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
4210  m_hMemory = VK_NULL_HANDLE;
4211 }
4212 
4213 bool VmaDeviceMemoryBlock::Validate() const
4214 {
4215  if((m_hMemory == VK_NULL_HANDLE) ||
4216  (m_Size == 0) ||
4217  m_Suballocations.empty())
4218  {
4219  return false;
4220  }
4221 
4222  // Expected offset of new suballocation as calculates from previous ones.
4223  VkDeviceSize calculatedOffset = 0;
4224  // Expected number of free suballocations as calculated from traversing their list.
4225  uint32_t calculatedFreeCount = 0;
4226  // Expected sum size of free suballocations as calculated from traversing their list.
4227  VkDeviceSize calculatedSumFreeSize = 0;
4228  // Expected number of free suballocations that should be registered in
4229  // m_FreeSuballocationsBySize calculated from traversing their list.
4230  size_t freeSuballocationsToRegister = 0;
4231  // True if previous visisted suballocation was free.
4232  bool prevFree = false;
4233 
4234  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4235  suballocItem != m_Suballocations.cend();
4236  ++suballocItem)
4237  {
4238  const VmaSuballocation& subAlloc = *suballocItem;
4239 
4240  // Actual offset of this suballocation doesn't match expected one.
4241  if(subAlloc.offset != calculatedOffset)
4242  {
4243  return false;
4244  }
4245 
4246  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4247  // Two adjacent free suballocations are invalid. They should be merged.
4248  if(prevFree && currFree)
4249  {
4250  return false;
4251  }
4252  prevFree = currFree;
4253 
4254  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4255  {
4256  return false;
4257  }
4258 
4259  if(currFree)
4260  {
4261  calculatedSumFreeSize += subAlloc.size;
4262  ++calculatedFreeCount;
4263  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4264  {
4265  ++freeSuballocationsToRegister;
4266  }
4267  }
4268 
4269  calculatedOffset += subAlloc.size;
4270  }
4271 
4272  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4273  // match expected one.
4274  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4275  {
4276  return false;
4277  }
4278 
4279  VkDeviceSize lastSize = 0;
4280  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4281  {
4282  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4283 
4284  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4285  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4286  {
4287  return false;
4288  }
4289  // They must be sorted by size ascending.
4290  if(suballocItem->size < lastSize)
4291  {
4292  return false;
4293  }
4294 
4295  lastSize = suballocItem->size;
4296  }
4297 
4298  // Check if totals match calculacted values.
4299  return
4300  (calculatedOffset == m_Size) &&
4301  (calculatedSumFreeSize == m_SumFreeSize) &&
4302  (calculatedFreeCount == m_FreeCount);
4303 }
4304 
4305 /*
4306 How many suitable free suballocations to analyze before choosing best one.
4307 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4308  be chosen.
4309 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4310  suballocations will be analized and best one will be chosen.
4311 - Any other value is also acceptable.
4312 */
4313 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4314 
4315 bool VmaDeviceMemoryBlock::CreateAllocationRequest(
4316  uint32_t currentFrameIndex,
4317  uint32_t frameInUseCount,
4318  VkDeviceSize bufferImageGranularity,
4319  VkDeviceSize allocSize,
4320  VkDeviceSize allocAlignment,
4321  VmaSuballocationType allocType,
4322  bool canMakeOtherLost,
4323  VmaAllocationRequest* pAllocationRequest)
4324 {
4325  VMA_ASSERT(allocSize > 0);
4326  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4327  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4328  VMA_HEAVY_ASSERT(Validate());
4329 
4330  // There is not enough total free space in this block to fullfill the request: Early return.
4331  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4332  {
4333  return false;
4334  }
4335 
4336  // New algorithm, efficiently searching freeSuballocationsBySize.
4337  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4338  if(freeSuballocCount > 0)
4339  {
4340  if(VMA_BEST_FIT)
4341  {
4342  // Find first free suballocation with size not less than allocSize.
4343  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4344  m_FreeSuballocationsBySize.data(),
4345  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4346  allocSize,
4347  VmaSuballocationItemSizeLess());
4348  size_t index = it - m_FreeSuballocationsBySize.data();
4349  for(; index < freeSuballocCount; ++index)
4350  {
4351  if(CheckAllocation(
4352  currentFrameIndex,
4353  frameInUseCount,
4354  bufferImageGranularity,
4355  allocSize,
4356  allocAlignment,
4357  allocType,
4358  m_FreeSuballocationsBySize[index],
4359  false, // canMakeOtherLost
4360  &pAllocationRequest->offset,
4361  &pAllocationRequest->itemsToMakeLostCount,
4362  &pAllocationRequest->sumFreeSize,
4363  &pAllocationRequest->sumItemSize))
4364  {
4365  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4366  return true;
4367  }
4368  }
4369  }
4370  else
4371  {
4372  // Search staring from biggest suballocations.
4373  for(size_t index = freeSuballocCount; index--; )
4374  {
4375  if(CheckAllocation(
4376  currentFrameIndex,
4377  frameInUseCount,
4378  bufferImageGranularity,
4379  allocSize,
4380  allocAlignment,
4381  allocType,
4382  m_FreeSuballocationsBySize[index],
4383  false, // canMakeOtherLost
4384  &pAllocationRequest->offset,
4385  &pAllocationRequest->itemsToMakeLostCount,
4386  &pAllocationRequest->sumFreeSize,
4387  &pAllocationRequest->sumItemSize))
4388  {
4389  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4390  return true;
4391  }
4392  }
4393  }
4394  }
4395 
4396  if(canMakeOtherLost)
4397  {
4398  // Brute-force algorithm. TODO: Come up with something better.
4399 
4400  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4401  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4402 
4403  VmaAllocationRequest tmpAllocRequest = {};
4404  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4405  suballocIt != m_Suballocations.end();
4406  ++suballocIt)
4407  {
4408  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4409  suballocIt->hAllocation->CanBecomeLost())
4410  {
4411  if(CheckAllocation(
4412  currentFrameIndex,
4413  frameInUseCount,
4414  bufferImageGranularity,
4415  allocSize,
4416  allocAlignment,
4417  allocType,
4418  suballocIt,
4419  canMakeOtherLost,
4420  &tmpAllocRequest.offset,
4421  &tmpAllocRequest.itemsToMakeLostCount,
4422  &tmpAllocRequest.sumFreeSize,
4423  &tmpAllocRequest.sumItemSize))
4424  {
4425  tmpAllocRequest.item = suballocIt;
4426 
4427  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4428  {
4429  *pAllocationRequest = tmpAllocRequest;
4430  }
4431  }
4432  }
4433  }
4434 
4435  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4436  {
4437  return true;
4438  }
4439  }
4440 
4441  return false;
4442 }
4443 
4444 bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
4445 {
4446  while(pAllocationRequest->itemsToMakeLostCount > 0)
4447  {
4448  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4449  {
4450  ++pAllocationRequest->item;
4451  }
4452  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4453  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4454  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4455  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4456  {
4457  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4458  --pAllocationRequest->itemsToMakeLostCount;
4459  }
4460  else
4461  {
4462  return false;
4463  }
4464  }
4465 
4466  VMA_HEAVY_ASSERT(Validate());
4467  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4468  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4469 
4470  return true;
4471 }
4472 
4473 uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4474 {
4475  uint32_t lostAllocationCount = 0;
4476  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4477  it != m_Suballocations.end();
4478  ++it)
4479  {
4480  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4481  it->hAllocation->CanBecomeLost() &&
4482  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4483  {
4484  it = FreeSuballocation(it);
4485  ++lostAllocationCount;
4486  }
4487  }
4488  return lostAllocationCount;
4489 }
4490 
4491 bool VmaDeviceMemoryBlock::CheckAllocation(
4492  uint32_t currentFrameIndex,
4493  uint32_t frameInUseCount,
4494  VkDeviceSize bufferImageGranularity,
4495  VkDeviceSize allocSize,
4496  VkDeviceSize allocAlignment,
4497  VmaSuballocationType allocType,
4498  VmaSuballocationList::const_iterator suballocItem,
4499  bool canMakeOtherLost,
4500  VkDeviceSize* pOffset,
4501  size_t* itemsToMakeLostCount,
4502  VkDeviceSize* pSumFreeSize,
4503  VkDeviceSize* pSumItemSize) const
4504 {
4505  VMA_ASSERT(allocSize > 0);
4506  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4507  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4508  VMA_ASSERT(pOffset != VMA_NULL);
4509 
4510  *itemsToMakeLostCount = 0;
4511  *pSumFreeSize = 0;
4512  *pSumItemSize = 0;
4513 
4514  if(canMakeOtherLost)
4515  {
4516  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4517  {
4518  *pSumFreeSize = suballocItem->size;
4519  }
4520  else
4521  {
4522  if(suballocItem->hAllocation->CanBecomeLost() &&
4523  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4524  {
4525  ++*itemsToMakeLostCount;
4526  *pSumItemSize = suballocItem->size;
4527  }
4528  else
4529  {
4530  return false;
4531  }
4532  }
4533 
4534  // Remaining size is too small for this request: Early return.
4535  if(m_Size - suballocItem->offset < allocSize)
4536  {
4537  return false;
4538  }
4539 
4540  // Start from offset equal to beginning of this suballocation.
4541  *pOffset = suballocItem->offset;
4542 
4543  // Apply VMA_DEBUG_MARGIN at the beginning.
4544  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4545  {
4546  *pOffset += VMA_DEBUG_MARGIN;
4547  }
4548 
4549  // Apply alignment.
4550  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4551  *pOffset = VmaAlignUp(*pOffset, alignment);
4552 
4553  // Check previous suballocations for BufferImageGranularity conflicts.
4554  // Make bigger alignment if necessary.
4555  if(bufferImageGranularity > 1)
4556  {
4557  bool bufferImageGranularityConflict = false;
4558  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4559  while(prevSuballocItem != m_Suballocations.cbegin())
4560  {
4561  --prevSuballocItem;
4562  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4563  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4564  {
4565  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4566  {
4567  bufferImageGranularityConflict = true;
4568  break;
4569  }
4570  }
4571  else
4572  // Already on previous page.
4573  break;
4574  }
4575  if(bufferImageGranularityConflict)
4576  {
4577  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4578  }
4579  }
4580 
4581  // Now that we have final *pOffset, check if we are past suballocItem.
4582  // If yes, return false - this function should be called for another suballocItem as starting point.
4583  if(*pOffset >= suballocItem->offset + suballocItem->size)
4584  {
4585  return false;
4586  }
4587 
4588  // Calculate padding at the beginning based on current offset.
4589  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4590 
4591  // Calculate required margin at the end if this is not last suballocation.
4592  VmaSuballocationList::const_iterator next = suballocItem;
4593  ++next;
4594  const VkDeviceSize requiredEndMargin =
4595  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4596 
4597  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4598  // Another early return check.
4599  if(suballocItem->offset + totalSize > m_Size)
4600  {
4601  return false;
4602  }
4603 
4604  // Advance lastSuballocItem until desired size is reached.
4605  // Update itemsToMakeLostCount.
4606  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4607  if(totalSize > suballocItem->size)
4608  {
4609  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4610  while(remainingSize > 0)
4611  {
4612  ++lastSuballocItem;
4613  if(lastSuballocItem == m_Suballocations.cend())
4614  {
4615  return false;
4616  }
4617  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4618  {
4619  *pSumFreeSize += lastSuballocItem->size;
4620  }
4621  else
4622  {
4623  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4624  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4625  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4626  {
4627  ++*itemsToMakeLostCount;
4628  *pSumItemSize += lastSuballocItem->size;
4629  }
4630  else
4631  {
4632  return false;
4633  }
4634  }
4635  remainingSize = (lastSuballocItem->size < remainingSize) ?
4636  remainingSize - lastSuballocItem->size : 0;
4637  }
4638  }
4639 
4640  // Check next suballocations for BufferImageGranularity conflicts.
4641  // If conflict exists, we must mark more allocations lost or fail.
4642  if(bufferImageGranularity > 1)
4643  {
4644  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4645  ++nextSuballocItem;
4646  while(nextSuballocItem != m_Suballocations.cend())
4647  {
4648  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4649  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4650  {
4651  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4652  {
4653  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4654  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4655  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4656  {
4657  ++*itemsToMakeLostCount;
4658  }
4659  else
4660  {
4661  return false;
4662  }
4663  }
4664  }
4665  else
4666  {
4667  // Already on next page.
4668  break;
4669  }
4670  ++nextSuballocItem;
4671  }
4672  }
4673  }
4674  else
4675  {
4676  const VmaSuballocation& suballoc = *suballocItem;
4677  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4678 
4679  *pSumFreeSize = suballoc.size;
4680 
4681  // Size of this suballocation is too small for this request: Early return.
4682  if(suballoc.size < allocSize)
4683  {
4684  return false;
4685  }
4686 
4687  // Start from offset equal to beginning of this suballocation.
4688  *pOffset = suballoc.offset;
4689 
4690  // Apply VMA_DEBUG_MARGIN at the beginning.
4691  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4692  {
4693  *pOffset += VMA_DEBUG_MARGIN;
4694  }
4695 
4696  // Apply alignment.
4697  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4698  *pOffset = VmaAlignUp(*pOffset, alignment);
4699 
4700  // Check previous suballocations for BufferImageGranularity conflicts.
4701  // Make bigger alignment if necessary.
4702  if(bufferImageGranularity > 1)
4703  {
4704  bool bufferImageGranularityConflict = false;
4705  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4706  while(prevSuballocItem != m_Suballocations.cbegin())
4707  {
4708  --prevSuballocItem;
4709  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4710  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4711  {
4712  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4713  {
4714  bufferImageGranularityConflict = true;
4715  break;
4716  }
4717  }
4718  else
4719  // Already on previous page.
4720  break;
4721  }
4722  if(bufferImageGranularityConflict)
4723  {
4724  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4725  }
4726  }
4727 
4728  // Calculate padding at the beginning based on current offset.
4729  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4730 
4731  // Calculate required margin at the end if this is not last suballocation.
4732  VmaSuballocationList::const_iterator next = suballocItem;
4733  ++next;
4734  const VkDeviceSize requiredEndMargin =
4735  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4736 
4737  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4738  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4739  {
4740  return false;
4741  }
4742 
4743  // Check next suballocations for BufferImageGranularity conflicts.
4744  // If conflict exists, allocation cannot be made here.
4745  if(bufferImageGranularity > 1)
4746  {
4747  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4748  ++nextSuballocItem;
4749  while(nextSuballocItem != m_Suballocations.cend())
4750  {
4751  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4752  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4753  {
4754  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4755  {
4756  return false;
4757  }
4758  }
4759  else
4760  {
4761  // Already on next page.
4762  break;
4763  }
4764  ++nextSuballocItem;
4765  }
4766  }
4767  }
4768 
4769  // All tests passed: Success. pOffset is already filled.
4770  return true;
4771 }
4772 
4773 bool VmaDeviceMemoryBlock::IsEmpty() const
4774 {
4775  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4776 }
4777 
4778 void VmaDeviceMemoryBlock::Alloc(
4779  const VmaAllocationRequest& request,
4780  VmaSuballocationType type,
4781  VkDeviceSize allocSize,
4782  VmaAllocation hAllocation)
4783 {
4784  VMA_ASSERT(request.item != m_Suballocations.end());
4785  VmaSuballocation& suballoc = *request.item;
4786  // Given suballocation is a free block.
4787  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4788  // Given offset is inside this suballocation.
4789  VMA_ASSERT(request.offset >= suballoc.offset);
4790  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4791  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4792  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4793 
4794  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4795  // it to become used.
4796  UnregisterFreeSuballocation(request.item);
4797 
4798  suballoc.offset = request.offset;
4799  suballoc.size = allocSize;
4800  suballoc.type = type;
4801  suballoc.hAllocation = hAllocation;
4802 
4803  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4804  if(paddingEnd)
4805  {
4806  VmaSuballocation paddingSuballoc = {};
4807  paddingSuballoc.offset = request.offset + allocSize;
4808  paddingSuballoc.size = paddingEnd;
4809  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4810  VmaSuballocationList::iterator next = request.item;
4811  ++next;
4812  const VmaSuballocationList::iterator paddingEndItem =
4813  m_Suballocations.insert(next, paddingSuballoc);
4814  RegisterFreeSuballocation(paddingEndItem);
4815  }
4816 
4817  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4818  if(paddingBegin)
4819  {
4820  VmaSuballocation paddingSuballoc = {};
4821  paddingSuballoc.offset = request.offset - paddingBegin;
4822  paddingSuballoc.size = paddingBegin;
4823  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4824  const VmaSuballocationList::iterator paddingBeginItem =
4825  m_Suballocations.insert(request.item, paddingSuballoc);
4826  RegisterFreeSuballocation(paddingBeginItem);
4827  }
4828 
4829  // Update totals.
4830  m_FreeCount = m_FreeCount - 1;
4831  if(paddingBegin > 0)
4832  {
4833  ++m_FreeCount;
4834  }
4835  if(paddingEnd > 0)
4836  {
4837  ++m_FreeCount;
4838  }
4839  m_SumFreeSize -= allocSize;
4840 }
4841 
4842 VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
4843 {
4844  // Change this suballocation to be marked as free.
4845  VmaSuballocation& suballoc = *suballocItem;
4846  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4847  suballoc.hAllocation = VK_NULL_HANDLE;
4848 
4849  // Update totals.
4850  ++m_FreeCount;
4851  m_SumFreeSize += suballoc.size;
4852 
4853  // Merge with previous and/or next suballocation if it's also free.
4854  bool mergeWithNext = false;
4855  bool mergeWithPrev = false;
4856 
4857  VmaSuballocationList::iterator nextItem = suballocItem;
4858  ++nextItem;
4859  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
4860  {
4861  mergeWithNext = true;
4862  }
4863 
4864  VmaSuballocationList::iterator prevItem = suballocItem;
4865  if(suballocItem != m_Suballocations.begin())
4866  {
4867  --prevItem;
4868  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4869  {
4870  mergeWithPrev = true;
4871  }
4872  }
4873 
4874  if(mergeWithNext)
4875  {
4876  UnregisterFreeSuballocation(nextItem);
4877  MergeFreeWithNext(suballocItem);
4878  }
4879 
4880  if(mergeWithPrev)
4881  {
4882  UnregisterFreeSuballocation(prevItem);
4883  MergeFreeWithNext(prevItem);
4884  RegisterFreeSuballocation(prevItem);
4885  return prevItem;
4886  }
4887  else
4888  {
4889  RegisterFreeSuballocation(suballocItem);
4890  return suballocItem;
4891  }
4892 }
4893 
4894 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4895 {
4896  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4897  suballocItem != m_Suballocations.end();
4898  ++suballocItem)
4899  {
4900  VmaSuballocation& suballoc = *suballocItem;
4901  if(suballoc.hAllocation == allocation)
4902  {
4903  FreeSuballocation(suballocItem);
4904  VMA_HEAVY_ASSERT(Validate());
4905  return;
4906  }
4907  }
4908  VMA_ASSERT(0 && "Not found!");
4909 }
4910 
4911 #if VMA_STATS_STRING_ENABLED
4912 
4913 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4914 {
4915  json.BeginObject();
4916 
4917  json.WriteString("TotalBytes");
4918  json.WriteNumber(m_Size);
4919 
4920  json.WriteString("UnusedBytes");
4921  json.WriteNumber(m_SumFreeSize);
4922 
4923  json.WriteString("Allocations");
4924  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4925 
4926  json.WriteString("UnusedRanges");
4927  json.WriteNumber(m_FreeCount);
4928 
4929  json.WriteString("Suballocations");
4930  json.BeginArray();
4931  size_t i = 0;
4932  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4933  suballocItem != m_Suballocations.cend();
4934  ++suballocItem, ++i)
4935  {
4936  json.BeginObject(true);
4937 
4938  json.WriteString("Type");
4939  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4940 
4941  json.WriteString("Size");
4942  json.WriteNumber(suballocItem->size);
4943 
4944  json.WriteString("Offset");
4945  json.WriteNumber(suballocItem->offset);
4946 
4947  json.EndObject();
4948  }
4949  json.EndArray();
4950 
4951  json.EndObject();
4952 }
4953 
4954 #endif // #if VMA_STATS_STRING_ENABLED
4955 
4956 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4957 {
4958  VMA_ASSERT(item != m_Suballocations.end());
4959  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4960 
4961  VmaSuballocationList::iterator nextItem = item;
4962  ++nextItem;
4963  VMA_ASSERT(nextItem != m_Suballocations.end());
4964  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4965 
4966  item->size += nextItem->size;
4967  --m_FreeCount;
4968  m_Suballocations.erase(nextItem);
4969 }
4970 
4971 void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
4972 {
4973  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4974  VMA_ASSERT(item->size > 0);
4975 
4976  // You may want to enable this validation at the beginning or at the end of
4977  // this function, depending on what do you want to check.
4978  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4979 
4980  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4981  {
4982  if(m_FreeSuballocationsBySize.empty())
4983  {
4984  m_FreeSuballocationsBySize.push_back(item);
4985  }
4986  else
4987  {
4988  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
4989  }
4990  }
4991 
4992  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4993 }
4994 
4995 
4996 void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
4997 {
4998  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4999  VMA_ASSERT(item->size > 0);
5000 
5001  // You may want to enable this validation at the beginning or at the end of
5002  // this function, depending on what do you want to check.
5003  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5004 
5005  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5006  {
5007  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5008  m_FreeSuballocationsBySize.data(),
5009  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5010  item,
5011  VmaSuballocationItemSizeLess());
5012  for(size_t index = it - m_FreeSuballocationsBySize.data();
5013  index < m_FreeSuballocationsBySize.size();
5014  ++index)
5015  {
5016  if(m_FreeSuballocationsBySize[index] == item)
5017  {
5018  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5019  return;
5020  }
5021  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5022  }
5023  VMA_ASSERT(0 && "Not found.");
5024  }
5025 
5026  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5027 }
5028 
5029 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
5030 {
5031  VkDeviceSize lastSize = 0;
5032  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5033  {
5034  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5035 
5036  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5037  {
5038  VMA_ASSERT(0);
5039  return false;
5040  }
5041  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5042  {
5043  VMA_ASSERT(0);
5044  return false;
5045  }
5046  if(it->size < lastSize)
5047  {
5048  VMA_ASSERT(0);
5049  return false;
5050  }
5051 
5052  lastSize = it->size;
5053  }
5054  return true;
5055 }
5056 
5057 static void InitStatInfo(VmaStatInfo& outInfo)
5058 {
5059  memset(&outInfo, 0, sizeof(outInfo));
5060  outInfo.AllocationSizeMin = UINT64_MAX;
5061  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5062 }
5063 
5064 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
5065 {
5066  outInfo.BlockCount = 1;
5067 
5068  const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
5069  outInfo.AllocationCount = rangeCount - block.m_FreeCount;
5070  outInfo.UnusedRangeCount = block.m_FreeCount;
5071 
5072  outInfo.UnusedBytes = block.m_SumFreeSize;
5073  outInfo.UsedBytes = block.m_Size - outInfo.UnusedBytes;
5074 
5075  outInfo.AllocationSizeMin = UINT64_MAX;
5076  outInfo.AllocationSizeMax = 0;
5077  outInfo.UnusedRangeSizeMin = UINT64_MAX;
5078  outInfo.UnusedRangeSizeMax = 0;
5079 
5080  for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
5081  suballocItem != block.m_Suballocations.cend();
5082  ++suballocItem)
5083  {
5084  const VmaSuballocation& suballoc = *suballocItem;
5085  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5086  {
5087  outInfo.AllocationSizeMin = VMA_MIN(outInfo.AllocationSizeMin, suballoc.size);
5088  outInfo.AllocationSizeMax = VMA_MAX(outInfo.AllocationSizeMax, suballoc.size);
5089  }
5090  else
5091  {
5092  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
5093  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
5094  }
5095  }
5096 }
5097 
5098 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5099 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5100 {
5101  inoutInfo.BlockCount += srcInfo.BlockCount;
5102  inoutInfo.AllocationCount += srcInfo.AllocationCount;
5103  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
5104  inoutInfo.UsedBytes += srcInfo.UsedBytes;
5105  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
5106  inoutInfo.AllocationSizeMin = VMA_MIN(inoutInfo.AllocationSizeMin, srcInfo.AllocationSizeMin);
5107  inoutInfo.AllocationSizeMax = VMA_MAX(inoutInfo.AllocationSizeMax, srcInfo.AllocationSizeMax);
5108  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
5109  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
5110 }
5111 
5112 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5113 {
5114  inoutInfo.AllocationSizeAvg = (inoutInfo.AllocationCount > 0) ?
5115  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.AllocationCount) : 0;
5116  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
5117  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
5118 }
5119 
5120 VmaPool_T::VmaPool_T(
5121  VmaAllocator hAllocator,
5122  const VmaPoolCreateInfo& createInfo) :
5123  m_BlockVector(
5124  hAllocator,
5125  createInfo.memoryTypeIndex,
5126  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5127  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5128  createInfo.blockSize,
5129  createInfo.minBlockCount,
5130  createInfo.maxBlockCount,
5131  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5132  createInfo.frameInUseCount,
5133  true) // isCustomPool
5134 {
5135 }
5136 
5137 VmaPool_T::~VmaPool_T()
5138 {
5139 }
5140 
5141 #if VMA_STATS_STRING_ENABLED
5142 
5143 #endif // #if VMA_STATS_STRING_ENABLED
5144 
5145 VmaBlockVector::VmaBlockVector(
5146  VmaAllocator hAllocator,
5147  uint32_t memoryTypeIndex,
5148  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5149  VkDeviceSize preferredBlockSize,
5150  size_t minBlockCount,
5151  size_t maxBlockCount,
5152  VkDeviceSize bufferImageGranularity,
5153  uint32_t frameInUseCount,
5154  bool isCustomPool) :
5155  m_hAllocator(hAllocator),
5156  m_MemoryTypeIndex(memoryTypeIndex),
5157  m_BlockVectorType(blockVectorType),
5158  m_PreferredBlockSize(preferredBlockSize),
5159  m_MinBlockCount(minBlockCount),
5160  m_MaxBlockCount(maxBlockCount),
5161  m_BufferImageGranularity(bufferImageGranularity),
5162  m_FrameInUseCount(frameInUseCount),
5163  m_IsCustomPool(isCustomPool),
5164  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5165  m_HasEmptyBlock(false),
5166  m_pDefragmentator(VMA_NULL)
5167 {
5168 }
5169 
5170 VmaBlockVector::~VmaBlockVector()
5171 {
5172  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5173 
5174  for(size_t i = m_Blocks.size(); i--; )
5175  {
5176  m_Blocks[i]->Destroy(m_hAllocator);
5177  vma_delete(m_hAllocator, m_Blocks[i]);
5178  }
5179 }
5180 
5181 VkResult VmaBlockVector::CreateMinBlocks()
5182 {
5183  for(size_t i = 0; i < m_MinBlockCount; ++i)
5184  {
5185  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5186  if(res != VK_SUCCESS)
5187  {
5188  return res;
5189  }
5190  }
5191  return VK_SUCCESS;
5192 }
5193 
5194 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5195 {
5196  pStats->size = 0;
5197  pStats->unusedSize = 0;
5198  pStats->allocationCount = 0;
5199  pStats->unusedRangeCount = 0;
5200 
5201  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5202 
5203  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5204  {
5205  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5206  VMA_ASSERT(pBlock);
5207  VMA_HEAVY_ASSERT(pBlock->Validate());
5208 
5209  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5210 
5211  pStats->size += pBlock->m_Size;
5212  pStats->unusedSize += pBlock->m_SumFreeSize;
5213  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5214  pStats->unusedRangeCount += pBlock->m_FreeCount;
5215  }
5216 }
5217 
5218 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5219 
5220 VkResult VmaBlockVector::Allocate(
5221  VmaPool hCurrentPool,
5222  uint32_t currentFrameIndex,
5223  const VkMemoryRequirements& vkMemReq,
5224  const VmaAllocationCreateInfo& createInfo,
5225  VmaSuballocationType suballocType,
5226  VmaAllocation* pAllocation)
5227 {
5228  // Validate flags.
5229  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5230  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5231  {
5232  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5233  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5234  }
5235 
5236  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5237 
5238  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5239  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5240  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5241  {
5242  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5243  VMA_ASSERT(pCurrBlock);
5244  VmaAllocationRequest currRequest = {};
5245  if(pCurrBlock->CreateAllocationRequest(
5246  currentFrameIndex,
5247  m_FrameInUseCount,
5248  m_BufferImageGranularity,
5249  vkMemReq.size,
5250  vkMemReq.alignment,
5251  suballocType,
5252  false, // canMakeOtherLost
5253  &currRequest))
5254  {
5255  // Allocate from pCurrBlock.
5256  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5257 
5258  // We no longer have an empty Allocation.
5259  if(pCurrBlock->IsEmpty())
5260  {
5261  m_HasEmptyBlock = false;
5262  }
5263 
5264  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5265  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5266  (*pAllocation)->InitBlockAllocation(
5267  hCurrentPool,
5268  pCurrBlock,
5269  currRequest.offset,
5270  vkMemReq.alignment,
5271  vkMemReq.size,
5272  suballocType,
5273  createInfo.pUserData,
5274  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5275  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5276  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5277  return VK_SUCCESS;
5278  }
5279  }
5280 
5281  const bool canCreateNewBlock =
5282  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5283  (m_Blocks.size() < m_MaxBlockCount);
5284 
5285  // 2. Try to create new block.
5286  if(canCreateNewBlock)
5287  {
5288  // 2.1. Start with full preferredBlockSize.
5289  VkDeviceSize blockSize = m_PreferredBlockSize;
5290  size_t newBlockIndex = 0;
5291  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5292  // Allocating blocks of other sizes is allowed only in default pools.
5293  // In custom pools block size is fixed.
5294  if(res < 0 && m_IsCustomPool == false)
5295  {
5296  // 2.2. Try half the size.
5297  blockSize /= 2;
5298  if(blockSize >= vkMemReq.size)
5299  {
5300  res = CreateBlock(blockSize, &newBlockIndex);
5301  if(res < 0)
5302  {
5303  // 2.3. Try quarter the size.
5304  blockSize /= 2;
5305  if(blockSize >= vkMemReq.size)
5306  {
5307  res = CreateBlock(blockSize, &newBlockIndex);
5308  }
5309  }
5310  }
5311  }
5312  if(res == VK_SUCCESS)
5313  {
5314  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5315  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5316 
5317  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
5318  VmaAllocationRequest allocRequest = {};
5319  allocRequest.item = pBlock->m_Suballocations.begin();
5320  allocRequest.offset = 0;
5321  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5322  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5323  (*pAllocation)->InitBlockAllocation(
5324  hCurrentPool,
5325  pBlock,
5326  allocRequest.offset,
5327  vkMemReq.alignment,
5328  vkMemReq.size,
5329  suballocType,
5330  createInfo.pUserData,
5331  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5332  VMA_HEAVY_ASSERT(pBlock->Validate());
5333  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
5334 
5335  return VK_SUCCESS;
5336  }
5337  }
5338 
5339  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5340 
5341  // 3. Try to allocate from existing blocks with making other allocations lost.
5342  if(canMakeOtherLost)
5343  {
5344  uint32_t tryIndex = 0;
5345  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5346  {
5347  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5348  VmaAllocationRequest bestRequest = {};
5349  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5350 
5351  // 1. Search existing allocations.
5352  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5353  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5354  {
5355  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5356  VMA_ASSERT(pCurrBlock);
5357  VmaAllocationRequest currRequest = {};
5358  if(pCurrBlock->CreateAllocationRequest(
5359  currentFrameIndex,
5360  m_FrameInUseCount,
5361  m_BufferImageGranularity,
5362  vkMemReq.size,
5363  vkMemReq.alignment,
5364  suballocType,
5365  canMakeOtherLost,
5366  &currRequest))
5367  {
5368  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5369  if(pBestRequestBlock == VMA_NULL ||
5370  currRequestCost < bestRequestCost)
5371  {
5372  pBestRequestBlock = pCurrBlock;
5373  bestRequest = currRequest;
5374  bestRequestCost = currRequestCost;
5375 
5376  if(bestRequestCost == 0)
5377  {
5378  break;
5379  }
5380  }
5381  }
5382  }
5383 
5384  if(pBestRequestBlock != VMA_NULL)
5385  {
5386  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5387  currentFrameIndex,
5388  m_FrameInUseCount,
5389  &bestRequest))
5390  {
5391  // We no longer have an empty Allocation.
5392  if(pBestRequestBlock->IsEmpty())
5393  {
5394  m_HasEmptyBlock = false;
5395  }
5396  // Allocate from this pBlock.
5397  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5398  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5399  (*pAllocation)->InitBlockAllocation(
5400  hCurrentPool,
5401  pBestRequestBlock,
5402  bestRequest.offset,
5403  vkMemReq.alignment,
5404  vkMemReq.size,
5405  suballocType,
5406  createInfo.pUserData,
5407  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5408  VMA_HEAVY_ASSERT(pBlock->Validate());
5409  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5410  return VK_SUCCESS;
5411  }
5412  // else: Some allocations must have been touched while we are here. Next try.
5413  }
5414  else
5415  {
5416  // Could not find place in any of the blocks - break outer loop.
5417  break;
5418  }
5419  }
5420  /* Maximum number of tries exceeded - a very unlike event when many other
5421  threads are simultaneously touching allocations making it impossible to make
5422  lost at the same time as we try to allocate. */
5423  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5424  {
5425  return VK_ERROR_TOO_MANY_OBJECTS;
5426  }
5427  }
5428 
5429  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5430 }
5431 
5432 void VmaBlockVector::Free(
5433  VmaAllocation hAllocation)
5434 {
5435  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5436 
5437  // Scope for lock.
5438  {
5439  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5440 
5441  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5442 
5443  pBlock->Free(hAllocation);
5444  VMA_HEAVY_ASSERT(pBlock->Validate());
5445 
5446  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
5447 
5448  // pBlock became empty after this deallocation.
5449  if(pBlock->IsEmpty())
5450  {
5451  // Already has empty Allocation. We don't want to have two, so delete this one.
5452  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5453  {
5454  pBlockToDelete = pBlock;
5455  Remove(pBlock);
5456  }
5457  // We now have first empty Allocation.
5458  else
5459  {
5460  m_HasEmptyBlock = true;
5461  }
5462  }
5463  // Must be called after srcBlockIndex is used, because later it may become invalid!
5464  IncrementallySortBlocks();
5465  }
5466 
5467  // Destruction of a free Allocation. Deferred until this point, outside of mutex
5468  // lock, for performance reason.
5469  if(pBlockToDelete != VMA_NULL)
5470  {
5471  VMA_DEBUG_LOG(" Deleted empty allocation");
5472  pBlockToDelete->Destroy(m_hAllocator);
5473  vma_delete(m_hAllocator, pBlockToDelete);
5474  }
5475 }
5476 
5477 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5478 {
5479  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5480  {
5481  if(m_Blocks[blockIndex] == pBlock)
5482  {
5483  VmaVectorRemove(m_Blocks, blockIndex);
5484  return;
5485  }
5486  }
5487  VMA_ASSERT(0);
5488 }
5489 
5490 void VmaBlockVector::IncrementallySortBlocks()
5491 {
5492  // Bubble sort only until first swap.
5493  for(size_t i = 1; i < m_Blocks.size(); ++i)
5494  {
5495  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5496  {
5497  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5498  return;
5499  }
5500  }
5501 }
5502 
5503 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5504 {
5505  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5506  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5507  allocInfo.allocationSize = blockSize;
5508  VkDeviceMemory mem = VK_NULL_HANDLE;
5509  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5510  if(res < 0)
5511  {
5512  return res;
5513  }
5514 
5515  // New VkDeviceMemory successfully created.
5516 
5517  // Map memory if needed.
5518  void* pMappedData = VMA_NULL;
5519  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5520  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5521  {
5522  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5523  m_hAllocator->m_hDevice,
5524  mem,
5525  0,
5526  VK_WHOLE_SIZE,
5527  0,
5528  &pMappedData);
5529  if(res < 0)
5530  {
5531  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5532  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5533  return res;
5534  }
5535  }
5536 
5537  // Create new Allocation for it.
5538  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5539  pBlock->Init(
5540  m_MemoryTypeIndex,
5541  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5542  mem,
5543  allocInfo.allocationSize,
5544  persistentMap,
5545  pMappedData);
5546 
5547  m_Blocks.push_back(pBlock);
5548  if(pNewBlockIndex != VMA_NULL)
5549  {
5550  *pNewBlockIndex = m_Blocks.size() - 1;
5551  }
5552 
5553  return VK_SUCCESS;
5554 }
5555 
5556 #if VMA_STATS_STRING_ENABLED
5557 
5558 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5559 {
5560  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5561 
5562  json.BeginObject();
5563 
5564  if(m_IsCustomPool)
5565  {
5566  json.WriteString("MemoryTypeIndex");
5567  json.WriteNumber(m_MemoryTypeIndex);
5568 
5569  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5570  {
5571  json.WriteString("Mapped");
5572  json.WriteBool(true);
5573  }
5574 
5575  json.WriteString("BlockSize");
5576  json.WriteNumber(m_PreferredBlockSize);
5577 
5578  json.WriteString("BlockCount");
5579  json.BeginObject(true);
5580  if(m_MinBlockCount > 0)
5581  {
5582  json.WriteString("Min");
5583  json.WriteNumber(m_MinBlockCount);
5584  }
5585  if(m_MaxBlockCount < SIZE_MAX)
5586  {
5587  json.WriteString("Max");
5588  json.WriteNumber(m_MaxBlockCount);
5589  }
5590  json.WriteString("Cur");
5591  json.WriteNumber(m_Blocks.size());
5592  json.EndObject();
5593 
5594  if(m_FrameInUseCount > 0)
5595  {
5596  json.WriteString("FrameInUseCount");
5597  json.WriteNumber(m_FrameInUseCount);
5598  }
5599  }
5600  else
5601  {
5602  json.WriteString("PreferredBlockSize");
5603  json.WriteNumber(m_PreferredBlockSize);
5604  }
5605 
5606  json.WriteString("Blocks");
5607  json.BeginArray();
5608  for(size_t i = 0; i < m_Blocks.size(); ++i)
5609  {
5610  m_Blocks[i]->PrintDetailedMap(json);
5611  }
5612  json.EndArray();
5613 
5614  json.EndObject();
5615 }
5616 
5617 #endif // #if VMA_STATS_STRING_ENABLED
5618 
5619 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5620 {
5621  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5622 
5623  for(size_t i = m_Blocks.size(); i--; )
5624  {
5625  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5626  if(pBlock->m_pMappedData != VMA_NULL)
5627  {
5628  VMA_ASSERT(pBlock->m_PersistentMap != false);
5629  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5630  pBlock->m_pMappedData = VMA_NULL;
5631  }
5632  }
5633 }
5634 
5635 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5636 {
5637  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5638 
5639  VkResult finalResult = VK_SUCCESS;
5640  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5641  {
5642  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5643  if(pBlock->m_PersistentMap)
5644  {
5645  VMA_ASSERT(pBlock->m_pMappedData == nullptr);
5646  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5647  m_hAllocator->m_hDevice,
5648  pBlock->m_hMemory,
5649  0,
5650  VK_WHOLE_SIZE,
5651  0,
5652  &pBlock->m_pMappedData);
5653  if(localResult != VK_SUCCESS)
5654  {
5655  finalResult = localResult;
5656  }
5657  }
5658  }
5659  return finalResult;
5660 }
5661 
5662 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5663  VmaAllocator hAllocator,
5664  uint32_t currentFrameIndex)
5665 {
5666  if(m_pDefragmentator == VMA_NULL)
5667  {
5668  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5669  hAllocator,
5670  this,
5671  currentFrameIndex);
5672  }
5673 
5674  return m_pDefragmentator;
5675 }
5676 
5677 VkResult VmaBlockVector::Defragment(
5678  VmaDefragmentationStats* pDefragmentationStats,
5679  VkDeviceSize& maxBytesToMove,
5680  uint32_t& maxAllocationsToMove)
5681 {
5682  if(m_pDefragmentator == VMA_NULL)
5683  {
5684  return VK_SUCCESS;
5685  }
5686 
5687  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5688 
5689  // Defragment.
5690  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5691 
5692  // Accumulate statistics.
5693  if(pDefragmentationStats != VMA_NULL)
5694  {
5695  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5696  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5697  pDefragmentationStats->bytesMoved += bytesMoved;
5698  pDefragmentationStats->allocationsMoved += allocationsMoved;
5699  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5700  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5701  maxBytesToMove -= bytesMoved;
5702  maxAllocationsToMove -= allocationsMoved;
5703  }
5704 
5705  // Free empty blocks.
5706  m_HasEmptyBlock = false;
5707  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5708  {
5709  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5710  if(pBlock->IsEmpty())
5711  {
5712  if(m_Blocks.size() > m_MinBlockCount)
5713  {
5714  if(pDefragmentationStats != VMA_NULL)
5715  {
5716  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5717  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5718  }
5719 
5720  VmaVectorRemove(m_Blocks, blockIndex);
5721  pBlock->Destroy(m_hAllocator);
5722  vma_delete(m_hAllocator, pBlock);
5723  }
5724  else
5725  {
5726  m_HasEmptyBlock = true;
5727  }
5728  }
5729  }
5730 
5731  return result;
5732 }
5733 
5734 void VmaBlockVector::DestroyDefragmentator()
5735 {
5736  if(m_pDefragmentator != VMA_NULL)
5737  {
5738  vma_delete(m_hAllocator, m_pDefragmentator);
5739  m_pDefragmentator = VMA_NULL;
5740  }
5741 }
5742 
5743 void VmaBlockVector::MakePoolAllocationsLost(
5744  uint32_t currentFrameIndex,
5745  size_t* pLostAllocationCount)
5746 {
5747  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5748 
5749  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5750  {
5751  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5752  VMA_ASSERT(pBlock);
5753  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5754  }
5755 }
5756 
5757 void VmaBlockVector::AddStats(VmaStats* pStats)
5758 {
5759  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5760  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5761 
5762  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5763 
5764  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5765  {
5766  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5767  VMA_ASSERT(pBlock);
5768  VMA_HEAVY_ASSERT(pBlock->Validate());
5769  VmaStatInfo allocationStatInfo;
5770  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5771  VmaAddStatInfo(pStats->total, allocationStatInfo);
5772  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5773  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5774  }
5775 }
5776 
5778 // VmaDefragmentator members definition
5779 
5780 VmaDefragmentator::VmaDefragmentator(
5781  VmaAllocator hAllocator,
5782  VmaBlockVector* pBlockVector,
5783  uint32_t currentFrameIndex) :
5784  m_hAllocator(hAllocator),
5785  m_pBlockVector(pBlockVector),
5786  m_CurrentFrameIndex(currentFrameIndex),
5787  m_BytesMoved(0),
5788  m_AllocationsMoved(0),
5789  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5790  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5791 {
5792 }
5793 
5794 VmaDefragmentator::~VmaDefragmentator()
5795 {
5796  for(size_t i = m_Blocks.size(); i--; )
5797  {
5798  vma_delete(m_hAllocator, m_Blocks[i]);
5799  }
5800 }
5801 
5802 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5803 {
5804  AllocationInfo allocInfo;
5805  allocInfo.m_hAllocation = hAlloc;
5806  allocInfo.m_pChanged = pChanged;
5807  m_Allocations.push_back(allocInfo);
5808 }
5809 
5810 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5811 {
5812  // It has already been mapped for defragmentation.
5813  if(m_pMappedDataForDefragmentation)
5814  {
5815  *ppMappedData = m_pMappedDataForDefragmentation;
5816  return VK_SUCCESS;
5817  }
5818 
5819  // It is persistently mapped.
5820  if(m_pBlock->m_PersistentMap)
5821  {
5822  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5823  *ppMappedData = m_pBlock->m_pMappedData;
5824  return VK_SUCCESS;
5825  }
5826 
5827  // Map on first usage.
5828  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5829  hAllocator->m_hDevice,
5830  m_pBlock->m_hMemory,
5831  0,
5832  VK_WHOLE_SIZE,
5833  0,
5834  &m_pMappedDataForDefragmentation);
5835  *ppMappedData = m_pMappedDataForDefragmentation;
5836  return res;
5837 }
5838 
5839 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5840 {
5841  if(m_pMappedDataForDefragmentation != VMA_NULL)
5842  {
5843  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5844  }
5845 }
5846 
5847 VkResult VmaDefragmentator::DefragmentRound(
5848  VkDeviceSize maxBytesToMove,
5849  uint32_t maxAllocationsToMove)
5850 {
5851  if(m_Blocks.empty())
5852  {
5853  return VK_SUCCESS;
5854  }
5855 
5856  size_t srcBlockIndex = m_Blocks.size() - 1;
5857  size_t srcAllocIndex = SIZE_MAX;
5858  for(;;)
5859  {
5860  // 1. Find next allocation to move.
5861  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5862  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5863  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5864  {
5865  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5866  {
5867  // Finished: no more allocations to process.
5868  if(srcBlockIndex == 0)
5869  {
5870  return VK_SUCCESS;
5871  }
5872  else
5873  {
5874  --srcBlockIndex;
5875  srcAllocIndex = SIZE_MAX;
5876  }
5877  }
5878  else
5879  {
5880  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5881  }
5882  }
5883 
5884  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5885  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5886 
5887  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5888  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5889  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5890  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5891 
5892  // 2. Try to find new place for this allocation in preceding or current block.
5893  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5894  {
5895  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5896  VmaAllocationRequest dstAllocRequest;
5897  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
5898  m_CurrentFrameIndex,
5899  m_pBlockVector->GetFrameInUseCount(),
5900  m_pBlockVector->GetBufferImageGranularity(),
5901  size,
5902  alignment,
5903  suballocType,
5904  false, // canMakeOtherLost
5905  &dstAllocRequest) &&
5906  MoveMakesSense(
5907  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5908  {
5909  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5910 
5911  // Reached limit on number of allocations or bytes to move.
5912  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5913  (m_BytesMoved + size > maxBytesToMove))
5914  {
5915  return VK_INCOMPLETE;
5916  }
5917 
5918  void* pDstMappedData = VMA_NULL;
5919  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
5920  if(res != VK_SUCCESS)
5921  {
5922  return res;
5923  }
5924 
5925  void* pSrcMappedData = VMA_NULL;
5926  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
5927  if(res != VK_SUCCESS)
5928  {
5929  return res;
5930  }
5931 
5932  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
5933  memcpy(
5934  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
5935  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
5936  static_cast<size_t>(size));
5937 
5938  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
5939  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
5940 
5941  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
5942 
5943  if(allocInfo.m_pChanged != VMA_NULL)
5944  {
5945  *allocInfo.m_pChanged = VK_TRUE;
5946  }
5947 
5948  ++m_AllocationsMoved;
5949  m_BytesMoved += size;
5950 
5951  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
5952 
5953  break;
5954  }
5955  }
5956 
5957  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
5958 
5959  if(srcAllocIndex > 0)
5960  {
5961  --srcAllocIndex;
5962  }
5963  else
5964  {
5965  if(srcBlockIndex > 0)
5966  {
5967  --srcBlockIndex;
5968  srcAllocIndex = SIZE_MAX;
5969  }
5970  else
5971  {
5972  return VK_SUCCESS;
5973  }
5974  }
5975  }
5976 }
5977 
5978 VkResult VmaDefragmentator::Defragment(
5979  VkDeviceSize maxBytesToMove,
5980  uint32_t maxAllocationsToMove)
5981 {
5982  if(m_Allocations.empty())
5983  {
5984  return VK_SUCCESS;
5985  }
5986 
5987  // Create block info for each block.
5988  const size_t blockCount = m_pBlockVector->m_Blocks.size();
5989  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
5990  {
5991  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
5992  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
5993  m_Blocks.push_back(pBlockInfo);
5994  }
5995 
5996  // Sort them by m_pBlock pointer value.
5997  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
5998 
5999  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
6000  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6001  {
6002  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6003  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
6004  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6005  {
6006  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6007  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6008  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6009  {
6010  (*it)->m_Allocations.push_back(allocInfo);
6011  }
6012  else
6013  {
6014  VMA_ASSERT(0);
6015  }
6016  }
6017  }
6018  m_Allocations.clear();
6019 
6020  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6021  {
6022  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6023  pBlockInfo->CalcHasNonMovableAllocations();
6024  pBlockInfo->SortAllocationsBySizeDescecnding();
6025  }
6026 
6027  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
6028  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6029 
6030  // Execute defragmentation rounds (the main part).
6031  VkResult result = VK_SUCCESS;
6032  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6033  {
6034  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6035  }
6036 
6037  // Unmap blocks that were mapped for defragmentation.
6038  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6039  {
6040  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6041  }
6042 
6043  return result;
6044 }
6045 
6046 bool VmaDefragmentator::MoveMakesSense(
6047  size_t dstBlockIndex, VkDeviceSize dstOffset,
6048  size_t srcBlockIndex, VkDeviceSize srcOffset)
6049 {
6050  if(dstBlockIndex < srcBlockIndex)
6051  {
6052  return true;
6053  }
6054  if(dstBlockIndex > srcBlockIndex)
6055  {
6056  return false;
6057  }
6058  if(dstOffset < srcOffset)
6059  {
6060  return true;
6061  }
6062  return false;
6063 }
6064 
6066 // VmaAllocator_T
6067 
6068 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6069  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6070  m_PhysicalDevice(pCreateInfo->physicalDevice),
6071  m_hDevice(pCreateInfo->device),
6072  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6073  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6074  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6075  m_UnmapPersistentlyMappedMemoryCounter(0),
6076  m_PreferredLargeHeapBlockSize(0),
6077  m_PreferredSmallHeapBlockSize(0),
6078  m_CurrentFrameIndex(0),
6079  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6080 {
6081  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6082 
6083  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
6084  memset(&m_MemProps, 0, sizeof(m_MemProps));
6085  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6086 
6087  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6088  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
6089 
6090  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6091  {
6092  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6093  }
6094 
6095  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6096  {
6097  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6098  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6099  }
6100 
6101  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6102 
6103  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6104  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6105 
6106  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6107  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6108  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6109  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6110 
6111  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6112  {
6113  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6114  {
6115  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6116  if(limit != VK_WHOLE_SIZE)
6117  {
6118  m_HeapSizeLimit[heapIndex] = limit;
6119  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6120  {
6121  m_MemProps.memoryHeaps[heapIndex].size = limit;
6122  }
6123  }
6124  }
6125  }
6126 
6127  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6128  {
6129  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6130 
6131  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6132  {
6133  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6134  this,
6135  memTypeIndex,
6136  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6137  preferredBlockSize,
6138  0,
6139  SIZE_MAX,
6140  GetBufferImageGranularity(),
6141  pCreateInfo->frameInUseCount,
6142  false); // isCustomPool
6143  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6144  // becase minBlockCount is 0.
6145  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6146  }
6147  }
6148 }
6149 
6150 VmaAllocator_T::~VmaAllocator_T()
6151 {
6152  VMA_ASSERT(m_Pools.empty());
6153 
6154  for(size_t i = GetMemoryTypeCount(); i--; )
6155  {
6156  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6157  {
6158  vma_delete(this, m_pOwnAllocations[i][j]);
6159  vma_delete(this, m_pBlockVectors[i][j]);
6160  }
6161  }
6162 }
6163 
6164 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6165 {
6166 #if VMA_STATIC_VULKAN_FUNCTIONS
6167  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6168  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6169  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6170  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6171  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6172  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6173  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6174  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6175  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6176  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6177  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6178  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6179  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6180  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6181 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS
6182 
6183  if(pVulkanFunctions != VMA_NULL)
6184  {
6185  m_VulkanFunctions = *pVulkanFunctions;
6186  }
6187 
6188  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6189  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6190  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6191  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6192  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6193  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6194  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6195  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6196  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6197  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6198  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6199  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6200  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6201  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6202  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6203  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6204 }
6205 
6206 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6207 {
6208  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6209  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6210  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6211  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6212 }
6213 
6214 VkResult VmaAllocator_T::AllocateMemoryOfType(
6215  const VkMemoryRequirements& vkMemReq,
6216  const VmaAllocationCreateInfo& createInfo,
6217  uint32_t memTypeIndex,
6218  VmaSuballocationType suballocType,
6219  VmaAllocation* pAllocation)
6220 {
6221  VMA_ASSERT(pAllocation != VMA_NULL);
6222  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6223 
6224  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6225  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6226  VMA_ASSERT(blockVector);
6227 
6228  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6229  // Heuristics: Allocate own memory if requested size if greater than half of preferred block size.
6230  const bool ownMemory =
6231  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
6232  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
6233  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6234  vkMemReq.size > preferredBlockSize / 2);
6235 
6236  if(ownMemory)
6237  {
6238  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6239  {
6240  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6241  }
6242  else
6243  {
6244  return AllocateOwnMemory(
6245  vkMemReq.size,
6246  suballocType,
6247  memTypeIndex,
6248  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6249  createInfo.pUserData,
6250  pAllocation);
6251  }
6252  }
6253  else
6254  {
6255  VkResult res = blockVector->Allocate(
6256  VK_NULL_HANDLE, // hCurrentPool
6257  m_CurrentFrameIndex.load(),
6258  vkMemReq,
6259  createInfo,
6260  suballocType,
6261  pAllocation);
6262  if(res == VK_SUCCESS)
6263  {
6264  return res;
6265  }
6266 
6267  // 5. Try own memory.
6268  res = AllocateOwnMemory(
6269  vkMemReq.size,
6270  suballocType,
6271  memTypeIndex,
6272  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6273  createInfo.pUserData,
6274  pAllocation);
6275  if(res == VK_SUCCESS)
6276  {
6277  // Succeeded: AllocateOwnMemory function already filld pMemory, nothing more to do here.
6278  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6279  return VK_SUCCESS;
6280  }
6281  else
6282  {
6283  // Everything failed: Return error code.
6284  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6285  return res;
6286  }
6287  }
6288 }
6289 
6290 VkResult VmaAllocator_T::AllocateOwnMemory(
6291  VkDeviceSize size,
6292  VmaSuballocationType suballocType,
6293  uint32_t memTypeIndex,
6294  bool map,
6295  void* pUserData,
6296  VmaAllocation* pAllocation)
6297 {
6298  VMA_ASSERT(pAllocation);
6299 
6300  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6301  allocInfo.memoryTypeIndex = memTypeIndex;
6302  allocInfo.allocationSize = size;
6303 
6304  // Allocate VkDeviceMemory.
6305  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6306  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6307  if(res < 0)
6308  {
6309  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6310  return res;
6311  }
6312 
6313  void* pMappedData = nullptr;
6314  if(map)
6315  {
6316  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6317  {
6318  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6319  if(res < 0)
6320  {
6321  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6322  FreeVulkanMemory(memTypeIndex, size, hMemory);
6323  return res;
6324  }
6325  }
6326  }
6327 
6328  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6329  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6330 
6331  // Register it in m_pOwnAllocations.
6332  {
6333  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6334  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6335  VMA_ASSERT(pOwnAllocations);
6336  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6337  }
6338 
6339  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6340 
6341  return VK_SUCCESS;
6342 }
6343 
6344 VkResult VmaAllocator_T::AllocateMemory(
6345  const VkMemoryRequirements& vkMemReq,
6346  const VmaAllocationCreateInfo& createInfo,
6347  VmaSuballocationType suballocType,
6348  VmaAllocation* pAllocation)
6349 {
6350  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6351  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6352  {
6353  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6354  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6355  }
6356  if((createInfo.pool != VK_NULL_HANDLE) &&
6357  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6358  {
6359  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6360  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6361  }
6362 
6363  if(createInfo.pool != VK_NULL_HANDLE)
6364  {
6365  return createInfo.pool->m_BlockVector.Allocate(
6366  createInfo.pool,
6367  m_CurrentFrameIndex.load(),
6368  vkMemReq,
6369  createInfo,
6370  suballocType,
6371  pAllocation);
6372  }
6373  else
6374  {
6375  // Bit mask of memory Vulkan types acceptable for this allocation.
6376  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6377  uint32_t memTypeIndex = UINT32_MAX;
6378  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6379  if(res == VK_SUCCESS)
6380  {
6381  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6382  // Succeeded on first try.
6383  if(res == VK_SUCCESS)
6384  {
6385  return res;
6386  }
6387  // Allocation from this memory type failed. Try other compatible memory types.
6388  else
6389  {
6390  for(;;)
6391  {
6392  // Remove old memTypeIndex from list of possibilities.
6393  memoryTypeBits &= ~(1u << memTypeIndex);
6394  // Find alternative memTypeIndex.
6395  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6396  if(res == VK_SUCCESS)
6397  {
6398  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6399  // Allocation from this alternative memory type succeeded.
6400  if(res == VK_SUCCESS)
6401  {
6402  return res;
6403  }
6404  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6405  }
6406  // No other matching memory type index could be found.
6407  else
6408  {
6409  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6410  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6411  }
6412  }
6413  }
6414  }
6415  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6416  else
6417  return res;
6418  }
6419 }
6420 
6421 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6422 {
6423  VMA_ASSERT(allocation);
6424 
6425  if(allocation->CanBecomeLost() == false ||
6426  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6427  {
6428  switch(allocation->GetType())
6429  {
6430  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6431  {
6432  VmaBlockVector* pBlockVector = VMA_NULL;
6433  VmaPool hPool = allocation->GetPool();
6434  if(hPool != VK_NULL_HANDLE)
6435  {
6436  pBlockVector = &hPool->m_BlockVector;
6437  }
6438  else
6439  {
6440  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6441  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6442  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6443  }
6444  pBlockVector->Free(allocation);
6445  }
6446  break;
6447  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6448  FreeOwnMemory(allocation);
6449  break;
6450  default:
6451  VMA_ASSERT(0);
6452  }
6453  }
6454 
6455  vma_delete(this, allocation);
6456 }
6457 
6458 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6459 {
6460  // Initialize.
6461  InitStatInfo(pStats->total);
6462  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6463  InitStatInfo(pStats->memoryType[i]);
6464  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6465  InitStatInfo(pStats->memoryHeap[i]);
6466 
6467  // Process default pools.
6468  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6469  {
6470  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6471  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6472  {
6473  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6474  VMA_ASSERT(pBlockVector);
6475  pBlockVector->AddStats(pStats);
6476  }
6477  }
6478 
6479  // Process custom pools.
6480  {
6481  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6482  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6483  {
6484  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6485  }
6486  }
6487 
6488  // Process own allocations.
6489  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6490  {
6491  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6492  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6493  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6494  {
6495  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6496  VMA_ASSERT(pOwnAllocVector);
6497  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6498  {
6499  VmaStatInfo allocationStatInfo;
6500  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6501  VmaAddStatInfo(pStats->total, allocationStatInfo);
6502  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6503  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6504  }
6505  }
6506  }
6507 
6508  // Postprocess.
6509  VmaPostprocessCalcStatInfo(pStats->total);
6510  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6511  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6512  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6513  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6514 }
6515 
6516 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
6517 
6518 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6519 {
6520  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6521  {
6522  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6523  {
6524  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6525  {
6526  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6527  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6528  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6529  {
6530  // Process OwnAllocations.
6531  {
6532  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6533  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6534  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6535  {
6536  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6537  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
6538  }
6539  }
6540 
6541  // Process normal Allocations.
6542  {
6543  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6544  pBlockVector->UnmapPersistentlyMappedMemory();
6545  }
6546  }
6547  }
6548 
6549  // Process custom pools.
6550  {
6551  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6552  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6553  {
6554  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6555  }
6556  }
6557  }
6558  }
6559 }
6560 
6561 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6562 {
6563  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6564  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6565  {
6566  VkResult finalResult = VK_SUCCESS;
6567  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6568  {
6569  // Process custom pools.
6570  {
6571  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6572  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6573  {
6574  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6575  }
6576  }
6577 
6578  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6579  {
6580  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6581  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6582  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6583  {
6584  // Process OwnAllocations.
6585  {
6586  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6587  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6588  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6589  {
6590  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6591  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
6592  }
6593  }
6594 
6595  // Process normal Allocations.
6596  {
6597  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6598  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6599  if(localResult != VK_SUCCESS)
6600  {
6601  finalResult = localResult;
6602  }
6603  }
6604  }
6605  }
6606  }
6607  return finalResult;
6608  }
6609  else
6610  return VK_SUCCESS;
6611 }
6612 
6613 VkResult VmaAllocator_T::Defragment(
6614  VmaAllocation* pAllocations,
6615  size_t allocationCount,
6616  VkBool32* pAllocationsChanged,
6617  const VmaDefragmentationInfo* pDefragmentationInfo,
6618  VmaDefragmentationStats* pDefragmentationStats)
6619 {
6620  if(pAllocationsChanged != VMA_NULL)
6621  {
6622  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
6623  }
6624  if(pDefragmentationStats != VMA_NULL)
6625  {
6626  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6627  }
6628 
6629  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6630  {
6631  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6632  return VK_ERROR_MEMORY_MAP_FAILED;
6633  }
6634 
6635  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6636 
6637  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6638 
6639  const size_t poolCount = m_Pools.size();
6640 
6641  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6642  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6643  {
6644  VmaAllocation hAlloc = pAllocations[allocIndex];
6645  VMA_ASSERT(hAlloc);
6646  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6647  // OwnAlloc cannot be defragmented.
6648  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6649  // Only HOST_VISIBLE memory types can be defragmented.
6650  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6651  // Lost allocation cannot be defragmented.
6652  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6653  {
6654  VmaBlockVector* pAllocBlockVector = nullptr;
6655 
6656  const VmaPool hAllocPool = hAlloc->GetPool();
6657  // This allocation belongs to custom pool.
6658  if(hAllocPool != VK_NULL_HANDLE)
6659  {
6660  pAllocBlockVector = &hAllocPool->GetBlockVector();
6661  }
6662  // This allocation belongs to general pool.
6663  else
6664  {
6665  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6666  }
6667 
6668  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6669 
6670  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6671  &pAllocationsChanged[allocIndex] : VMA_NULL;
6672  pDefragmentator->AddAllocation(hAlloc, pChanged);
6673  }
6674  }
6675 
6676  VkResult result = VK_SUCCESS;
6677 
6678  // ======== Main processing.
6679 
6680  VkDeviceSize maxBytesToMove = SIZE_MAX;
6681  uint32_t maxAllocationsToMove = UINT32_MAX;
6682  if(pDefragmentationInfo != VMA_NULL)
6683  {
6684  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6685  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6686  }
6687 
6688  // Process standard memory.
6689  for(uint32_t memTypeIndex = 0;
6690  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6691  ++memTypeIndex)
6692  {
6693  // Only HOST_VISIBLE memory types can be defragmented.
6694  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6695  {
6696  for(uint32_t blockVectorType = 0;
6697  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6698  ++blockVectorType)
6699  {
6700  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6701  pDefragmentationStats,
6702  maxBytesToMove,
6703  maxAllocationsToMove);
6704  }
6705  }
6706  }
6707 
6708  // Process custom pools.
6709  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6710  {
6711  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6712  pDefragmentationStats,
6713  maxBytesToMove,
6714  maxAllocationsToMove);
6715  }
6716 
6717  // ======== Destroy defragmentators.
6718 
6719  // Process custom pools.
6720  for(size_t poolIndex = poolCount; poolIndex--; )
6721  {
6722  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6723  }
6724 
6725  // Process standard memory.
6726  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6727  {
6728  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6729  {
6730  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6731  {
6732  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6733  }
6734  }
6735  }
6736 
6737  return result;
6738 }
6739 
6740 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6741 {
6742  if(hAllocation->CanBecomeLost())
6743  {
6744  /*
6745  Warning: This is a carefully designed algorithm.
6746  Do not modify unless you really know what you're doing :)
6747  */
6748  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6749  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6750  for(;;)
6751  {
6752  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6753  {
6754  pAllocationInfo->memoryType = UINT32_MAX;
6755  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6756  pAllocationInfo->offset = 0;
6757  pAllocationInfo->size = hAllocation->GetSize();
6758  pAllocationInfo->pMappedData = VMA_NULL;
6759  pAllocationInfo->pUserData = hAllocation->GetUserData();
6760  return;
6761  }
6762  else if(localLastUseFrameIndex == localCurrFrameIndex)
6763  {
6764  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6765  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6766  pAllocationInfo->offset = hAllocation->GetOffset();
6767  pAllocationInfo->size = hAllocation->GetSize();
6768  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6769  pAllocationInfo->pUserData = hAllocation->GetUserData();
6770  return;
6771  }
6772  else // Last use time earlier than current time.
6773  {
6774  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6775  {
6776  localLastUseFrameIndex = localCurrFrameIndex;
6777  }
6778  }
6779  }
6780  }
6781  // We could use the same code here, but for performance reasons we don't need to use the hAllocation.LastUseFrameIndex atomic.
6782  else
6783  {
6784  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6785  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6786  pAllocationInfo->offset = hAllocation->GetOffset();
6787  pAllocationInfo->size = hAllocation->GetSize();
6788  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6789  pAllocationInfo->pUserData = hAllocation->GetUserData();
6790  }
6791 }
6792 
6793 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6794 {
6795  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6796 
6797  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6798 
6799  if(newCreateInfo.maxBlockCount == 0)
6800  {
6801  newCreateInfo.maxBlockCount = SIZE_MAX;
6802  }
6803  if(newCreateInfo.blockSize == 0)
6804  {
6805  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6806  }
6807 
6808  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6809 
6810  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6811  if(res != VK_SUCCESS)
6812  {
6813  vma_delete(this, *pPool);
6814  *pPool = VMA_NULL;
6815  return res;
6816  }
6817 
6818  // Add to m_Pools.
6819  {
6820  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6821  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6822  }
6823 
6824  return VK_SUCCESS;
6825 }
6826 
6827 void VmaAllocator_T::DestroyPool(VmaPool pool)
6828 {
6829  // Remove from m_Pools.
6830  {
6831  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6832  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6833  VMA_ASSERT(success && "Pool not found in Allocator.");
6834  }
6835 
6836  vma_delete(this, pool);
6837 }
6838 
6839 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6840 {
6841  pool->m_BlockVector.GetPoolStats(pPoolStats);
6842 }
6843 
6844 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6845 {
6846  m_CurrentFrameIndex.store(frameIndex);
6847 }
6848 
6849 void VmaAllocator_T::MakePoolAllocationsLost(
6850  VmaPool hPool,
6851  size_t* pLostAllocationCount)
6852 {
6853  hPool->m_BlockVector.MakePoolAllocationsLost(
6854  m_CurrentFrameIndex.load(),
6855  pLostAllocationCount);
6856 }
6857 
6858 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6859 {
6860  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6861  (*pAllocation)->InitLost();
6862 }
6863 
6864 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6865 {
6866  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6867 
6868  VkResult res;
6869  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6870  {
6871  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6872  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6873  {
6874  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6875  if(res == VK_SUCCESS)
6876  {
6877  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6878  }
6879  }
6880  else
6881  {
6882  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6883  }
6884  }
6885  else
6886  {
6887  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6888  }
6889 
6890  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6891  {
6892  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6893  }
6894 
6895  return res;
6896 }
6897 
6898 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6899 {
6900  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6901  {
6902  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6903  }
6904 
6905  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
6906 
6907  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6908  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6909  {
6910  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6911  m_HeapSizeLimit[heapIndex] += size;
6912  }
6913 }
6914 
6915 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6916 {
6917  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6918 
6919  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6920  {
6921  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6922  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6923  VMA_ASSERT(pOwnAllocations);
6924  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6925  VMA_ASSERT(success);
6926  }
6927 
6928  VkDeviceMemory hMemory = allocation->GetMemory();
6929 
6930  if(allocation->GetMappedData() != VMA_NULL)
6931  {
6932  vkUnmapMemory(m_hDevice, hMemory);
6933  }
6934 
6935  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
6936 
6937  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6938 }
6939 
6940 #if VMA_STATS_STRING_ENABLED
6941 
6942 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
6943 {
6944  bool ownAllocationsStarted = false;
6945  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6946  {
6947  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6948  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6949  {
6950  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6951  VMA_ASSERT(pOwnAllocVector);
6952  if(pOwnAllocVector->empty() == false)
6953  {
6954  if(ownAllocationsStarted == false)
6955  {
6956  ownAllocationsStarted = true;
6957  json.WriteString("OwnAllocations");
6958  json.BeginObject();
6959  }
6960 
6961  json.BeginString("Type ");
6962  json.ContinueString(memTypeIndex);
6963  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6964  {
6965  json.ContinueString(" Mapped");
6966  }
6967  json.EndString();
6968 
6969  json.BeginArray();
6970 
6971  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
6972  {
6973  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
6974  json.BeginObject(true);
6975 
6976  json.WriteString("Size");
6977  json.WriteNumber(hAlloc->GetSize());
6978 
6979  json.WriteString("Type");
6980  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
6981 
6982  json.EndObject();
6983  }
6984 
6985  json.EndArray();
6986  }
6987  }
6988  }
6989  if(ownAllocationsStarted)
6990  {
6991  json.EndObject();
6992  }
6993 
6994  {
6995  bool allocationsStarted = false;
6996  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6997  {
6998  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6999  {
7000  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7001  {
7002  if(allocationsStarted == false)
7003  {
7004  allocationsStarted = true;
7005  json.WriteString("DefaultPools");
7006  json.BeginObject();
7007  }
7008 
7009  json.BeginString("Type ");
7010  json.ContinueString(memTypeIndex);
7011  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7012  {
7013  json.ContinueString(" Mapped");
7014  }
7015  json.EndString();
7016 
7017  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7018  }
7019  }
7020  }
7021  if(allocationsStarted)
7022  {
7023  json.EndObject();
7024  }
7025  }
7026 
7027  {
7028  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7029  const size_t poolCount = m_Pools.size();
7030  if(poolCount > 0)
7031  {
7032  json.WriteString("Pools");
7033  json.BeginArray();
7034  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7035  {
7036  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7037  }
7038  json.EndArray();
7039  }
7040  }
7041 }
7042 
7043 #endif // #if VMA_STATS_STRING_ENABLED
7044 
7045 static VkResult AllocateMemoryForImage(
7046  VmaAllocator allocator,
7047  VkImage image,
7048  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7049  VmaSuballocationType suballocType,
7050  VmaAllocation* pAllocation)
7051 {
7052  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7053 
7054  VkMemoryRequirements vkMemReq = {};
7055  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
7056 
7057  return allocator->AllocateMemory(
7058  vkMemReq,
7059  *pAllocationCreateInfo,
7060  suballocType,
7061  pAllocation);
7062 }
7063 
7065 // Public interface
7066 
7067 VkResult vmaCreateAllocator(
7068  const VmaAllocatorCreateInfo* pCreateInfo,
7069  VmaAllocator* pAllocator)
7070 {
7071  VMA_ASSERT(pCreateInfo && pAllocator);
7072  VMA_DEBUG_LOG("vmaCreateAllocator");
7073  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7074  return VK_SUCCESS;
7075 }
7076 
7077 void vmaDestroyAllocator(
7078  VmaAllocator allocator)
7079 {
7080  if(allocator != VK_NULL_HANDLE)
7081  {
7082  VMA_DEBUG_LOG("vmaDestroyAllocator");
7083  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7084  vma_delete(&allocationCallbacks, allocator);
7085  }
7086 }
7087 
7089  VmaAllocator allocator,
7090  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7091 {
7092  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7093  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7094 }
7095 
7097  VmaAllocator allocator,
7098  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7099 {
7100  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7101  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7102 }
7103 
7105  VmaAllocator allocator,
7106  uint32_t memoryTypeIndex,
7107  VkMemoryPropertyFlags* pFlags)
7108 {
7109  VMA_ASSERT(allocator && pFlags);
7110  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7111  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7112 }
7113 
7115  VmaAllocator allocator,
7116  uint32_t frameIndex)
7117 {
7118  VMA_ASSERT(allocator);
7119  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7120 
7121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7122 
7123  allocator->SetCurrentFrameIndex(frameIndex);
7124 }
7125 
7126 void vmaCalculateStats(
7127  VmaAllocator allocator,
7128  VmaStats* pStats)
7129 {
7130  VMA_ASSERT(allocator && pStats);
7131  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7132  allocator->CalculateStats(pStats);
7133 }
7134 
7135 #if VMA_STATS_STRING_ENABLED
7136 
7137 void vmaBuildStatsString(
7138  VmaAllocator allocator,
7139  char** ppStatsString,
7140  VkBool32 detailedMap)
7141 {
7142  VMA_ASSERT(allocator && ppStatsString);
7143  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7144 
7145  VmaStringBuilder sb(allocator);
7146  {
7147  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7148  json.BeginObject();
7149 
7150  VmaStats stats;
7151  allocator->CalculateStats(&stats);
7152 
7153  json.WriteString("Total");
7154  VmaPrintStatInfo(json, stats.total);
7155 
7156  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7157  {
7158  json.BeginString("Heap ");
7159  json.ContinueString(heapIndex);
7160  json.EndString();
7161  json.BeginObject();
7162 
7163  json.WriteString("Size");
7164  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7165 
7166  json.WriteString("Flags");
7167  json.BeginArray(true);
7168  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7169  {
7170  json.WriteString("DEVICE_LOCAL");
7171  }
7172  json.EndArray();
7173 
7174  if(stats.memoryHeap[heapIndex].BlockCount > 0)
7175  {
7176  json.WriteString("Stats");
7177  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7178  }
7179 
7180  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7181  {
7182  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7183  {
7184  json.BeginString("Type ");
7185  json.ContinueString(typeIndex);
7186  json.EndString();
7187 
7188  json.BeginObject();
7189 
7190  json.WriteString("Flags");
7191  json.BeginArray(true);
7192  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7193  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7194  {
7195  json.WriteString("DEVICE_LOCAL");
7196  }
7197  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7198  {
7199  json.WriteString("HOST_VISIBLE");
7200  }
7201  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7202  {
7203  json.WriteString("HOST_COHERENT");
7204  }
7205  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7206  {
7207  json.WriteString("HOST_CACHED");
7208  }
7209  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7210  {
7211  json.WriteString("LAZILY_ALLOCATED");
7212  }
7213  json.EndArray();
7214 
7215  if(stats.memoryType[typeIndex].BlockCount > 0)
7216  {
7217  json.WriteString("Stats");
7218  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7219  }
7220 
7221  json.EndObject();
7222  }
7223  }
7224 
7225  json.EndObject();
7226  }
7227  if(detailedMap == VK_TRUE)
7228  {
7229  allocator->PrintDetailedMap(json);
7230  }
7231 
7232  json.EndObject();
7233  }
7234 
7235  const size_t len = sb.GetLength();
7236  char* const pChars = vma_new_array(allocator, char, len + 1);
7237  if(len > 0)
7238  {
7239  memcpy(pChars, sb.GetData(), len);
7240  }
7241  pChars[len] = '\0';
7242  *ppStatsString = pChars;
7243 }
7244 
7245 void vmaFreeStatsString(
7246  VmaAllocator allocator,
7247  char* pStatsString)
7248 {
7249  if(pStatsString != VMA_NULL)
7250  {
7251  VMA_ASSERT(allocator);
7252  size_t len = strlen(pStatsString);
7253  vma_delete_array(allocator, pStatsString, len + 1);
7254  }
7255 }
7256 
7257 #endif // #if VMA_STATS_STRING_ENABLED
7258 
7261 VkResult vmaFindMemoryTypeIndex(
7262  VmaAllocator allocator,
7263  uint32_t memoryTypeBits,
7264  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7265  uint32_t* pMemoryTypeIndex)
7266 {
7267  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7268  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7269  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7270 
7271  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7272  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7273  if(preferredFlags == 0)
7274  {
7275  preferredFlags = requiredFlags;
7276  }
7277  // preferredFlags, if not 0, must be a superset of requiredFlags.
7278  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7279 
7280  // Convert usage to requiredFlags and preferredFlags.
7281  switch(pAllocationCreateInfo->usage)
7282  {
7284  break;
7286  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7287  break;
7289  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7290  break;
7292  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7293  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7294  break;
7296  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7297  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7298  break;
7299  default:
7300  break;
7301  }
7302 
7303  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7304  {
7305  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7306  }
7307 
7308  *pMemoryTypeIndex = UINT32_MAX;
7309  uint32_t minCost = UINT32_MAX;
7310  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7311  memTypeIndex < allocator->GetMemoryTypeCount();
7312  ++memTypeIndex, memTypeBit <<= 1)
7313  {
7314  // This memory type is acceptable according to memoryTypeBits bitmask.
7315  if((memTypeBit & memoryTypeBits) != 0)
7316  {
7317  const VkMemoryPropertyFlags currFlags =
7318  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7319  // This memory type contains requiredFlags.
7320  if((requiredFlags & ~currFlags) == 0)
7321  {
7322  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7323  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7324  // Remember memory type with lowest cost.
7325  if(currCost < minCost)
7326  {
7327  *pMemoryTypeIndex = memTypeIndex;
7328  if(currCost == 0)
7329  {
7330  return VK_SUCCESS;
7331  }
7332  minCost = currCost;
7333  }
7334  }
7335  }
7336  }
7337  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7338 }
7339 
7340 VkResult vmaCreatePool(
7341  VmaAllocator allocator,
7342  const VmaPoolCreateInfo* pCreateInfo,
7343  VmaPool* pPool)
7344 {
7345  VMA_ASSERT(allocator && pCreateInfo && pPool);
7346 
7347  VMA_DEBUG_LOG("vmaCreatePool");
7348 
7349  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7350 
7351  return allocator->CreatePool(pCreateInfo, pPool);
7352 }
7353 
7354 void vmaDestroyPool(
7355  VmaAllocator allocator,
7356  VmaPool pool)
7357 {
7358  VMA_ASSERT(allocator && pool);
7359 
7360  VMA_DEBUG_LOG("vmaDestroyPool");
7361 
7362  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7363 
7364  allocator->DestroyPool(pool);
7365 }
7366 
7367 void vmaGetPoolStats(
7368  VmaAllocator allocator,
7369  VmaPool pool,
7370  VmaPoolStats* pPoolStats)
7371 {
7372  VMA_ASSERT(allocator && pool && pPoolStats);
7373 
7374  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7375 
7376  allocator->GetPoolStats(pool, pPoolStats);
7377 }
7378 
7380  VmaAllocator allocator,
7381  VmaPool pool,
7382  size_t* pLostAllocationCount)
7383 {
7384  VMA_ASSERT(allocator && pool);
7385 
7386  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7387 
7388  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7389 }
7390 
7391 VkResult vmaAllocateMemory(
7392  VmaAllocator allocator,
7393  const VkMemoryRequirements* pVkMemoryRequirements,
7394  const VmaAllocationCreateInfo* pCreateInfo,
7395  VmaAllocation* pAllocation,
7396  VmaAllocationInfo* pAllocationInfo)
7397 {
7398  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7399 
7400  VMA_DEBUG_LOG("vmaAllocateMemory");
7401 
7402  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7403 
7404  VkResult result = allocator->AllocateMemory(
7405  *pVkMemoryRequirements,
7406  *pCreateInfo,
7407  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7408  pAllocation);
7409 
7410  if(pAllocationInfo && result == VK_SUCCESS)
7411  {
7412  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7413  }
7414 
7415  return result;
7416 }
7417 
7419  VmaAllocator allocator,
7420  VkBuffer buffer,
7421  const VmaAllocationCreateInfo* pCreateInfo,
7422  VmaAllocation* pAllocation,
7423  VmaAllocationInfo* pAllocationInfo)
7424 {
7425  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7426 
7427  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7428 
7429  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7430 
7431  VkMemoryRequirements vkMemReq = {};
7432  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
7433 
7434  VkResult result = allocator->AllocateMemory(
7435  vkMemReq,
7436  *pCreateInfo,
7437  VMA_SUBALLOCATION_TYPE_BUFFER,
7438  pAllocation);
7439 
7440  if(pAllocationInfo && result == VK_SUCCESS)
7441  {
7442  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7443  }
7444 
7445  return result;
7446 }
7447 
7448 VkResult vmaAllocateMemoryForImage(
7449  VmaAllocator allocator,
7450  VkImage image,
7451  const VmaAllocationCreateInfo* pCreateInfo,
7452  VmaAllocation* pAllocation,
7453  VmaAllocationInfo* pAllocationInfo)
7454 {
7455  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7456 
7457  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7458 
7459  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7460 
7461  VkResult result = AllocateMemoryForImage(
7462  allocator,
7463  image,
7464  pCreateInfo,
7465  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7466  pAllocation);
7467 
7468  if(pAllocationInfo && result == VK_SUCCESS)
7469  {
7470  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7471  }
7472 
7473  return result;
7474 }
7475 
7476 void vmaFreeMemory(
7477  VmaAllocator allocator,
7478  VmaAllocation allocation)
7479 {
7480  VMA_ASSERT(allocator && allocation);
7481 
7482  VMA_DEBUG_LOG("vmaFreeMemory");
7483 
7484  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7485 
7486  allocator->FreeMemory(allocation);
7487 }
7488 
7490  VmaAllocator allocator,
7491  VmaAllocation allocation,
7492  VmaAllocationInfo* pAllocationInfo)
7493 {
7494  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7495 
7496  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7497 
7498  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7499 }
7500 
7502  VmaAllocator allocator,
7503  VmaAllocation allocation,
7504  void* pUserData)
7505 {
7506  VMA_ASSERT(allocator && allocation);
7507 
7508  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7509 
7510  allocation->SetUserData(pUserData);
7511 }
7512 
7514  VmaAllocator allocator,
7515  VmaAllocation* pAllocation)
7516 {
7517  VMA_ASSERT(allocator && pAllocation);
7518 
7519  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7520 
7521  allocator->CreateLostAllocation(pAllocation);
7522 }
7523 
7524 VkResult vmaMapMemory(
7525  VmaAllocator allocator,
7526  VmaAllocation allocation,
7527  void** ppData)
7528 {
7529  VMA_ASSERT(allocator && allocation && ppData);
7530 
7531  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7532 
7533  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
7534  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7535 }
7536 
7537 void vmaUnmapMemory(
7538  VmaAllocator allocator,
7539  VmaAllocation allocation)
7540 {
7541  VMA_ASSERT(allocator && allocation);
7542 
7543  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7544 
7545  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
7546 }
7547 
7548 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7549 {
7550  VMA_ASSERT(allocator);
7551 
7552  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7553 
7554  allocator->UnmapPersistentlyMappedMemory();
7555 }
7556 
7557 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7558 {
7559  VMA_ASSERT(allocator);
7560 
7561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7562 
7563  return allocator->MapPersistentlyMappedMemory();
7564 }
7565 
7566 VkResult vmaDefragment(
7567  VmaAllocator allocator,
7568  VmaAllocation* pAllocations,
7569  size_t allocationCount,
7570  VkBool32* pAllocationsChanged,
7571  const VmaDefragmentationInfo *pDefragmentationInfo,
7572  VmaDefragmentationStats* pDefragmentationStats)
7573 {
7574  VMA_ASSERT(allocator && pAllocations);
7575 
7576  VMA_DEBUG_LOG("vmaDefragment");
7577 
7578  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7579 
7580  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7581 }
7582 
7583 VkResult vmaCreateBuffer(
7584  VmaAllocator allocator,
7585  const VkBufferCreateInfo* pBufferCreateInfo,
7586  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7587  VkBuffer* pBuffer,
7588  VmaAllocation* pAllocation,
7589  VmaAllocationInfo* pAllocationInfo)
7590 {
7591  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7592 
7593  VMA_DEBUG_LOG("vmaCreateBuffer");
7594 
7595  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7596 
7597  *pBuffer = VK_NULL_HANDLE;
7598  *pAllocation = VK_NULL_HANDLE;
7599 
7600  // 1. Create VkBuffer.
7601  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7602  allocator->m_hDevice,
7603  pBufferCreateInfo,
7604  allocator->GetAllocationCallbacks(),
7605  pBuffer);
7606  if(res >= 0)
7607  {
7608  // 2. vkGetBufferMemoryRequirements.
7609  VkMemoryRequirements vkMemReq = {};
7610  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
7611 
7612  // 3. Allocate memory using allocator.
7613  res = allocator->AllocateMemory(
7614  vkMemReq,
7615  *pAllocationCreateInfo,
7616  VMA_SUBALLOCATION_TYPE_BUFFER,
7617  pAllocation);
7618  if(res >= 0)
7619  {
7620  // 3. Bind buffer with memory.
7621  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7622  allocator->m_hDevice,
7623  *pBuffer,
7624  (*pAllocation)->GetMemory(),
7625  (*pAllocation)->GetOffset());
7626  if(res >= 0)
7627  {
7628  // All steps succeeded.
7629  if(pAllocationInfo != VMA_NULL)
7630  {
7631  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7632  }
7633  return VK_SUCCESS;
7634  }
7635  allocator->FreeMemory(*pAllocation);
7636  *pAllocation = VK_NULL_HANDLE;
7637  return res;
7638  }
7639  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7640  *pBuffer = VK_NULL_HANDLE;
7641  return res;
7642  }
7643  return res;
7644 }
7645 
7646 void vmaDestroyBuffer(
7647  VmaAllocator allocator,
7648  VkBuffer buffer,
7649  VmaAllocation allocation)
7650 {
7651  if(buffer != VK_NULL_HANDLE)
7652  {
7653  VMA_ASSERT(allocator);
7654 
7655  VMA_DEBUG_LOG("vmaDestroyBuffer");
7656 
7657  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7658 
7659  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7660 
7661  allocator->FreeMemory(allocation);
7662  }
7663 }
7664 
7665 VkResult vmaCreateImage(
7666  VmaAllocator allocator,
7667  const VkImageCreateInfo* pImageCreateInfo,
7668  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7669  VkImage* pImage,
7670  VmaAllocation* pAllocation,
7671  VmaAllocationInfo* pAllocationInfo)
7672 {
7673  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7674 
7675  VMA_DEBUG_LOG("vmaCreateImage");
7676 
7677  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7678 
7679  *pImage = VK_NULL_HANDLE;
7680  *pAllocation = VK_NULL_HANDLE;
7681 
7682  // 1. Create VkImage.
7683  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
7684  allocator->m_hDevice,
7685  pImageCreateInfo,
7686  allocator->GetAllocationCallbacks(),
7687  pImage);
7688  if(res >= 0)
7689  {
7690  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7691  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7692  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7693 
7694  // 2. Allocate memory using allocator.
7695  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7696  if(res >= 0)
7697  {
7698  // 3. Bind image with memory.
7699  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
7700  allocator->m_hDevice,
7701  *pImage,
7702  (*pAllocation)->GetMemory(),
7703  (*pAllocation)->GetOffset());
7704  if(res >= 0)
7705  {
7706  // All steps succeeded.
7707  if(pAllocationInfo != VMA_NULL)
7708  {
7709  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7710  }
7711  return VK_SUCCESS;
7712  }
7713  allocator->FreeMemory(*pAllocation);
7714  *pAllocation = VK_NULL_HANDLE;
7715  return res;
7716  }
7717  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7718  *pImage = VK_NULL_HANDLE;
7719  return res;
7720  }
7721  return res;
7722 }
7723 
7724 void vmaDestroyImage(
7725  VmaAllocator allocator,
7726  VkImage image,
7727  VmaAllocation allocation)
7728 {
7729  if(image != VK_NULL_HANDLE)
7730  {
7731  VMA_ASSERT(allocator);
7732 
7733  VMA_DEBUG_LOG("vmaDestroyImage");
7734 
7735  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7736 
7737  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7738 
7739  allocator->FreeMemory(allocation);
7740  }
7741 }
7742 
7743 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:476
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:499
Definition: vk_mem_alloc.h:828
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
uint32_t BlockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:612
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:486
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:679
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:480
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:949
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1099
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:880
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:728
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:761
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:445
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:511
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:830
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave #define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:558
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:493
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:508
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:483
VkFlags VmaAllocatorFlags
Definition: vk_mem_alloc.h:473
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1103
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:528
VmaStatInfo total
Definition: vk_mem_alloc.h:630
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1111
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:744
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1094
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:484
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:502
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:834
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:959
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:481
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:763
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:850
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:886
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:837
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
struct VmaVulkanFunctions VmaVulkanFunctions
Definition: vk_mem_alloc.h:737
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1089
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VkDeviceSize AllocationSizeMax
Definition: vk_mem_alloc.h:621
Definition: vk_mem_alloc.h:808
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1107
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:482
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:626
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:717
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1109
VmaMemoryUsage
Definition: vk_mem_alloc.h:665
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:755
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:469
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
VmaAllocatorFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:464
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:477
Definition: vk_mem_alloc.h:609
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:845
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:456
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:460
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:840
VkDeviceSize UnusedRangeSizeMax
Definition: vk_mem_alloc.h:622
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:439
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:750
Definition: vk_mem_alloc.h:741
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:479
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:858
VkDeviceSize AllocationSizeMin
Definition: vk_mem_alloc.h:621
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:514
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:889
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:768
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:546
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:628
VkDeviceSize AllocationSizeAvg
Definition: vk_mem_alloc.h:621
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:488
uint32_t AllocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:614
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:458
Definition: vk_mem_alloc.h:735
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:487
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:872
VmaAllocatorFlags flags
Flags for created allocator. Use VmaAllocatorFlagBits enum.
Definition: vk_mem_alloc.h:496
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VkDeviceSize UsedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:618
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:970
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:696
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
Definition: vk_mem_alloc.h:505
uint32_t UnusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:616
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:877
Memory will be mapped on host. Could be used for transfer to/from device.
Definition: vk_mem_alloc.h:673
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
struct VmaStats VmaStats
General statistics from current state of Allocator.
VkDeviceSize UnusedRangeSizeAvg
Definition: vk_mem_alloc.h:622
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:954
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1105
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
Definition: vk_mem_alloc.h:475
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:739
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:485
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:489
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:799
void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this alloaction is not persistent...
Definition: vk_mem_alloc.h:965
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
No intended memory usage specified.
Definition: vk_mem_alloc.h:668
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:478
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
Definition: vk_mem_alloc.h:680
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:935
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:676
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:684
Definition: vk_mem_alloc.h:471
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:707
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:670
struct VmaStatInfo VmaStatInfo
VkDeviceSize UnusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:620
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:629
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:883
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:826
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
VkDeviceSize UnusedRangeSizeMin
Definition: vk_mem_alloc.h:622
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:940
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.