// Vulkan Memory Allocator - vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

// Set of informative callbacks for vkAllocateMemory and vkFreeMemory.
// Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
typedef struct VmaDeviceMemoryCallbacks {
    // Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    // Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    // The allocator and all objects created from it will not be synchronized
    // internally, so you must guarantee they are used from only one thread at
    // a time or synchronized externally by you.
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

// Pointers to some Vulkan functions - a subset used by the library.
// Used in VmaAllocatorCreateInfo::pVulkanFunctions.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    // Flags for the created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    // Vulkan physical device. Must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    // Vulkan device. Must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    // Preferred size of a single VkDeviceMemory block allocated from large heaps. Optional - set to 0 for the default.
    VkDeviceSize preferredLargeHeapBlockSize;
    // Preferred size of a single VkDeviceMemory block allocated from "small" heaps. Optional - set to 0 for the default.
    VkDeviceSize preferredSmallHeapBlockSize;
    // Custom CPU memory allocation callbacks. Optional - can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    // Informative callbacks for vkAllocateMemory/vkFreeMemory. Optional - can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    // Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    // Either null or a pointer to an array of limits on the maximum number of bytes
    // that can be allocated out of each Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    // Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

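/*
Illustrative usage sketch (not part of the original header): creating and
destroying an allocator with default settings. `physicalDevice` and `device`
are assumed to be valid handles created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
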
// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

// PhysicalDeviceProperties are fetched from physicalDevice by the allocator,
// so you can access them here without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

// Given a memory type index, returns its VkMemoryPropertyFlags.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

// Sets the index of the current frame. Required if you use allocations that
// can become lost, so the allocator can tell them apart from current ones.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

// Calculated statistics of memory usage in the entire allocator.
typedef struct VmaStatInfo
{
    // Number of VkDeviceMemory blocks.
    uint32_t blockCount;
    // Number of allocations.
    uint32_t allocationCount;
    // Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    // Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    // Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

// Retrieves statistics from the current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

// Builds and returns a statistics string in JSON format.
// The returned string must be freed using vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

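/*
Illustrative sketch (not from the original header): every string returned by
vmaBuildStatsString() must be released with vmaFreeStatsString(). `allocator`
is assumed to be a valid VmaAllocator:

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... print or save statsString (JSON) ...
    vmaFreeStatsString(allocator, statsString);
*/
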
#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    // No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    // Memory will be used on the device only, no need to be mappable on the host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    // Memory will be mapped and written/read on the host.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    // Memory will be used for frequent writing on the host and reading on the device.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    // Memory will be used for writing on the device and readback on the host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    // Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    // Intended usage of the memory. Ignored if pool is not null.
    VmaMemoryUsage usage;
    // Flags that must be set in a memory type chosen for the allocation.
    VkMemoryPropertyFlags requiredFlags;
    // Flags that are preferred, but not required, in a memory type chosen for the allocation.
    VkMemoryPropertyFlags preferredFlags;
    // Custom general-purpose pointer that will be stored in the VmaAllocation.
    void* pUserData;
    // Pool that this allocation should be created in. Optional - leave null to
    // allocate from the default (general) pool.
    VmaPool pool;
} VmaAllocationCreateInfo;

// Finds the memory type index best suited for the given memory requirements
// and allocation parameters described in pAllocationCreateInfo.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

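/*
Illustrative sketch (not from the original header): finding a memory type for
a staging buffer that must be host-visible. `allocator` and `memReq`
(a VkMemoryRequirements obtained from the application's buffer) are assumed:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    createInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
*/
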
// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    // Use this flag if you always allocate only buffers and linear images, or
    // only optimal images, out of this pool, so bufferImageGranularity can be ignored.
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    // Set to use memory that will be persistently mapped.
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

// Describes parameters of a VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    // Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    // Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    // Size of a single VkDeviceMemory block in this pool, in bytes.
    VkDeviceSize blockSize;
    // Minimum number of blocks kept allocated even if they stay empty.
    size_t minBlockCount;
    // Maximum number of blocks that can be allocated. Zero means no limit.
    size_t maxBlockCount;
    // Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

// Describes the state of an existing VmaPool.
typedef struct VmaPoolStats {
    // Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    // Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    // Size of the largest continuous free memory region, in bytes.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

// Allocates Vulkan device memory and creates a VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

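/*
Illustrative sketch of creating a custom pool (member values are assumptions
for the example). `memTypeIndex` would typically come from
vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolInfo.minBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
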
// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

// Retrieves statistics of an existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

// Marks all allocations in the given pool as lost if they are not used in the
// current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

// Parameters of a VmaAllocation object that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    // Memory type index that this allocation was allocated from. Never changes.
    uint32_t memoryType;
    // Handle to the Vulkan memory object. The same object can be shared by
    // multiple allocations. It can change after a call to vmaDefragment() if
    // this allocation is passed to the function.
    VkDeviceMemory deviceMemory;
    // Offset into deviceMemory object to the beginning of this allocation, in
    // bytes. It can change after a call to vmaDefragment() if this allocation
    // is passed to the function.
    VkDeviceSize offset;
    // Size of this allocation, in bytes. Never changes.
    VkDeviceSize size;
    // Pointer to the beginning of this allocation as mapped data. Null if the
    // allocation is not persistently mapped.
    void* pMappedData;
    // Custom general-purpose pointer that was passed as
    // VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

// General purpose memory allocation.
// You should free the memory using vmaFreeMemory().
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Frees memory previously allocated using vmaAllocateMemory(),
// vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

// Creates a new VmaAllocation that is initially lost. Useful e.g. as a dummy
// entry in a collection of allocations that can all become lost.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

// Maps memory represented by the given allocation and returns a pointer to it.
// Maps only allocations that are NOT persistently mapped.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

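/*
Illustrative sketch (not from the original header): mapping must be paired
with unmapping and is only valid for allocations made from HOST_VISIBLE memory
types. `allocation`, `srcData`, and `srcDataSize` are assumed to come from the
application:

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
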
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    // Maximum total number of bytes that can be copied while moving allocations
    // to different places. Set to VK_WHOLE_SIZE to mean no limit.
    VkDeviceSize maxBytesToMove;
    // Maximum number of allocations that can be moved to different places.
    // Set to UINT32_MAX to mean no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    // Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    // Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    // Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    // Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

// Compacts memory by moving allocations.
// pAllocationsChanged must either be null or point to an array of
// allocationCount VkBool32 values; after the call, each value is set to
// VK_TRUE if the corresponding allocation has been moved.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

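/*
Illustrative sketch, assuming `allocations` is an array of ALLOC_COUNT movable
allocations kept by the application (both names are assumptions for the
example). After defragmentation, buffers or images bound to moved allocations
must be recreated and rebound by the application:

    VkBool32 changed[ALLOC_COUNT];
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOC_COUNT, changed, NULL, &stats);
    // For every i where changed[i] == VK_TRUE: destroy the old buffer, create
    // a new one, and bind it at the allocation's new place.
*/
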
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

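/*
Illustrative sketch (not from the original header): the typical way to create
a buffer together with its memory in one call. Sizes and usage flags are
example values:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
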
// Destroys Vulkan buffer and frees allocated memory.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Destroys Vulkan image and frees allocated memory.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif


// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    // Set to 1 to use the best-fit strategy when searching for a free
    // suballocation (smallest one that fits). Set to 0 to use first-fit
    // (first one that fits), which may be faster but can lead to worse fragmentation.
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Set to 1 to make every allocation use its own VkDeviceMemory - for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes - for debugging purposes only.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Margin, in bytes, enforced between suballocations - for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set to 1 to synchronize entry calls to the library with a single global mutex - for debugging purposes only.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity - for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a Vulkan memory heap to be considered "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
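// Worked example: CountBitsSet(0xB) == 3, since 0xB is binary 1011. This is
// the classic SWAR popcount: each step sums bit counts of adjacent groups
// (1-bit, then 2-bit, 4-bit, 8-bit, and 16-bit wide).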

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
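// Worked example, assuming pageSize = 0x10000 (64 KiB): a resource at offset
// 0x8000 with size 0x4000 ends at byte 0xBFFF, which lies on page 0; a second
// resource starting at offset 0xC000 also lies on page 0, so the function
// returns true. If the second resource started at 0x10000, it would return false.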

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if the given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is
a buffer or linear image and the other one is an optimal image. If the type is
unknown, behaves conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the collection of free suballocations.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
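/*
Usage sketch (illustrative, not from the original source): find the first
element >= 13 in a sorted array.

    const uint32_t arr[] = { 2, 5, 8, 13, 21 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 5, 13u,
        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
    // it now points to arr[3] (value 13).
*/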

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Set up singly-linked list of all free items in this block.
    // newBlock.pItems points to the same array as the copy just pushed.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
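/*
Usage sketch (illustrative, not from the original source): the pool hands out
raw, uninitialized items, so for a non-trivial type the caller pairs Alloc()
with placement new and an explicit destructor call before Free(). `MyItem` is
a hypothetical type; placement new assumes <new> is available:

    VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 128);
    MyItem* item = new(pool.Alloc()) MyItem();
    // ... use item ...
    item->~MyItem();
    pool.Free(item);
*/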

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste computation
    // returning all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0


////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_PersistentMap = persistentMap;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };
};
3080 
3081 /*
3082 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
3083 as an allocated memory block, or free.
3084 */
3085 struct VmaSuballocation
3086 {
3087  VkDeviceSize offset;
3088  VkDeviceSize size;
3089  VmaAllocation hAllocation;
3090  VmaSuballocationType type;
3091 };
3092 
3093 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3094 
3095 // Cost of making one additional allocation lost, expressed in bytes.
3096 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3097 
3098 /*
3099 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3100 
3101 If canMakeOtherLost was false:
3102 - item points to a FREE suballocation.
3103 - itemsToMakeLostCount is 0.
3104 
3105 If canMakeOtherLost was true:
3106 - item points to first of sequence of suballocations, which are either FREE,
3107  or point to VmaAllocations that can become lost.
3108 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3109  the requested allocation to succeed.
3110 */
3111 struct VmaAllocationRequest
3112 {
3113  VkDeviceSize offset;
3114  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3115  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3116  VmaSuballocationList::iterator item;
3117  size_t itemsToMakeLostCount;
3118 
3119  VkDeviceSize CalcCost() const
3120  {
3121  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3122  }
3123 };
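/*
For intuition, a worked example of the cost metric above (a sketch, with
made-up numbers): a request overlapping used items of total size 1 MB that
requires 2 allocations to be made lost is costed as

    sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST
    = 1048576 + 2 * 1048576 = 3145728

so requests that sacrifice fewer live allocations are preferred even if they
overlap somewhat more used bytes.
*/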
3124 
3125 /*
3126 Data structure used for bookkeeping of allocations and unused ranges of memory
3127 in a single VkDeviceMemory block.
3128 */
3129 class VmaBlockMetadata
3130 {
3131 public:
3132  VmaBlockMetadata(VmaAllocator hAllocator);
3133  ~VmaBlockMetadata();
3134  void Init(VkDeviceSize size);
3135 
3136  // Validates all data structures inside this object. If not valid, returns false.
3137  bool Validate() const;
3138  VkDeviceSize GetSize() const { return m_Size; }
3139  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3140  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3141  VkDeviceSize GetUnusedRangeSizeMax() const;
3142  // Returns true if this block is empty - contains only a single free suballocation.
3143  bool IsEmpty() const;
3144 
3145  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3146  void AddPoolStats(VmaPoolStats& inoutStats) const;
3147 
3148 #if VMA_STATS_STRING_ENABLED
3149  void PrintDetailedMap(class VmaJsonWriter& json) const;
3150 #endif
3151 
3152  // Creates a trivial request for the case when the block is empty.
3153  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3154 
3155  // Tries to find a place for suballocation with given parameters inside this block.
3156  // If succeeded, fills pAllocationRequest and returns true.
3157  // If failed, returns false.
3158  bool CreateAllocationRequest(
3159  uint32_t currentFrameIndex,
3160  uint32_t frameInUseCount,
3161  VkDeviceSize bufferImageGranularity,
3162  VkDeviceSize allocSize,
3163  VkDeviceSize allocAlignment,
3164  VmaSuballocationType allocType,
3165  bool canMakeOtherLost,
3166  VmaAllocationRequest* pAllocationRequest);
3167 
3168  bool MakeRequestedAllocationsLost(
3169  uint32_t currentFrameIndex,
3170  uint32_t frameInUseCount,
3171  VmaAllocationRequest* pAllocationRequest);
3172 
3173  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3174 
3175  // Makes actual allocation based on request. Request must already be checked and valid.
3176  void Alloc(
3177  const VmaAllocationRequest& request,
3178  VmaSuballocationType type,
3179  VkDeviceSize allocSize,
3180  VmaAllocation hAllocation);
3181 
3182  // Frees suballocation assigned to given memory region.
3183  void Free(const VmaAllocation allocation);
3184 
3185 private:
3186  VkDeviceSize m_Size;
3187  uint32_t m_FreeCount;
3188  VkDeviceSize m_SumFreeSize;
3189  VmaSuballocationList m_Suballocations;
3190  // Suballocations that are free and have size greater than certain threshold.
3191  // Sorted by size, ascending.
3192  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3193 
3194  bool ValidateFreeSuballocationList() const;
3195 
3196  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
3197  // If yes, fills pOffset and returns true. If no, returns false.
3198  bool CheckAllocation(
3199  uint32_t currentFrameIndex,
3200  uint32_t frameInUseCount,
3201  VkDeviceSize bufferImageGranularity,
3202  VkDeviceSize allocSize,
3203  VkDeviceSize allocAlignment,
3204  VmaSuballocationType allocType,
3205  VmaSuballocationList::const_iterator suballocItem,
3206  bool canMakeOtherLost,
3207  VkDeviceSize* pOffset,
3208  size_t* itemsToMakeLostCount,
3209  VkDeviceSize* pSumFreeSize,
3210  VkDeviceSize* pSumItemSize) const;
3211  // Given a free suballocation, merges it with the following one, which must also be free.
3212  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3213  // Releases given suballocation, making it free.
3214  // Merges it with adjacent free suballocations if applicable.
3215  // Returns iterator to new free suballocation at this place.
3216  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3217  // Given a free suballocation, inserts it into the sorted list of
3218  // m_FreeSuballocationsBySize if it's suitable.
3219  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3220  // Given a free suballocation, removes it from the sorted list of
3221  // m_FreeSuballocationsBySize if it's suitable.
3222  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3223 };
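/*
A minimal sketch of the intended call sequence when allocating from a single
block through the interface above (the `metadata` and `hAllocation` names are
illustrative only, not part of the API):

    VmaAllocationRequest request;
    if(metadata.CreateAllocationRequest(
        currentFrameIndex, frameInUseCount, bufferImageGranularity,
        allocSize, allocAlignment, allocType, canMakeOtherLost, &request) &&
        metadata.MakeRequestedAllocationsLost(currentFrameIndex, frameInUseCount, &request))
    {
        metadata.Alloc(request, allocType, allocSize, hAllocation);
    }
*/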
3224 
3225 /*
3226 Represents a single block of device memory (`VkDeviceMemory`) with all the
3227 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3228 
3229 Thread-safety: This class must be externally synchronized.
3230 */
3231 class VmaDeviceMemoryBlock
3232 {
3233 public:
3234  uint32_t m_MemoryTypeIndex;
3235  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3236  VkDeviceMemory m_hMemory;
3237  bool m_PersistentMap;
3238  void* m_pMappedData;
3239  VmaBlockMetadata m_Metadata;
3240 
3241  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3242 
3243  ~VmaDeviceMemoryBlock()
3244  {
3245  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3246  }
3247 
3248  // Always call after construction.
3249  void Init(
3250  uint32_t newMemoryTypeIndex,
3251  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3252  VkDeviceMemory newMemory,
3253  VkDeviceSize newSize,
3254  bool persistentMap,
3255  void* pMappedData);
3256  // Always call before destruction.
3257  void Destroy(VmaAllocator allocator);
3258 
3259  // Validates all data structures inside this object. If not valid, returns false.
3260  bool Validate() const;
3261 };
3262 
3263 struct VmaPointerLess
3264 {
3265  bool operator()(const void* lhs, const void* rhs) const
3266  {
3267  return lhs < rhs;
3268  }
3269 };
3270 
3271 class VmaDefragmentator;
3272 
3273 /*
3274 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3275 Vulkan memory type.
3276 
3277 Synchronized internally with a mutex.
3278 */
3279 struct VmaBlockVector
3280 {
3281  VmaBlockVector(
3282  VmaAllocator hAllocator,
3283  uint32_t memoryTypeIndex,
3284  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3285  VkDeviceSize preferredBlockSize,
3286  size_t minBlockCount,
3287  size_t maxBlockCount,
3288  VkDeviceSize bufferImageGranularity,
3289  uint32_t frameInUseCount,
3290  bool isCustomPool);
3291  ~VmaBlockVector();
3292 
3293  VkResult CreateMinBlocks();
3294 
3295  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3296  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3297  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3298  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3299  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3300 
3301  void GetPoolStats(VmaPoolStats* pStats);
3302 
3303  bool IsEmpty() const { return m_Blocks.empty(); }
3304 
3305  VkResult Allocate(
3306  VmaPool hCurrentPool,
3307  uint32_t currentFrameIndex,
3308  const VkMemoryRequirements& vkMemReq,
3309  const VmaAllocationCreateInfo& createInfo,
3310  VmaSuballocationType suballocType,
3311  VmaAllocation* pAllocation);
3312 
3313  void Free(
3314  VmaAllocation hAllocation);
3315 
3316  // Adds statistics of this BlockVector to pStats.
3317  void AddStats(VmaStats* pStats);
3318 
3319 #if VMA_STATS_STRING_ENABLED
3320  void PrintDetailedMap(class VmaJsonWriter& json);
3321 #endif
3322 
3323  void UnmapPersistentlyMappedMemory();
3324  VkResult MapPersistentlyMappedMemory();
3325 
3326  void MakePoolAllocationsLost(
3327  uint32_t currentFrameIndex,
3328  size_t* pLostAllocationCount);
3329 
3330  VmaDefragmentator* EnsureDefragmentator(
3331  VmaAllocator hAllocator,
3332  uint32_t currentFrameIndex);
3333 
3334  VkResult Defragment(
3335  VmaDefragmentationStats* pDefragmentationStats,
3336  VkDeviceSize& maxBytesToMove,
3337  uint32_t& maxAllocationsToMove);
3338 
3339  void DestroyDefragmentator();
3340 
3341 private:
3342  friend class VmaDefragmentator;
3343 
3344  const VmaAllocator m_hAllocator;
3345  const uint32_t m_MemoryTypeIndex;
3346  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3347  const VkDeviceSize m_PreferredBlockSize;
3348  const size_t m_MinBlockCount;
3349  const size_t m_MaxBlockCount;
3350  const VkDeviceSize m_BufferImageGranularity;
3351  const uint32_t m_FrameInUseCount;
3352  const bool m_IsCustomPool;
3353  VMA_MUTEX m_Mutex;
3354  // Incrementally sorted by sumFreeSize, ascending.
3355  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3356  /* There can be at most one block that is completely empty - a
3357  hysteresis to avoid the pessimistic case of alternating creation and destruction
3358  of a VkDeviceMemory. */
3359  bool m_HasEmptyBlock;
3360  VmaDefragmentator* m_pDefragmentator;
3361 
3362  // Finds and removes given block from vector.
3363  void Remove(VmaDeviceMemoryBlock* pBlock);
3364 
3365  // Performs single step in sorting m_Blocks. They may not be fully sorted
3366  // after this call.
3367  void IncrementallySortBlocks();
3368 
3369  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3370 };
3371 
3372 struct VmaPool_T
3373 {
3374 public:
3375  VmaBlockVector m_BlockVector;
3376 
3377  // Takes ownership.
3378  VmaPool_T(
3379  VmaAllocator hAllocator,
3380  const VmaPoolCreateInfo& createInfo);
3381  ~VmaPool_T();
3382 
3383  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3384 
3385 #if VMA_STATS_STRING_ENABLED
3386  //void PrintDetailedMap(class VmaStringBuilder& sb);
3387 #endif
3388 };
3389 
3390 class VmaDefragmentator
3391 {
3392  const VmaAllocator m_hAllocator;
3393  VmaBlockVector* const m_pBlockVector;
3394  uint32_t m_CurrentFrameIndex;
3395  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3396  VkDeviceSize m_BytesMoved;
3397  uint32_t m_AllocationsMoved;
3398 
3399  struct AllocationInfo
3400  {
3401  VmaAllocation m_hAllocation;
3402  VkBool32* m_pChanged;
3403 
3404  AllocationInfo() :
3405  m_hAllocation(VK_NULL_HANDLE),
3406  m_pChanged(VMA_NULL)
3407  {
3408  }
3409  };
3410 
3411  struct AllocationInfoSizeGreater
3412  {
3413  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3414  {
3415  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3416  }
3417  };
3418 
3419  // Used between AddAllocation and Defragment.
3420  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3421 
3422  struct BlockInfo
3423  {
3424  VmaDeviceMemoryBlock* m_pBlock;
3425  bool m_HasNonMovableAllocations;
3426  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3427 
3428  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3429  m_pBlock(VMA_NULL),
3430  m_HasNonMovableAllocations(true),
3431  m_Allocations(pAllocationCallbacks),
3432  m_pMappedDataForDefragmentation(VMA_NULL)
3433  {
3434  }
3435 
3436  void CalcHasNonMovableAllocations()
3437  {
3438  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3439  const size_t defragmentAllocCount = m_Allocations.size();
3440  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3441  }
3442 
3443  void SortAllocationsBySizeDescecnding()
3444  {
3445  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3446  }
3447 
3448  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3449  void Unmap(VmaAllocator hAllocator);
3450 
3451  private:
3452  // Not null if mapped for defragmentation only, not persistently mapped.
3453  void* m_pMappedDataForDefragmentation;
3454  };
3455 
3456  struct BlockPointerLess
3457  {
3458  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3459  {
3460  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3461  }
3462  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3463  {
3464  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3465  }
3466  };
3467 
3468  // 1. Blocks with some non-movable allocations go first.
3469  // 2. Blocks with smaller sumFreeSize go first.
3470  struct BlockInfoCompareMoveDestination
3471  {
3472  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3473  {
3474  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3475  {
3476  return true;
3477  }
3478  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3479  {
3480  return false;
3481  }
3482  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3483  {
3484  return true;
3485  }
3486  return false;
3487  }
3488  };
3489 
3490  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3491  BlockInfoVector m_Blocks;
3492 
3493  VkResult DefragmentRound(
3494  VkDeviceSize maxBytesToMove,
3495  uint32_t maxAllocationsToMove);
3496 
3497  static bool MoveMakesSense(
3498  size_t dstBlockIndex, VkDeviceSize dstOffset,
3499  size_t srcBlockIndex, VkDeviceSize srcOffset);
3500 
3501 public:
3502  VmaDefragmentator(
3503  VmaAllocator hAllocator,
3504  VmaBlockVector* pBlockVector,
3505  uint32_t currentFrameIndex);
3506 
3507  ~VmaDefragmentator();
3508 
3509  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3510  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3511 
3512  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3513 
3514  VkResult Defragment(
3515  VkDeviceSize maxBytesToMove,
3516  uint32_t maxAllocationsToMove);
3517 };
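/*
A minimal usage sketch (in this library the object is normally obtained via
VmaBlockVector::EnsureDefragmentator; the handles shown here are assumed to
exist):

    VmaDefragmentator defragmentator(hAllocator, pBlockVector, currentFrameIndex);
    VkBool32 changed = VK_FALSE;
    defragmentator.AddAllocation(hAlloc, &changed);
    VkResult res = defragmentator.Defragment(maxBytesToMove, maxAllocationsToMove);
    // GetBytesMoved() and GetAllocationsMoved() then report what was done.
*/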
3518 
3519 // Main allocator object.
3520 struct VmaAllocator_T
3521 {
3522  bool m_UseMutex;
3523  bool m_UseKhrDedicatedAllocation;
3524  VkDevice m_hDevice;
3525  bool m_AllocationCallbacksSpecified;
3526  VkAllocationCallbacks m_AllocationCallbacks;
3527  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3528  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3529  // Counter to allow nested calls to these functions.
3530  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3531 
3532  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3533  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3534  VMA_MUTEX m_HeapSizeLimitMutex;
3535 
3536  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3537  VkPhysicalDeviceMemoryProperties m_MemProps;
3538 
3539  // Default pools.
3540  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3541 
3542  // Each vector is sorted by memory (handle value).
3543  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3544  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3545  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3546 
3547  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3548  ~VmaAllocator_T();
3549 
3550  const VkAllocationCallbacks* GetAllocationCallbacks() const
3551  {
3552  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3553  }
3554  const VmaVulkanFunctions& GetVulkanFunctions() const
3555  {
3556  return m_VulkanFunctions;
3557  }
3558 
3559  VkDeviceSize GetBufferImageGranularity() const
3560  {
3561  return VMA_MAX(
3562  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3563  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3564  }
3565 
3566  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3567  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3568 
3569  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3570  {
3571  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3572  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3573  }
3574 
3575  void GetBufferMemoryRequirements(
3576  VkBuffer hBuffer,
3577  VkMemoryRequirements& memReq,
3578  bool& requiresDedicatedAllocation,
3579  bool& prefersDedicatedAllocation) const;
3580  void GetImageMemoryRequirements(
3581  VkImage hImage,
3582  VkMemoryRequirements& memReq,
3583  bool& requiresDedicatedAllocation,
3584  bool& prefersDedicatedAllocation) const;
3585 
3586  // Main allocation function.
3587  VkResult AllocateMemory(
3588  const VkMemoryRequirements& vkMemReq,
3589  bool requiresDedicatedAllocation,
3590  bool prefersDedicatedAllocation,
3591  VkBuffer dedicatedBuffer,
3592  VkImage dedicatedImage,
3593  const VmaAllocationCreateInfo& createInfo,
3594  VmaSuballocationType suballocType,
3595  VmaAllocation* pAllocation);
3596 
3597  // Main deallocation function.
3598  void FreeMemory(const VmaAllocation allocation);
3599 
3600  void CalculateStats(VmaStats* pStats);
3601 
3602 #if VMA_STATS_STRING_ENABLED
3603  void PrintDetailedMap(class VmaJsonWriter& json);
3604 #endif
3605 
3606  void UnmapPersistentlyMappedMemory();
3607  VkResult MapPersistentlyMappedMemory();
3608 
3609  VkResult Defragment(
3610  VmaAllocation* pAllocations,
3611  size_t allocationCount,
3612  VkBool32* pAllocationsChanged,
3613  const VmaDefragmentationInfo* pDefragmentationInfo,
3614  VmaDefragmentationStats* pDefragmentationStats);
3615 
3616  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3617 
3618  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3619  void DestroyPool(VmaPool pool);
3620  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3621 
3622  void SetCurrentFrameIndex(uint32_t frameIndex);
3623 
3624  void MakePoolAllocationsLost(
3625  VmaPool hPool,
3626  size_t* pLostAllocationCount);
3627 
3628  void CreateLostAllocation(VmaAllocation* pAllocation);
3629 
3630  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3631  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3632 
3633 private:
3634  VkDeviceSize m_PreferredLargeHeapBlockSize;
3635  VkDeviceSize m_PreferredSmallHeapBlockSize;
3636 
3637  VkPhysicalDevice m_PhysicalDevice;
3638  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3639 
3640  VMA_MUTEX m_PoolsMutex;
3641  // Protected by m_PoolsMutex. Sorted by pointer value.
3642  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3643 
3644  VmaVulkanFunctions m_VulkanFunctions;
3645 
3646  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3647 
3648  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3649 
3650  VkResult AllocateMemoryOfType(
3651  const VkMemoryRequirements& vkMemReq,
3652  bool dedicatedAllocation,
3653  VkBuffer dedicatedBuffer,
3654  VkImage dedicatedImage,
3655  const VmaAllocationCreateInfo& createInfo,
3656  uint32_t memTypeIndex,
3657  VmaSuballocationType suballocType,
3658  VmaAllocation* pAllocation);
3659 
3660  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
3661  VkResult AllocateDedicatedMemory(
3662  VkDeviceSize size,
3663  VmaSuballocationType suballocType,
3664  uint32_t memTypeIndex,
3665  bool map,
3666  void* pUserData,
3667  VkBuffer dedicatedBuffer,
3668  VkImage dedicatedImage,
3669  VmaAllocation* pAllocation);
3670 
3671  // Frees given allocation that was created as dedicated memory, releasing its VkDeviceMemory.
3672  void FreeDedicatedMemory(VmaAllocation allocation);
3673 };
3674 
3676 // Memory allocation #2 after VmaAllocator_T definition
3677 
3678 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3679 {
3680  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3681 }
3682 
3683 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3684 {
3685  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3686 }
3687 
3688 template<typename T>
3689 static T* VmaAllocate(VmaAllocator hAllocator)
3690 {
3691  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3692 }
3693 
3694 template<typename T>
3695 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3696 {
3697  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3698 }
3699 
3700 template<typename T>
3701 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3702 {
3703  if(ptr != VMA_NULL)
3704  {
3705  ptr->~T();
3706  VmaFree(hAllocator, ptr);
3707  }
3708 }
3709 
3710 template<typename T>
3711 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3712 {
3713  if(ptr != VMA_NULL)
3714  {
3715  for(size_t i = count; i--; )
3716  ptr[i].~T();
3717  VmaFree(hAllocator, ptr);
3718  }
3719 }
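/*
These helpers only manage raw memory; construction is done separately with
placement new. A sketch of the typical pairing (type T is illustrative):

    T* p = new(VmaAllocate<T>(hAllocator)) T();
    // ...
    vma_delete(hAllocator, p); // Calls ~T() and then VmaFree().
*/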
3720 
3722 // VmaStringBuilder
3723 
3724 #if VMA_STATS_STRING_ENABLED
3725 
3726 class VmaStringBuilder
3727 {
3728 public:
3729  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3730  size_t GetLength() const { return m_Data.size(); }
3731  const char* GetData() const { return m_Data.data(); }
3732 
3733  void Add(char ch) { m_Data.push_back(ch); }
3734  void Add(const char* pStr);
3735  void AddNewLine() { Add('\n'); }
3736  void AddNumber(uint32_t num);
3737  void AddNumber(uint64_t num);
3738  void AddPointer(const void* ptr);
3739 
3740 private:
3741  VmaVector< char, VmaStlAllocator<char> > m_Data;
3742 };
3743 
3744 void VmaStringBuilder::Add(const char* pStr)
3745 {
3746  const size_t strLen = strlen(pStr);
3747  if(strLen > 0)
3748  {
3749  const size_t oldCount = m_Data.size();
3750  m_Data.resize(oldCount + strLen);
3751  memcpy(m_Data.data() + oldCount, pStr, strLen);
3752  }
3753 }
3754 
3755 void VmaStringBuilder::AddNumber(uint32_t num)
3756 {
3757  char buf[11];
3758  VmaUint32ToStr(buf, sizeof(buf), num);
3759  Add(buf);
3760 }
3761 
3762 void VmaStringBuilder::AddNumber(uint64_t num)
3763 {
3764  char buf[21];
3765  VmaUint64ToStr(buf, sizeof(buf), num);
3766  Add(buf);
3767 }
3768 
3769 void VmaStringBuilder::AddPointer(const void* ptr)
3770 {
3771  char buf[21];
3772  VmaPtrToStr(buf, sizeof(buf), ptr);
3773  Add(buf);
3774 }
3775 
3776 #endif // #if VMA_STATS_STRING_ENABLED
3777 
3779 // VmaJsonWriter
3780 
3781 #if VMA_STATS_STRING_ENABLED
3782 
3783 class VmaJsonWriter
3784 {
3785 public:
3786  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3787  ~VmaJsonWriter();
3788 
3789  void BeginObject(bool singleLine = false);
3790  void EndObject();
3791 
3792  void BeginArray(bool singleLine = false);
3793  void EndArray();
3794 
3795  void WriteString(const char* pStr);
3796  void BeginString(const char* pStr = VMA_NULL);
3797  void ContinueString(const char* pStr);
3798  void ContinueString(uint32_t n);
3799  void ContinueString(uint64_t n);
3800  void EndString(const char* pStr = VMA_NULL);
3801 
3802  void WriteNumber(uint32_t n);
3803  void WriteNumber(uint64_t n);
3804  void WriteBool(bool b);
3805  void WriteNull();
3806 
3807 private:
3808  static const char* const INDENT;
3809 
3810  enum COLLECTION_TYPE
3811  {
3812  COLLECTION_TYPE_OBJECT,
3813  COLLECTION_TYPE_ARRAY,
3814  };
3815  struct StackItem
3816  {
3817  COLLECTION_TYPE type;
3818  uint32_t valueCount;
3819  bool singleLineMode;
3820  };
3821 
3822  VmaStringBuilder& m_SB;
3823  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3824  bool m_InsideString;
3825 
3826  void BeginValue(bool isString);
3827  void WriteIndent(bool oneLess = false);
3828 };
3829 
3830 const char* const VmaJsonWriter::INDENT = " ";
3831 
3832 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3833  m_SB(sb),
3834  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3835  m_InsideString(false)
3836 {
3837 }
3838 
3839 VmaJsonWriter::~VmaJsonWriter()
3840 {
3841  VMA_ASSERT(!m_InsideString);
3842  VMA_ASSERT(m_Stack.empty());
3843 }
3844 
3845 void VmaJsonWriter::BeginObject(bool singleLine)
3846 {
3847  VMA_ASSERT(!m_InsideString);
3848 
3849  BeginValue(false);
3850  m_SB.Add('{');
3851 
3852  StackItem item;
3853  item.type = COLLECTION_TYPE_OBJECT;
3854  item.valueCount = 0;
3855  item.singleLineMode = singleLine;
3856  m_Stack.push_back(item);
3857 }
3858 
3859 void VmaJsonWriter::EndObject()
3860 {
3861  VMA_ASSERT(!m_InsideString);
3862 
3863  WriteIndent(true);
3864  m_SB.Add('}');
3865 
3866  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3867  m_Stack.pop_back();
3868 }
3869 
3870 void VmaJsonWriter::BeginArray(bool singleLine)
3871 {
3872  VMA_ASSERT(!m_InsideString);
3873 
3874  BeginValue(false);
3875  m_SB.Add('[');
3876 
3877  StackItem item;
3878  item.type = COLLECTION_TYPE_ARRAY;
3879  item.valueCount = 0;
3880  item.singleLineMode = singleLine;
3881  m_Stack.push_back(item);
3882 }
3883 
3884 void VmaJsonWriter::EndArray()
3885 {
3886  VMA_ASSERT(!m_InsideString);
3887 
3888  WriteIndent(true);
3889  m_SB.Add(']');
3890 
3891  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3892  m_Stack.pop_back();
3893 }
3894 
3895 void VmaJsonWriter::WriteString(const char* pStr)
3896 {
3897  BeginString(pStr);
3898  EndString();
3899 }
3900 
3901 void VmaJsonWriter::BeginString(const char* pStr)
3902 {
3903  VMA_ASSERT(!m_InsideString);
3904 
3905  BeginValue(true);
3906  m_SB.Add('"');
3907  m_InsideString = true;
3908  if(pStr != VMA_NULL && pStr[0] != '\0')
3909  {
3910  ContinueString(pStr);
3911  }
3912 }
3913 
3914 void VmaJsonWriter::ContinueString(const char* pStr)
3915 {
3916  VMA_ASSERT(m_InsideString);
3917 
3918  const size_t strLen = strlen(pStr);
3919  for(size_t i = 0; i < strLen; ++i)
3920  {
3921  char ch = pStr[i];
3922  if(ch == '\\')
3923  {
3924  m_SB.Add("\\\\");
3925  }
3926  else if(ch == '"')
3927  {
3928  m_SB.Add("\\\"");
3929  }
3930  else if(ch >= 32)
3931  {
3932  m_SB.Add(ch);
3933  }
3934  else switch(ch)
3935  {
3936  case '\n':
3937  m_SB.Add("\\n");
3938  break;
3939  case '\r':
3940  m_SB.Add("\\r");
3941  break;
3942  case '\t':
3943  m_SB.Add("\\t");
3944  break;
3945  default:
3946  VMA_ASSERT(0 && "Character not currently supported.");
3947  break;
3948  }
3949  }
3950 }
3951 
3952 void VmaJsonWriter::ContinueString(uint32_t n)
3953 {
3954  VMA_ASSERT(m_InsideString);
3955  m_SB.AddNumber(n);
3956 }
3957 
3958 void VmaJsonWriter::ContinueString(uint64_t n)
3959 {
3960  VMA_ASSERT(m_InsideString);
3961  m_SB.AddNumber(n);
3962 }
3963 
3964 void VmaJsonWriter::EndString(const char* pStr)
3965 {
3966  VMA_ASSERT(m_InsideString);
3967  if(pStr != VMA_NULL && pStr[0] != '\0')
3968  {
3969  ContinueString(pStr);
3970  }
3971  m_SB.Add('"');
3972  m_InsideString = false;
3973 }
3974 
3975 void VmaJsonWriter::WriteNumber(uint32_t n)
3976 {
3977  VMA_ASSERT(!m_InsideString);
3978  BeginValue(false);
3979  m_SB.AddNumber(n);
3980 }
3981 
3982 void VmaJsonWriter::WriteNumber(uint64_t n)
3983 {
3984  VMA_ASSERT(!m_InsideString);
3985  BeginValue(false);
3986  m_SB.AddNumber(n);
3987 }
3988 
3989 void VmaJsonWriter::WriteBool(bool b)
3990 {
3991  VMA_ASSERT(!m_InsideString);
3992  BeginValue(false);
3993  m_SB.Add(b ? "true" : "false");
3994 }
3995 
3996 void VmaJsonWriter::WriteNull()
3997 {
3998  VMA_ASSERT(!m_InsideString);
3999  BeginValue(false);
4000  m_SB.Add("null");
4001 }
4002 
4003 void VmaJsonWriter::BeginValue(bool isString)
4004 {
4005  if(!m_Stack.empty())
4006  {
4007  StackItem& currItem = m_Stack.back();
4008  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4009  currItem.valueCount % 2 == 0)
4010  {
4011  VMA_ASSERT(isString);
4012  }
4013 
4014  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4015  currItem.valueCount % 2 != 0)
4016  {
4017  m_SB.Add(": ");
4018  }
4019  else if(currItem.valueCount > 0)
4020  {
4021  m_SB.Add(", ");
4022  WriteIndent();
4023  }
4024  else
4025  {
4026  WriteIndent();
4027  }
4028  ++currItem.valueCount;
4029  }
4030 }
4031 
4032 void VmaJsonWriter::WriteIndent(bool oneLess)
4033 {
4034  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4035  {
4036  m_SB.AddNewLine();
4037 
4038  size_t count = m_Stack.size();
4039  if(count > 0 && oneLess)
4040  {
4041  --count;
4042  }
4043  for(size_t i = 0; i < count; ++i)
4044  {
4045  m_SB.Add(INDENT);
4046  }
4047  }
4048 }
4049 
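/*
A short usage sketch of VmaJsonWriter and the text it emits (sb is a
VmaStringBuilder as defined above; pAllocationCallbacks is assumed to exist):

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Blocks"); // Key - must be a string.
    json.WriteNumber(1u);       // Value.
    json.EndObject();

produces, with the single-space INDENT:

    {
     "Blocks": 1
    }
*/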
4050 #endif // #if VMA_STATS_STRING_ENABLED
4051 
4053 
4054 VkDeviceSize VmaAllocation_T::GetOffset() const
4055 {
4056  switch(m_Type)
4057  {
4058  case ALLOCATION_TYPE_BLOCK:
4059  return m_BlockAllocation.m_Offset;
4060  case ALLOCATION_TYPE_DEDICATED:
4061  return 0;
4062  default:
4063  VMA_ASSERT(0);
4064  return 0;
4065  }
4066 }
4067 
4068 VkDeviceMemory VmaAllocation_T::GetMemory() const
4069 {
4070  switch(m_Type)
4071  {
4072  case ALLOCATION_TYPE_BLOCK:
4073  return m_BlockAllocation.m_Block->m_hMemory;
4074  case ALLOCATION_TYPE_DEDICATED:
4075  return m_DedicatedAllocation.m_hMemory;
4076  default:
4077  VMA_ASSERT(0);
4078  return VK_NULL_HANDLE;
4079  }
4080 }
4081 
4082 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4083 {
4084  switch(m_Type)
4085  {
4086  case ALLOCATION_TYPE_BLOCK:
4087  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4088  case ALLOCATION_TYPE_DEDICATED:
4089  return m_DedicatedAllocation.m_MemoryTypeIndex;
4090  default:
4091  VMA_ASSERT(0);
4092  return UINT32_MAX;
4093  }
4094 }
4095 
4096 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
4097 {
4098  switch(m_Type)
4099  {
4100  case ALLOCATION_TYPE_BLOCK:
4101  return m_BlockAllocation.m_Block->m_BlockVectorType;
4102  case ALLOCATION_TYPE_DEDICATED:
4103  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
4104  default:
4105  VMA_ASSERT(0);
4106  return VMA_BLOCK_VECTOR_TYPE_COUNT;
4107  }
4108 }
4109 
4110 void* VmaAllocation_T::GetMappedData() const
4111 {
4112  switch(m_Type)
4113  {
4114  case ALLOCATION_TYPE_BLOCK:
4115  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
4116  {
4117  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
4118  }
4119  else
4120  {
4121  return VMA_NULL;
4122  }
4123  break;
4124  case ALLOCATION_TYPE_DEDICATED:
4125  return m_DedicatedAllocation.m_pMappedData;
4126  default:
4127  VMA_ASSERT(0);
4128  return VMA_NULL;
4129  }
4130 }
4131 
4132 bool VmaAllocation_T::CanBecomeLost() const
4133 {
4134  switch(m_Type)
4135  {
4136  case ALLOCATION_TYPE_BLOCK:
4137  return m_BlockAllocation.m_CanBecomeLost;
4138  case ALLOCATION_TYPE_DEDICATED:
4139  return false;
4140  default:
4141  VMA_ASSERT(0);
4142  return false;
4143  }
4144 }
4145 
4146 VmaPool VmaAllocation_T::GetPool() const
4147 {
4148  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4149  return m_BlockAllocation.m_hPool;
4150 }
4151 
4152 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4153 {
4154  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4155  if(m_DedicatedAllocation.m_PersistentMap)
4156  {
4157  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4158  hAllocator->m_hDevice,
4159  m_DedicatedAllocation.m_hMemory,
4160  0,
4161  VK_WHOLE_SIZE,
4162  0,
4163  &m_DedicatedAllocation.m_pMappedData);
4164  }
4165  return VK_SUCCESS;
4166 }
4167 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4168 {
4169  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4170  if(m_DedicatedAllocation.m_pMappedData)
4171  {
4172  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
4173  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
4174  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4175  }
4176 }
4177 
4178 
4179 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4180 {
4181  VMA_ASSERT(CanBecomeLost());
4182 
4183  /*
4184  Warning: This is a carefully designed algorithm.
4185  Do not modify unless you really know what you're doing :)
4186  */
4187  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4188  for(;;)
4189  {
4190  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4191  {
4192  VMA_ASSERT(0);
4193  return false;
4194  }
4195  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4196  {
4197  return false;
4198  }
4199  else // Last use time earlier than current time.
4200  {
4201  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4202  {
4203  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4204  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4205  return true;
4206  }
4207  }
4208  }
4209 }
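/*
Worked example of the frame arithmetic above (a sketch): with
frameInUseCount = 2 and an allocation last used in frame 10, the check
10 + 2 >= currentFrameIndex still holds for frames 11 and 12, so MakeLost()
returns false; from frame 13 on, the compare-exchange sets the index to
VMA_FRAME_INDEX_LOST and the function returns true.
*/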
4210 
4211 #if VMA_STATS_STRING_ENABLED
4212 
4213 // Correspond to values of enum VmaSuballocationType.
4214 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4215  "FREE",
4216  "UNKNOWN",
4217  "BUFFER",
4218  "IMAGE_UNKNOWN",
4219  "IMAGE_LINEAR",
4220  "IMAGE_OPTIMAL",
4221 };
4222 
4223 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4224 {
4225  json.BeginObject();
4226 
4227  json.WriteString("Blocks");
4228  json.WriteNumber(stat.blockCount);
4229 
4230  json.WriteString("Allocations");
4231  json.WriteNumber(stat.allocationCount);
4232 
4233  json.WriteString("UnusedRanges");
4234  json.WriteNumber(stat.unusedRangeCount);
4235 
4236  json.WriteString("UsedBytes");
4237  json.WriteNumber(stat.usedBytes);
4238 
4239  json.WriteString("UnusedBytes");
4240  json.WriteNumber(stat.unusedBytes);
4241 
4242  if(stat.allocationCount > 1)
4243  {
4244  json.WriteString("AllocationSize");
4245  json.BeginObject(true);
4246  json.WriteString("Min");
4247  json.WriteNumber(stat.allocationSizeMin);
4248  json.WriteString("Avg");
4249  json.WriteNumber(stat.allocationSizeAvg);
4250  json.WriteString("Max");
4251  json.WriteNumber(stat.allocationSizeMax);
4252  json.EndObject();
4253  }
4254 
4255  if(stat.unusedRangeCount > 1)
4256  {
4257  json.WriteString("UnusedRangeSize");
4258  json.BeginObject(true);
4259  json.WriteString("Min");
4260  json.WriteNumber(stat.unusedRangeSizeMin);
4261  json.WriteString("Avg");
4262  json.WriteNumber(stat.unusedRangeSizeAvg);
4263  json.WriteString("Max");
4264  json.WriteNumber(stat.unusedRangeSizeMax);
4265  json.EndObject();
4266  }
4267 
4268  json.EndObject();
4269 }
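/*
For illustration only, a VmaStatInfo printed through the function above has
this shape (all numbers made up):

    {
     "Blocks": 1,
     "Allocations": 24,
     "UnusedRanges": 3,
     "UsedBytes": 12582912,
     "UnusedBytes": 4194304,
     "AllocationSize": {"Min": 1024, "Avg": 524288, "Max": 2097152},
     "UnusedRangeSize": {"Min": 65536, "Avg": 1398101, "Max": 4063232}
    }
*/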
4270 
4271 #endif // #if VMA_STATS_STRING_ENABLED
4272 
4273 struct VmaSuballocationItemSizeLess
4274 {
4275  bool operator()(
4276  const VmaSuballocationList::iterator lhs,
4277  const VmaSuballocationList::iterator rhs) const
4278  {
4279  return lhs->size < rhs->size;
4280  }
4281  bool operator()(
4282  const VmaSuballocationList::iterator lhs,
4283  VkDeviceSize rhsSize) const
4284  {
4285  return lhs->size < rhsSize;
4286  }
4287 };
4288 
4290 // class VmaBlockMetadata
4291 
4292 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4293  m_Size(0),
4294  m_FreeCount(0),
4295  m_SumFreeSize(0),
4296  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4297  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4298 {
4299 }
4300 
4301 VmaBlockMetadata::~VmaBlockMetadata()
4302 {
4303 }
4304 
4305 void VmaBlockMetadata::Init(VkDeviceSize size)
4306 {
4307  m_Size = size;
4308  m_FreeCount = 1;
4309  m_SumFreeSize = size;
4310 
4311  VmaSuballocation suballoc = {};
4312  suballoc.offset = 0;
4313  suballoc.size = size;
4314  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4315  suballoc.hAllocation = VK_NULL_HANDLE;
4316 
4317  m_Suballocations.push_back(suballoc);
4318  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4319  --suballocItem;
4320  m_FreeSuballocationsBySize.push_back(suballocItem);
4321 }
4322 
4323 bool VmaBlockMetadata::Validate() const
4324 {
4325  if(m_Suballocations.empty())
4326  {
4327  return false;
4328  }
4329 
4330  // Expected offset of new suballocation as calculated from previous ones.
4331  VkDeviceSize calculatedOffset = 0;
4332  // Expected number of free suballocations as calculated from traversing their list.
4333  uint32_t calculatedFreeCount = 0;
4334  // Expected sum size of free suballocations as calculated from traversing their list.
4335  VkDeviceSize calculatedSumFreeSize = 0;
4336  // Expected number of free suballocations that should be registered in
4337  // m_FreeSuballocationsBySize calculated from traversing their list.
4338  size_t freeSuballocationsToRegister = 0;
4339  // True if previous visited suballocation was free.
4340  bool prevFree = false;
4341 
4342  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4343  suballocItem != m_Suballocations.cend();
4344  ++suballocItem)
4345  {
4346  const VmaSuballocation& subAlloc = *suballocItem;
4347 
4348  // Actual offset of this suballocation doesn't match expected one.
4349  if(subAlloc.offset != calculatedOffset)
4350  {
4351  return false;
4352  }
4353 
4354  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4355  // Two adjacent free suballocations are invalid. They should be merged.
4356  if(prevFree && currFree)
4357  {
4358  return false;
4359  }
4360  prevFree = currFree;
4361 
4362  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4363  {
4364  return false;
4365  }
4366 
4367  if(currFree)
4368  {
4369  calculatedSumFreeSize += subAlloc.size;
4370  ++calculatedFreeCount;
4371  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4372  {
4373  ++freeSuballocationsToRegister;
4374  }
4375  }
4376 
4377  calculatedOffset += subAlloc.size;
4378  }
4379 
4380  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4381  // match expected one.
4382  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4383  {
4384  return false;
4385  }
4386 
4387  VkDeviceSize lastSize = 0;
4388  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4389  {
4390  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4391 
4392  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4393  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4394  {
4395  return false;
4396  }
4397  // They must be sorted by size ascending.
4398  if(suballocItem->size < lastSize)
4399  {
4400  return false;
4401  }
4402 
4403  lastSize = suballocItem->size;
4404  }
4405 
4406  // Check if totals match calculated values.
4407  return
4408  ValidateFreeSuballocationList() &&
4409  (calculatedOffset == m_Size) &&
4410  (calculatedSumFreeSize == m_SumFreeSize) &&
4411  (calculatedFreeCount == m_FreeCount);
4412 }
4413 
4414 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4415 {
4416  if(!m_FreeSuballocationsBySize.empty())
4417  {
4418  return m_FreeSuballocationsBySize.back()->size;
4419  }
4420  else
4421  {
4422  return 0;
4423  }
4424 }
4425 
4426 bool VmaBlockMetadata::IsEmpty() const
4427 {
4428  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4429 }
4430 
4431 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4432 {
4433  outInfo.blockCount = 1;
4434 
4435  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4436  outInfo.allocationCount = rangeCount - m_FreeCount;
4437  outInfo.unusedRangeCount = m_FreeCount;
4438 
4439  outInfo.unusedBytes = m_SumFreeSize;
4440  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4441 
4442  outInfo.allocationSizeMin = UINT64_MAX;
4443  outInfo.allocationSizeMax = 0;
4444  outInfo.unusedRangeSizeMin = UINT64_MAX;
4445  outInfo.unusedRangeSizeMax = 0;
4446 
4447  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4448  suballocItem != m_Suballocations.cend();
4449  ++suballocItem)
4450  {
4451  const VmaSuballocation& suballoc = *suballocItem;
4452  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4453  {
4454  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4455  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4456  }
4457  else
4458  {
4459  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4460  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4461  }
4462  }
4463 }
4464 
4465 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4466 {
4467  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4468 
4469  inoutStats.size += m_Size;
4470  inoutStats.unusedSize += m_SumFreeSize;
4471  inoutStats.allocationCount += rangeCount - m_FreeCount;
4472  inoutStats.unusedRangeCount += m_FreeCount;
4473  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4474 }
4475 
4476 #if VMA_STATS_STRING_ENABLED
4477 
4478 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4479 {
4480  json.BeginObject();
4481 
4482  json.WriteString("TotalBytes");
4483  json.WriteNumber(m_Size);
4484 
4485  json.WriteString("UnusedBytes");
4486  json.WriteNumber(m_SumFreeSize);
4487 
4488  json.WriteString("Allocations");
4489  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4490 
4491  json.WriteString("UnusedRanges");
4492  json.WriteNumber(m_FreeCount);
4493 
4494  json.WriteString("Suballocations");
4495  json.BeginArray();
4496  size_t i = 0;
4497  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4498  suballocItem != m_Suballocations.cend();
4499  ++suballocItem, ++i)
4500  {
4501  json.BeginObject(true);
4502 
4503  json.WriteString("Type");
4504  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4505 
4506  json.WriteString("Size");
4507  json.WriteNumber(suballocItem->size);
4508 
4509  json.WriteString("Offset");
4510  json.WriteNumber(suballocItem->offset);
4511 
4512  json.EndObject();
4513  }
4514  json.EndArray();
4515 
4516  json.EndObject();
4517 }
4518 
4519 #endif // #if VMA_STATS_STRING_ENABLED
4520 
4521 /*
4522 How many suitable free suballocations to analyze before choosing best one.
4523 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4524  be chosen.
4525 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4526  suballocations will be analyzed and the best one will be chosen.
4527 - Any other value is also acceptable.
4528 */
4529 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4530 
4531 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4532 {
4533  VMA_ASSERT(IsEmpty());
4534  pAllocationRequest->offset = 0;
4535  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4536  pAllocationRequest->sumItemSize = 0;
4537  pAllocationRequest->item = m_Suballocations.begin();
4538  pAllocationRequest->itemsToMakeLostCount = 0;
4539 }
4540 
4541 bool VmaBlockMetadata::CreateAllocationRequest(
4542  uint32_t currentFrameIndex,
4543  uint32_t frameInUseCount,
4544  VkDeviceSize bufferImageGranularity,
4545  VkDeviceSize allocSize,
4546  VkDeviceSize allocAlignment,
4547  VmaSuballocationType allocType,
4548  bool canMakeOtherLost,
4549  VmaAllocationRequest* pAllocationRequest)
4550 {
4551  VMA_ASSERT(allocSize > 0);
4552  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4553  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4554  VMA_HEAVY_ASSERT(Validate());
4555 
4556  // There is not enough total free space in this block to fulfill the request: Early return.
4557  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4558  {
4559  return false;
4560  }
4561 
4562  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4563  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4564  if(freeSuballocCount > 0)
4565  {
4566  if(VMA_BEST_FIT)
4567  {
4568  // Find first free suballocation with size not less than allocSize.
4569  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4570  m_FreeSuballocationsBySize.data(),
4571  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4572  allocSize,
4573  VmaSuballocationItemSizeLess());
4574  size_t index = it - m_FreeSuballocationsBySize.data();
4575  for(; index < freeSuballocCount; ++index)
4576  {
4577  if(CheckAllocation(
4578  currentFrameIndex,
4579  frameInUseCount,
4580  bufferImageGranularity,
4581  allocSize,
4582  allocAlignment,
4583  allocType,
4584  m_FreeSuballocationsBySize[index],
4585  false, // canMakeOtherLost
4586  &pAllocationRequest->offset,
4587  &pAllocationRequest->itemsToMakeLostCount,
4588  &pAllocationRequest->sumFreeSize,
4589  &pAllocationRequest->sumItemSize))
4590  {
4591  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4592  return true;
4593  }
4594  }
4595  }
4596  else
4597  {
4598  // Search starting from biggest suballocations.
4599  for(size_t index = freeSuballocCount; index--; )
4600  {
4601  if(CheckAllocation(
4602  currentFrameIndex,
4603  frameInUseCount,
4604  bufferImageGranularity,
4605  allocSize,
4606  allocAlignment,
4607  allocType,
4608  m_FreeSuballocationsBySize[index],
4609  false, // canMakeOtherLost
4610  &pAllocationRequest->offset,
4611  &pAllocationRequest->itemsToMakeLostCount,
4612  &pAllocationRequest->sumFreeSize,
4613  &pAllocationRequest->sumItemSize))
4614  {
4615  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4616  return true;
4617  }
4618  }
4619  }
4620  }
4621 
4622  if(canMakeOtherLost)
4623  {
4624  // Brute-force algorithm. TODO: Come up with something better.
4625 
4626  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4627  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4628 
4629  VmaAllocationRequest tmpAllocRequest = {};
4630  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4631  suballocIt != m_Suballocations.end();
4632  ++suballocIt)
4633  {
4634  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4635  suballocIt->hAllocation->CanBecomeLost())
4636  {
4637  if(CheckAllocation(
4638  currentFrameIndex,
4639  frameInUseCount,
4640  bufferImageGranularity,
4641  allocSize,
4642  allocAlignment,
4643  allocType,
4644  suballocIt,
4645  canMakeOtherLost,
4646  &tmpAllocRequest.offset,
4647  &tmpAllocRequest.itemsToMakeLostCount,
4648  &tmpAllocRequest.sumFreeSize,
4649  &tmpAllocRequest.sumItemSize))
4650  {
4651  tmpAllocRequest.item = suballocIt;
4652 
4653  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4654  {
4655  *pAllocationRequest = tmpAllocRequest;
4656  }
4657  }
4658  }
4659  }
4660 
4661  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4662  {
4663  return true;
4664  }
4665  }
4666 
4667  return false;
4668 }
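/*
To make the two search paths above concrete (a sketch): if
m_FreeSuballocationsBySize holds sizes {64, 256, 1024} and allocSize is 200,
the VMA_BEST_FIT path binary-searches to the first size >= 200 and tries the
256-byte suballocation first, while the other path iterates from the back
and tries the 1024-byte suballocation first (Worst-Fit).
*/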
4669 
4670 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4671  uint32_t currentFrameIndex,
4672  uint32_t frameInUseCount,
4673  VmaAllocationRequest* pAllocationRequest)
4674 {
4675  while(pAllocationRequest->itemsToMakeLostCount > 0)
4676  {
4677  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4678  {
4679  ++pAllocationRequest->item;
4680  }
4681  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4682  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4683  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4684  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4685  {
4686  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4687  --pAllocationRequest->itemsToMakeLostCount;
4688  }
4689  else
4690  {
4691  return false;
4692  }
4693  }
4694 
4695  VMA_HEAVY_ASSERT(Validate());
4696  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4697  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4698 
4699  return true;
4700 }
4701 
4702 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4703 {
4704  uint32_t lostAllocationCount = 0;
4705  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4706  it != m_Suballocations.end();
4707  ++it)
4708  {
4709  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4710  it->hAllocation->CanBecomeLost() &&
4711  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4712  {
4713  it = FreeSuballocation(it);
4714  ++lostAllocationCount;
4715  }
4716  }
4717  return lostAllocationCount;
4718 }
4719 
4720 void VmaBlockMetadata::Alloc(
4721  const VmaAllocationRequest& request,
4722  VmaSuballocationType type,
4723  VkDeviceSize allocSize,
4724  VmaAllocation hAllocation)
4725 {
4726  VMA_ASSERT(request.item != m_Suballocations.end());
4727  VmaSuballocation& suballoc = *request.item;
4728  // Given suballocation is a free block.
4729  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4730  // Given offset is inside this suballocation.
4731  VMA_ASSERT(request.offset >= suballoc.offset);
4732  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4733  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4734  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4735 
4736  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4737  // it to become used.
4738  UnregisterFreeSuballocation(request.item);
4739 
4740  suballoc.offset = request.offset;
4741  suballoc.size = allocSize;
4742  suballoc.type = type;
4743  suballoc.hAllocation = hAllocation;
4744 
4745  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4746  if(paddingEnd)
4747  {
4748  VmaSuballocation paddingSuballoc = {};
4749  paddingSuballoc.offset = request.offset + allocSize;
4750  paddingSuballoc.size = paddingEnd;
4751  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4752  VmaSuballocationList::iterator next = request.item;
4753  ++next;
4754  const VmaSuballocationList::iterator paddingEndItem =
4755  m_Suballocations.insert(next, paddingSuballoc);
4756  RegisterFreeSuballocation(paddingEndItem);
4757  }
4758 
4759  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4760  if(paddingBegin)
4761  {
4762  VmaSuballocation paddingSuballoc = {};
4763  paddingSuballoc.offset = request.offset - paddingBegin;
4764  paddingSuballoc.size = paddingBegin;
4765  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4766  const VmaSuballocationList::iterator paddingBeginItem =
4767  m_Suballocations.insert(request.item, paddingSuballoc);
4768  RegisterFreeSuballocation(paddingBeginItem);
4769  }
4770 
4771  // Update totals.
4772  m_FreeCount = m_FreeCount - 1;
4773  if(paddingBegin > 0)
4774  {
4775  ++m_FreeCount;
4776  }
4777  if(paddingEnd > 0)
4778  {
4779  ++m_FreeCount;
4780  }
4781  m_SumFreeSize -= allocSize;
4782 }
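/*
Worked example of the padding arithmetic above (a sketch): splitting a free
suballocation [0..1000) for a request at aligned offset 256 with
allocSize = 512 gives paddingBegin = 256 and paddingEnd = 1000 - 256 - 512
= 232, so the range becomes [0..256) FREE, [256..768) used, [768..1000)
FREE, and m_FreeCount goes from 1 to 2.
*/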
4783 
4784 void VmaBlockMetadata::Free(const VmaAllocation allocation)
4785 {
4786  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4787  suballocItem != m_Suballocations.end();
4788  ++suballocItem)
4789  {
4790  VmaSuballocation& suballoc = *suballocItem;
4791  if(suballoc.hAllocation == allocation)
4792  {
4793  FreeSuballocation(suballocItem);
4794  VMA_HEAVY_ASSERT(Validate());
4795  return;
4796  }
4797  }
4798  VMA_ASSERT(0 && "Not found!");
4799 }
4800 
4801 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
4802 {
4803  VkDeviceSize lastSize = 0;
4804  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
4805  {
4806  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
4807 
4808  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
4809  {
4810  VMA_ASSERT(0);
4811  return false;
4812  }
4813  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4814  {
4815  VMA_ASSERT(0);
4816  return false;
4817  }
4818  if(it->size < lastSize)
4819  {
4820  VMA_ASSERT(0);
4821  return false;
4822  }
4823 
4824  lastSize = it->size;
4825  }
4826  return true;
4827 }
4828 
4829 bool VmaBlockMetadata::CheckAllocation(
4830  uint32_t currentFrameIndex,
4831  uint32_t frameInUseCount,
4832  VkDeviceSize bufferImageGranularity,
4833  VkDeviceSize allocSize,
4834  VkDeviceSize allocAlignment,
4835  VmaSuballocationType allocType,
4836  VmaSuballocationList::const_iterator suballocItem,
4837  bool canMakeOtherLost,
4838  VkDeviceSize* pOffset,
4839  size_t* itemsToMakeLostCount,
4840  VkDeviceSize* pSumFreeSize,
4841  VkDeviceSize* pSumItemSize) const
4842 {
4843  VMA_ASSERT(allocSize > 0);
4844  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4845  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4846  VMA_ASSERT(pOffset != VMA_NULL);
4847 
4848  *itemsToMakeLostCount = 0;
4849  *pSumFreeSize = 0;
4850  *pSumItemSize = 0;
4851 
4852  if(canMakeOtherLost)
4853  {
4854  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4855  {
4856  *pSumFreeSize = suballocItem->size;
4857  }
4858  else
4859  {
4860  if(suballocItem->hAllocation->CanBecomeLost() &&
4861  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4862  {
4863  ++*itemsToMakeLostCount;
4864  *pSumItemSize = suballocItem->size;
4865  }
4866  else
4867  {
4868  return false;
4869  }
4870  }
4871 
4872  // Remaining size is too small for this request: Early return.
4873  if(m_Size - suballocItem->offset < allocSize)
4874  {
4875  return false;
4876  }
4877 
4878  // Start from offset equal to beginning of this suballocation.
4879  *pOffset = suballocItem->offset;
4880 
4881  // Apply VMA_DEBUG_MARGIN at the beginning.
4882  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4883  {
4884  *pOffset += VMA_DEBUG_MARGIN;
4885  }
4886 
4887  // Apply alignment.
4888  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4889  *pOffset = VmaAlignUp(*pOffset, alignment);
4890 
4891  // Check previous suballocations for BufferImageGranularity conflicts.
4892  // Make bigger alignment if necessary.
4893  if(bufferImageGranularity > 1)
4894  {
4895  bool bufferImageGranularityConflict = false;
4896  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4897  while(prevSuballocItem != m_Suballocations.cbegin())
4898  {
4899  --prevSuballocItem;
4900  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4901  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4902  {
4903  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4904  {
4905  bufferImageGranularityConflict = true;
4906  break;
4907  }
4908  }
4909  else
4910  // Already on previous page.
4911  break;
4912  }
4913  if(bufferImageGranularityConflict)
4914  {
4915  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4916  }
4917  }
4918 
4919  // Now that we have final *pOffset, check if we are past suballocItem.
4920  // If yes, return false - this function should be called for another suballocItem as starting point.
4921  if(*pOffset >= suballocItem->offset + suballocItem->size)
4922  {
4923  return false;
4924  }
4925 
4926  // Calculate padding at the beginning based on current offset.
4927  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4928 
4929  // Calculate required margin at the end if this is not last suballocation.
4930  VmaSuballocationList::const_iterator next = suballocItem;
4931  ++next;
4932  const VkDeviceSize requiredEndMargin =
4933  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4934 
4935  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4936  // Another early return check.
4937  if(suballocItem->offset + totalSize > m_Size)
4938  {
4939  return false;
4940  }
4941 
4942  // Advance lastSuballocItem until desired size is reached.
4943  // Update itemsToMakeLostCount.
4944  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4945  if(totalSize > suballocItem->size)
4946  {
4947  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4948  while(remainingSize > 0)
4949  {
4950  ++lastSuballocItem;
4951  if(lastSuballocItem == m_Suballocations.cend())
4952  {
4953  return false;
4954  }
4955  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4956  {
4957  *pSumFreeSize += lastSuballocItem->size;
4958  }
4959  else
4960  {
4961  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4962  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4963  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4964  {
4965  ++*itemsToMakeLostCount;
4966  *pSumItemSize += lastSuballocItem->size;
4967  }
4968  else
4969  {
4970  return false;
4971  }
4972  }
4973  remainingSize = (lastSuballocItem->size < remainingSize) ?
4974  remainingSize - lastSuballocItem->size : 0;
4975  }
4976  }
4977 
4978  // Check next suballocations for BufferImageGranularity conflicts.
4979  // If conflict exists, we must mark more allocations lost or fail.
4980  if(bufferImageGranularity > 1)
4981  {
4982  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4983  ++nextSuballocItem;
4984  while(nextSuballocItem != m_Suballocations.cend())
4985  {
4986  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4987  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4988  {
4989  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4990  {
4991  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4992  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4993  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4994  {
4995  ++*itemsToMakeLostCount;
4996  }
4997  else
4998  {
4999  return false;
5000  }
5001  }
5002  }
5003  else
5004  {
5005  // Already on next page.
5006  break;
5007  }
5008  ++nextSuballocItem;
5009  }
5010  }
5011  }
5012  else
5013  {
5014  const VmaSuballocation& suballoc = *suballocItem;
5015  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5016 
5017  *pSumFreeSize = suballoc.size;
5018 
5019  // Size of this suballocation is too small for this request: Early return.
5020  if(suballoc.size < allocSize)
5021  {
5022  return false;
5023  }
5024 
5025  // Start from offset equal to beginning of this suballocation.
5026  *pOffset = suballoc.offset;
5027 
5028  // Apply VMA_DEBUG_MARGIN at the beginning.
5029  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5030  {
5031  *pOffset += VMA_DEBUG_MARGIN;
5032  }
5033 
5034  // Apply alignment.
5035  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5036  *pOffset = VmaAlignUp(*pOffset, alignment);
5037 
5038  // Check previous suballocations for BufferImageGranularity conflicts.
5039  // Make bigger alignment if necessary.
5040  if(bufferImageGranularity > 1)
5041  {
5042  bool bufferImageGranularityConflict = false;
5043  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5044  while(prevSuballocItem != m_Suballocations.cbegin())
5045  {
5046  --prevSuballocItem;
5047  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5048  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5049  {
5050  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5051  {
5052  bufferImageGranularityConflict = true;
5053  break;
5054  }
5055  }
5056  else
5057  // Already on previous page.
5058  break;
5059  }
5060  if(bufferImageGranularityConflict)
5061  {
5062  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5063  }
5064  }
5065 
5066  // Calculate padding at the beginning based on current offset.
5067  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5068 
5069  // Calculate required margin at the end if this is not last suballocation.
5070  VmaSuballocationList::const_iterator next = suballocItem;
5071  ++next;
5072  const VkDeviceSize requiredEndMargin =
5073  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5074 
5075  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5076  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5077  {
5078  return false;
5079  }
5080 
5081  // Check next suballocations for BufferImageGranularity conflicts.
5082  // If conflict exists, allocation cannot be made here.
5083  if(bufferImageGranularity > 1)
5084  {
5085  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5086  ++nextSuballocItem;
5087  while(nextSuballocItem != m_Suballocations.cend())
5088  {
5089  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5090  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5091  {
5092  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5093  {
5094  return false;
5095  }
5096  }
5097  else
5098  {
5099  // Already on next page.
5100  break;
5101  }
5102  ++nextSuballocItem;
5103  }
5104  }
5105  }
5106 
5107  // All tests passed: Success. pOffset is already filled.
5108  return true;
5109 }
5110 
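// Coalesces two adjacent free suballocations: item absorbs the size of the one
// that follows it, the following item is erased, and m_FreeCount drops by one.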
5111 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5112 {
5113  VMA_ASSERT(item != m_Suballocations.end());
5114  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5115 
5116  VmaSuballocationList::iterator nextItem = item;
5117  ++nextItem;
5118  VMA_ASSERT(nextItem != m_Suballocations.end());
5119  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5120 
5121  item->size += nextItem->size;
5122  --m_FreeCount;
5123  m_Suballocations.erase(nextItem);
5124 }
5125 
5126 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5127 {
5128  // Change this suballocation to be marked as free.
5129  VmaSuballocation& suballoc = *suballocItem;
5130  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5131  suballoc.hAllocation = VK_NULL_HANDLE;
5132 
5133  // Update totals.
5134  ++m_FreeCount;
5135  m_SumFreeSize += suballoc.size;
5136 
5137  // Merge with previous and/or next suballocation if it's also free.
5138  bool mergeWithNext = false;
5139  bool mergeWithPrev = false;
5140 
5141  VmaSuballocationList::iterator nextItem = suballocItem;
5142  ++nextItem;
5143  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5144  {
5145  mergeWithNext = true;
5146  }
5147 
5148  VmaSuballocationList::iterator prevItem = suballocItem;
5149  if(suballocItem != m_Suballocations.begin())
5150  {
5151  --prevItem;
5152  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5153  {
5154  mergeWithPrev = true;
5155  }
5156  }
5157 
5158  if(mergeWithNext)
5159  {
5160  UnregisterFreeSuballocation(nextItem);
5161  MergeFreeWithNext(suballocItem);
5162  }
5163 
5164  if(mergeWithPrev)
5165  {
5166  UnregisterFreeSuballocation(prevItem);
5167  MergeFreeWithNext(prevItem);
5168  RegisterFreeSuballocation(prevItem);
5169  return prevItem;
5170  }
5171  else
5172  {
5173  RegisterFreeSuballocation(suballocItem);
5174  return suballocItem;
5175  }
5176 }
5177 
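// m_FreeSuballocationsBySize is kept sorted by size - only free ranges of at
// least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are tracked - which is
// what allows a binary search for a best-fit free range during allocation.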
5178 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5179 {
5180  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5181  VMA_ASSERT(item->size > 0);
5182 
5183  // You may want to enable this validation at the beginning or at the end of
5184  // this function, depending on what you want to check.
5185  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5186 
5187  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5188  {
5189  if(m_FreeSuballocationsBySize.empty())
5190  {
5191  m_FreeSuballocationsBySize.push_back(item);
5192  }
5193  else
5194  {
5195  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5196  }
5197  }
5198 
5199  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5200 }
5201 
5202 
5203 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5204 {
5205  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5206  VMA_ASSERT(item->size > 0);
5207 
5208  // You may want to enable this validation at the beginning or at the end of
5209  // this function, depending on what you want to check.
5210  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5211 
5212  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5213  {
5214  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5215  m_FreeSuballocationsBySize.data(),
5216  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5217  item,
5218  VmaSuballocationItemSizeLess());
5219  for(size_t index = it - m_FreeSuballocationsBySize.data();
5220  index < m_FreeSuballocationsBySize.size();
5221  ++index)
5222  {
5223  if(m_FreeSuballocationsBySize[index] == item)
5224  {
5225  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5226  return;
5227  }
5228  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5229  }
5230  VMA_ASSERT(0 && "Not found.");
5231  }
5232 
5233  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5234 }
5235 
5236 ////////////////////////////////////////////////////////////////////////////////
5237 // class VmaDeviceMemoryBlock
5238 
5239 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5240  m_MemoryTypeIndex(UINT32_MAX),
5241  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
5242  m_hMemory(VK_NULL_HANDLE),
5243  m_PersistentMap(false),
5244  m_pMappedData(VMA_NULL),
5245  m_Metadata(hAllocator)
5246 {
5247 }
5248 
5249 void VmaDeviceMemoryBlock::Init(
5250  uint32_t newMemoryTypeIndex,
5251  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
5252  VkDeviceMemory newMemory,
5253  VkDeviceSize newSize,
5254  bool persistentMap,
5255  void* pMappedData)
5256 {
5257  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5258 
5259  m_MemoryTypeIndex = newMemoryTypeIndex;
5260  m_BlockVectorType = newBlockVectorType;
5261  m_hMemory = newMemory;
5262  m_PersistentMap = persistentMap;
5263  m_pMappedData = pMappedData;
5264 
5265  m_Metadata.Init(newSize);
5266 }
5267 
5268 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5269 {
5270  // This is the most important assert in the entire library.
5271  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5272  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5273 
5274  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5275  if(m_pMappedData != VMA_NULL)
5276  {
5277  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
5278  m_pMappedData = VMA_NULL;
5279  }
5280 
5281  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5282  m_hMemory = VK_NULL_HANDLE;
5283 }
5284 
5285 bool VmaDeviceMemoryBlock::Validate() const
5286 {
5287  if((m_hMemory == VK_NULL_HANDLE) ||
5288  (m_Metadata.GetSize() == 0))
5289  {
5290  return false;
5291  }
5292 
5293  return m_Metadata.Validate();
5294 }
5295 
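// Statistics helpers. The *Min fields start at UINT64_MAX so that the first
// VMA_MIN() applied in VmaAddStatInfo() picks up the first real value.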
5296 static void InitStatInfo(VmaStatInfo& outInfo)
5297 {
5298  memset(&outInfo, 0, sizeof(outInfo));
5299  outInfo.allocationSizeMin = UINT64_MAX;
5300  outInfo.unusedRangeSizeMin = UINT64_MAX;
5301 }
5302 
5303 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5304 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5305 {
5306  inoutInfo.blockCount += srcInfo.blockCount;
5307  inoutInfo.allocationCount += srcInfo.allocationCount;
5308  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5309  inoutInfo.usedBytes += srcInfo.usedBytes;
5310  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5311  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5312  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5313  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5314  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5315 }
5316 
5317 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5318 {
5319  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5320  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5321  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5322  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5323 }
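// Note on the averages above: VmaRoundDiv rounds to nearest rather than
// truncating, e.g. VmaRoundDiv<VkDeviceSize>(10, 4) == 3, not 2.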
5324 
5325 VmaPool_T::VmaPool_T(
5326  VmaAllocator hAllocator,
5327  const VmaPoolCreateInfo& createInfo) :
5328  m_BlockVector(
5329  hAllocator,
5330  createInfo.memoryTypeIndex,
5331  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5332  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5333  createInfo.blockSize,
5334  createInfo.minBlockCount,
5335  createInfo.maxBlockCount,
5336  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5337  createInfo.frameInUseCount,
5338  true) // isCustomPool
5339 {
5340 }
5341 
5342 VmaPool_T::~VmaPool_T()
5343 {
5344 }
5345 
5346 #if VMA_STATS_STRING_ENABLED
5347 
5348 #endif // #if VMA_STATS_STRING_ENABLED
5349 
5350 VmaBlockVector::VmaBlockVector(
5351  VmaAllocator hAllocator,
5352  uint32_t memoryTypeIndex,
5353  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5354  VkDeviceSize preferredBlockSize,
5355  size_t minBlockCount,
5356  size_t maxBlockCount,
5357  VkDeviceSize bufferImageGranularity,
5358  uint32_t frameInUseCount,
5359  bool isCustomPool) :
5360  m_hAllocator(hAllocator),
5361  m_MemoryTypeIndex(memoryTypeIndex),
5362  m_BlockVectorType(blockVectorType),
5363  m_PreferredBlockSize(preferredBlockSize),
5364  m_MinBlockCount(minBlockCount),
5365  m_MaxBlockCount(maxBlockCount),
5366  m_BufferImageGranularity(bufferImageGranularity),
5367  m_FrameInUseCount(frameInUseCount),
5368  m_IsCustomPool(isCustomPool),
5369  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5370  m_HasEmptyBlock(false),
5371  m_pDefragmentator(VMA_NULL)
5372 {
5373 }
5374 
5375 VmaBlockVector::~VmaBlockVector()
5376 {
5377  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5378 
5379  for(size_t i = m_Blocks.size(); i--; )
5380  {
5381  m_Blocks[i]->Destroy(m_hAllocator);
5382  vma_delete(m_hAllocator, m_Blocks[i]);
5383  }
5384 }
5385 
5386 VkResult VmaBlockVector::CreateMinBlocks()
5387 {
5388  for(size_t i = 0; i < m_MinBlockCount; ++i)
5389  {
5390  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5391  if(res != VK_SUCCESS)
5392  {
5393  return res;
5394  }
5395  }
5396  return VK_SUCCESS;
5397 }
5398 
5399 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5400 {
5401  pStats->size = 0;
5402  pStats->unusedSize = 0;
5403  pStats->allocationCount = 0;
5404  pStats->unusedRangeCount = 0;
5405  pStats->unusedRangeSizeMax = 0;
5406 
5407  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5408 
5409  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5410  {
5411  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5412  VMA_ASSERT(pBlock);
5413  VMA_HEAVY_ASSERT(pBlock->Validate());
5414  pBlock->m_Metadata.AddPoolStats(*pStats);
5415  }
5416 }
5417 
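// Upper bound on retries in step 3 of Allocate() below, for the case where
// other threads keep touching the allocations this thread tries to make lost.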
5418 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5419 
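// Allocation strategy, in order:
// 1. Search existing blocks without making other allocations lost.
// 2. Create a new block. In default pools, retry at 1/2 and then 1/4 of the
//    preferred block size if the full-size vkAllocateMemory fails.
// 3. If VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT is set, search existing
//    blocks again, this time allowing lost-able allocations to be sacrificed.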
5420 VkResult VmaBlockVector::Allocate(
5421  VmaPool hCurrentPool,
5422  uint32_t currentFrameIndex,
5423  const VkMemoryRequirements& vkMemReq,
5424  const VmaAllocationCreateInfo& createInfo,
5425  VmaSuballocationType suballocType,
5426  VmaAllocation* pAllocation)
5427 {
5428  // Validate flags.
5429  if(createInfo.pool != VK_NULL_HANDLE &&
5430  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5431  {
5432  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5433  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5434  }
5435 
5436  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5437 
5438  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5439  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5440  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5441  {
5442  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5443  VMA_ASSERT(pCurrBlock);
5444  VmaAllocationRequest currRequest = {};
5445  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5446  currentFrameIndex,
5447  m_FrameInUseCount,
5448  m_BufferImageGranularity,
5449  vkMemReq.size,
5450  vkMemReq.alignment,
5451  suballocType,
5452  false, // canMakeOtherLost
5453  &currRequest))
5454  {
5455  // Allocate from pCurrBlock.
5456  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5457 
5458  // We no longer have an empty block.
5459  if(pCurrBlock->m_Metadata.IsEmpty())
5460  {
5461  m_HasEmptyBlock = false;
5462  }
5463 
5464  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5465  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5466  (*pAllocation)->InitBlockAllocation(
5467  hCurrentPool,
5468  pCurrBlock,
5469  currRequest.offset,
5470  vkMemReq.alignment,
5471  vkMemReq.size,
5472  suballocType,
5473  createInfo.pUserData,
5474  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5475  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5476  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
5477  return VK_SUCCESS;
5478  }
5479  }
5480 
5481  const bool canCreateNewBlock =
5482  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5483  (m_Blocks.size() < m_MaxBlockCount);
5484 
5485  // 2. Try to create new block.
5486  if(canCreateNewBlock)
5487  {
5488  // 2.1. Start with full preferredBlockSize.
5489  VkDeviceSize blockSize = m_PreferredBlockSize;
5490  size_t newBlockIndex = 0;
5491  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5492  // Allocating blocks of other sizes is allowed only in default pools.
5493  // In custom pools block size is fixed.
5494  if(res < 0 && m_IsCustomPool == false)
5495  {
5496  // 2.2. Try half the size.
5497  blockSize /= 2;
5498  if(blockSize >= vkMemReq.size)
5499  {
5500  res = CreateBlock(blockSize, &newBlockIndex);
5501  if(res < 0)
5502  {
5503  // 2.3. Try quarter the size.
5504  blockSize /= 2;
5505  if(blockSize >= vkMemReq.size)
5506  {
5507  res = CreateBlock(blockSize, &newBlockIndex);
5508  }
5509  }
5510  }
5511  }
5512  if(res == VK_SUCCESS)
5513  {
5514  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5515  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5516 
5517  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
5518  VmaAllocationRequest allocRequest;
5519  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5520  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5521  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5522  (*pAllocation)->InitBlockAllocation(
5523  hCurrentPool,
5524  pBlock,
5525  allocRequest.offset,
5526  vkMemReq.alignment,
5527  vkMemReq.size,
5528  suballocType,
5529  createInfo.pUserData,
5530  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5531  VMA_HEAVY_ASSERT(pBlock->Validate());
5532  VMA_DEBUG_LOG(" Created new block Size=%llu", pBlock->m_Metadata.GetSize());
5533 
5534  return VK_SUCCESS;
5535  }
5536  }
5537 
5538  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5539 
5540  // 3. Try to allocate from existing blocks with making other allocations lost.
5541  if(canMakeOtherLost)
5542  {
5543  uint32_t tryIndex = 0;
5544  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5545  {
5546  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5547  VmaAllocationRequest bestRequest = {};
5548  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5549 
5550  // 1. Search existing allocations.
5551  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5552  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5553  {
5554  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5555  VMA_ASSERT(pCurrBlock);
5556  VmaAllocationRequest currRequest = {};
5557  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5558  currentFrameIndex,
5559  m_FrameInUseCount,
5560  m_BufferImageGranularity,
5561  vkMemReq.size,
5562  vkMemReq.alignment,
5563  suballocType,
5564  canMakeOtherLost,
5565  &currRequest))
5566  {
5567  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5568  if(pBestRequestBlock == VMA_NULL ||
5569  currRequestCost < bestRequestCost)
5570  {
5571  pBestRequestBlock = pCurrBlock;
5572  bestRequest = currRequest;
5573  bestRequestCost = currRequestCost;
5574 
5575  if(bestRequestCost == 0)
5576  {
5577  break;
5578  }
5579  }
5580  }
5581  }
5582 
5583  if(pBestRequestBlock != VMA_NULL)
5584  {
5585  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5586  currentFrameIndex,
5587  m_FrameInUseCount,
5588  &bestRequest))
5589  {
5590  // We no longer have an empty block.
5591  if(pBestRequestBlock->m_Metadata.IsEmpty())
5592  {
5593  m_HasEmptyBlock = false;
5594  }
5595  // Allocate from this pBlock.
5596  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5597  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5598  (*pAllocation)->InitBlockAllocation(
5599  hCurrentPool,
5600  pBestRequestBlock,
5601  bestRequest.offset,
5602  vkMemReq.alignment,
5603  vkMemReq.size,
5604  suballocType,
5605  createInfo.pUserData,
5606  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5607  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5608  VMA_DEBUG_LOG(" Returned from existing block");
5609  return VK_SUCCESS;
5610  }
5611  // else: Some allocations must have been touched while we are here. Next try.
5612  }
5613  else
5614  {
5615  // Could not find place in any of the blocks - break outer loop.
5616  break;
5617  }
5618  }
5619  /* Maximum number of tries exceeded - a very unlikely event: many other
5620  threads were simultaneously touching allocations, making it impossible to
5621  mark them lost while we were trying to allocate. */
5622  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5623  {
5624  return VK_ERROR_TOO_MANY_OBJECTS;
5625  }
5626  }
5627 
5628  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5629 }
5630 
5631 void VmaBlockVector::Free(
5632  VmaAllocation hAllocation)
5633 {
5634  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5635 
5636  // Scope for lock.
5637  {
5638  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5639 
5640  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5641 
5642  pBlock->m_Metadata.Free(hAllocation);
5643  VMA_HEAVY_ASSERT(pBlock->Validate());
5644 
5645  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5646 
5647  // pBlock became empty after this deallocation.
5648  if(pBlock->m_Metadata.IsEmpty())
5649  {
5650  // We already have an empty block - we don't want two, so delete this one.
5651  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5652  {
5653  pBlockToDelete = pBlock;
5654  Remove(pBlock);
5655  }
5656  // We now have our first empty block.
5657  else
5658  {
5659  m_HasEmptyBlock = true;
5660  }
5661  }
5662  // pBlock didn't become empty, but we have another empty block - find and free that one.
5663  // (This is an optional heuristic.)
5664  else if(m_HasEmptyBlock)
5665  {
5666  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
5667  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
5668  {
5669  pBlockToDelete = pLastBlock;
5670  m_Blocks.pop_back();
5671  m_HasEmptyBlock = false;
5672  }
5673  }
5674 
5675  IncrementallySortBlocks();
5676  }
5677 
5678  // Destruction of the empty block. Deferred until this point, outside of the
5679  // mutex lock, for performance reasons.
5680  if(pBlockToDelete != VMA_NULL)
5681  {
5682  VMA_DEBUG_LOG(" Deleted empty block");
5683  pBlockToDelete->Destroy(m_hAllocator);
5684  vma_delete(m_hAllocator, pBlockToDelete);
5685  }
5686 }
5687 
5688 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5689 {
5690  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5691  {
5692  if(m_Blocks[blockIndex] == pBlock)
5693  {
5694  VmaVectorRemove(m_Blocks, blockIndex);
5695  return;
5696  }
5697  }
5698  VMA_ASSERT(0);
5699 }
5700 
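// A single bubble-sort pass that stops at the first swap keeps m_Blocks
// approximately sorted by increasing free space at O(n) cost per call, so
// Allocate() tends to fill the fullest blocks first.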
5701 void VmaBlockVector::IncrementallySortBlocks()
5702 {
5703  // Bubble sort only until first swap.
5704  for(size_t i = 1; i < m_Blocks.size(); ++i)
5705  {
5706  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
5707  {
5708  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5709  return;
5710  }
5711  }
5712 }
5713 
5714 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5715 {
5716  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5717  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5718  allocInfo.allocationSize = blockSize;
5719  VkDeviceMemory mem = VK_NULL_HANDLE;
5720  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5721  if(res < 0)
5722  {
5723  return res;
5724  }
5725 
5726  // New VkDeviceMemory successfully created.
5727 
5728  // Map memory if needed.
5729  void* pMappedData = VMA_NULL;
5730  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5731  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5732  {
5733  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5734  m_hAllocator->m_hDevice,
5735  mem,
5736  0,
5737  VK_WHOLE_SIZE,
5738  0,
5739  &pMappedData);
5740  if(res < 0)
5741  {
5742  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5743  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5744  return res;
5745  }
5746  }
5747 
5748  // Create a new VmaDeviceMemoryBlock object for it.
5749  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5750  pBlock->Init(
5751  m_MemoryTypeIndex,
5752  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5753  mem,
5754  allocInfo.allocationSize,
5755  persistentMap,
5756  pMappedData);
5757 
5758  m_Blocks.push_back(pBlock);
5759  if(pNewBlockIndex != VMA_NULL)
5760  {
5761  *pNewBlockIndex = m_Blocks.size() - 1;
5762  }
5763 
5764  return VK_SUCCESS;
5765 }
5766 
5767 #if VMA_STATS_STRING_ENABLED
5768 
5769 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5770 {
5771  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5772 
5773  json.BeginObject();
5774 
5775  if(m_IsCustomPool)
5776  {
5777  json.WriteString("MemoryTypeIndex");
5778  json.WriteNumber(m_MemoryTypeIndex);
5779 
5780  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5781  {
5782  json.WriteString("Mapped");
5783  json.WriteBool(true);
5784  }
5785 
5786  json.WriteString("BlockSize");
5787  json.WriteNumber(m_PreferredBlockSize);
5788 
5789  json.WriteString("BlockCount");
5790  json.BeginObject(true);
5791  if(m_MinBlockCount > 0)
5792  {
5793  json.WriteString("Min");
5794  json.WriteNumber(m_MinBlockCount);
5795  }
5796  if(m_MaxBlockCount < SIZE_MAX)
5797  {
5798  json.WriteString("Max");
5799  json.WriteNumber(m_MaxBlockCount);
5800  }
5801  json.WriteString("Cur");
5802  json.WriteNumber(m_Blocks.size());
5803  json.EndObject();
5804 
5805  if(m_FrameInUseCount > 0)
5806  {
5807  json.WriteString("FrameInUseCount");
5808  json.WriteNumber(m_FrameInUseCount);
5809  }
5810  }
5811  else
5812  {
5813  json.WriteString("PreferredBlockSize");
5814  json.WriteNumber(m_PreferredBlockSize);
5815  }
5816 
5817  json.WriteString("Blocks");
5818  json.BeginArray();
5819  for(size_t i = 0; i < m_Blocks.size(); ++i)
5820  {
5821  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
5822  }
5823  json.EndArray();
5824 
5825  json.EndObject();
5826 }
5827 
5828 #endif // #if VMA_STATS_STRING_ENABLED
5829 
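// Unmap/MapPersistentlyMappedMemory implement the global unmap-remap cycle.
// The allocator-wide m_UnmapPersistentlyMappedMemoryCounter (also checked in
// CreateBlock() above) ensures that blocks created while everything is
// unmapped do not get mapped either.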
5830 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5831 {
5832  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5833 
5834  for(size_t i = m_Blocks.size(); i--; )
5835  {
5836  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5837  if(pBlock->m_pMappedData != VMA_NULL)
5838  {
5839  VMA_ASSERT(pBlock->m_PersistentMap);
5840  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5841  pBlock->m_pMappedData = VMA_NULL;
5842  }
5843  }
5844 }
5845 
5846 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5847 {
5848  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5849 
5850  VkResult finalResult = VK_SUCCESS;
5851  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5852  {
5853  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5854  if(pBlock->m_PersistentMap)
5855  {
5856  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5857  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5858  m_hAllocator->m_hDevice,
5859  pBlock->m_hMemory,
5860  0,
5861  VK_WHOLE_SIZE,
5862  0,
5863  &pBlock->m_pMappedData);
5864  if(localResult != VK_SUCCESS)
5865  {
5866  finalResult = localResult;
5867  }
5868  }
5869  }
5870  return finalResult;
5871 }
5872 
5873 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5874  VmaAllocator hAllocator,
5875  uint32_t currentFrameIndex)
5876 {
5877  if(m_pDefragmentator == VMA_NULL)
5878  {
5879  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5880  hAllocator,
5881  this,
5882  currentFrameIndex);
5883  }
5884 
5885  return m_pDefragmentator;
5886 }
5887 
5888 VkResult VmaBlockVector::Defragment(
5889  VmaDefragmentationStats* pDefragmentationStats,
5890  VkDeviceSize& maxBytesToMove,
5891  uint32_t& maxAllocationsToMove)
5892 {
5893  if(m_pDefragmentator == VMA_NULL)
5894  {
5895  return VK_SUCCESS;
5896  }
5897 
5898  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5899 
5900  // Defragment.
5901  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5902 
5903  // Accumulate statistics.
5904  if(pDefragmentationStats != VMA_NULL)
5905  {
5906  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5907  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5908  pDefragmentationStats->bytesMoved += bytesMoved;
5909  pDefragmentationStats->allocationsMoved += allocationsMoved;
5910  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5911  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5912  maxBytesToMove -= bytesMoved;
5913  maxAllocationsToMove -= allocationsMoved;
5914  }
5915 
5916  // Free empty blocks.
5917  m_HasEmptyBlock = false;
5918  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5919  {
5920  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5921  if(pBlock->m_Metadata.IsEmpty())
5922  {
5923  if(m_Blocks.size() > m_MinBlockCount)
5924  {
5925  if(pDefragmentationStats != VMA_NULL)
5926  {
5927  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5928  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
5929  }
5930 
5931  VmaVectorRemove(m_Blocks, blockIndex);
5932  pBlock->Destroy(m_hAllocator);
5933  vma_delete(m_hAllocator, pBlock);
5934  }
5935  else
5936  {
5937  m_HasEmptyBlock = true;
5938  }
5939  }
5940  }
5941 
5942  return result;
5943 }
5944 
5945 void VmaBlockVector::DestroyDefragmentator()
5946 {
5947  if(m_pDefragmentator != VMA_NULL)
5948  {
5949  vma_delete(m_hAllocator, m_pDefragmentator);
5950  m_pDefragmentator = VMA_NULL;
5951  }
5952 }
5953 
5954 void VmaBlockVector::MakePoolAllocationsLost(
5955  uint32_t currentFrameIndex,
5956  size_t* pLostAllocationCount)
5957 {
5958  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5959 
5960  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5961  {
5962  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5963  VMA_ASSERT(pBlock);
5964  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5965  }
5966 }
5967 
5968 void VmaBlockVector::AddStats(VmaStats* pStats)
5969 {
5970  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5971  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5972 
5973  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5974 
5975  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5976  {
5977  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5978  VMA_ASSERT(pBlock);
5979  VMA_HEAVY_ASSERT(pBlock->Validate());
5980  VmaStatInfo allocationStatInfo;
5981  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
5982  VmaAddStatInfo(pStats->total, allocationStatInfo);
5983  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5984  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5985  }
5986 }
5987 
5988 ////////////////////////////////////////////////////////////////////////////////
5989 // VmaDefragmentator members definition
5990 
5991 VmaDefragmentator::VmaDefragmentator(
5992  VmaAllocator hAllocator,
5993  VmaBlockVector* pBlockVector,
5994  uint32_t currentFrameIndex) :
5995  m_hAllocator(hAllocator),
5996  m_pBlockVector(pBlockVector),
5997  m_CurrentFrameIndex(currentFrameIndex),
5998  m_BytesMoved(0),
5999  m_AllocationsMoved(0),
6000  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6001  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6002 {
6003 }
6004 
6005 VmaDefragmentator::~VmaDefragmentator()
6006 {
6007  for(size_t i = m_Blocks.size(); i--; )
6008  {
6009  vma_delete(m_hAllocator, m_Blocks[i]);
6010  }
6011 }
6012 
6013 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6014 {
6015  AllocationInfo allocInfo;
6016  allocInfo.m_hAllocation = hAlloc;
6017  allocInfo.m_pChanged = pChanged;
6018  m_Allocations.push_back(allocInfo);
6019 }
6020 
6021 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6022 {
6023  // It has already been mapped for defragmentation.
6024  if(m_pMappedDataForDefragmentation)
6025  {
6026  *ppMappedData = m_pMappedDataForDefragmentation;
6027  return VK_SUCCESS;
6028  }
6029 
6030  // It is persistently mapped.
6031  if(m_pBlock->m_PersistentMap)
6032  {
6033  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
6034  *ppMappedData = m_pBlock->m_pMappedData;
6035  return VK_SUCCESS;
6036  }
6037 
6038  // Map on first usage.
6039  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6040  hAllocator->m_hDevice,
6041  m_pBlock->m_hMemory,
6042  0,
6043  VK_WHOLE_SIZE,
6044  0,
6045  &m_pMappedDataForDefragmentation);
6046  *ppMappedData = m_pMappedDataForDefragmentation;
6047  return res;
6048 }
6049 
6050 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6051 {
6052  if(m_pMappedDataForDefragmentation != VMA_NULL)
6053  {
6054  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
6055  }
6056 }
6057 
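// One defragmentation round: walk allocations from the most "source" block and
// the largest allocation backwards, and try to move each one into an earlier
// ("destination") block, copying the contents through mapped pointers.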
6058 VkResult VmaDefragmentator::DefragmentRound(
6059  VkDeviceSize maxBytesToMove,
6060  uint32_t maxAllocationsToMove)
6061 {
6062  if(m_Blocks.empty())
6063  {
6064  return VK_SUCCESS;
6065  }
6066 
6067  size_t srcBlockIndex = m_Blocks.size() - 1;
6068  size_t srcAllocIndex = SIZE_MAX;
6069  for(;;)
6070  {
6071  // 1. Find next allocation to move.
6072  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6073  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6074  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6075  {
6076  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6077  {
6078  // Finished: no more allocations to process.
6079  if(srcBlockIndex == 0)
6080  {
6081  return VK_SUCCESS;
6082  }
6083  else
6084  {
6085  --srcBlockIndex;
6086  srcAllocIndex = SIZE_MAX;
6087  }
6088  }
6089  else
6090  {
6091  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6092  }
6093  }
6094 
6095  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6096  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6097 
6098  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6099  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6100  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6101  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6102 
6103  // 2. Try to find new place for this allocation in preceding or current block.
6104  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6105  {
6106  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6107  VmaAllocationRequest dstAllocRequest;
6108  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6109  m_CurrentFrameIndex,
6110  m_pBlockVector->GetFrameInUseCount(),
6111  m_pBlockVector->GetBufferImageGranularity(),
6112  size,
6113  alignment,
6114  suballocType,
6115  false, // canMakeOtherLost
6116  &dstAllocRequest) &&
6117  MoveMakesSense(
6118  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6119  {
6120  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6121 
6122  // Reached limit on number of allocations or bytes to move.
6123  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6124  (m_BytesMoved + size > maxBytesToMove))
6125  {
6126  return VK_INCOMPLETE;
6127  }
6128 
6129  void* pDstMappedData = VMA_NULL;
6130  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6131  if(res != VK_SUCCESS)
6132  {
6133  return res;
6134  }
6135 
6136  void* pSrcMappedData = VMA_NULL;
6137  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6138  if(res != VK_SUCCESS)
6139  {
6140  return res;
6141  }
6142 
6143  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6144  memcpy(
6145  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6146  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6147  static_cast<size_t>(size));
6148 
6149  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6150  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6151 
6152  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6153 
6154  if(allocInfo.m_pChanged != VMA_NULL)
6155  {
6156  *allocInfo.m_pChanged = VK_TRUE;
6157  }
6158 
6159  ++m_AllocationsMoved;
6160  m_BytesMoved += size;
6161 
6162  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6163 
6164  break;
6165  }
6166  }
6167 
6168  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6169 
6170  if(srcAllocIndex > 0)
6171  {
6172  --srcAllocIndex;
6173  }
6174  else
6175  {
6176  if(srcBlockIndex > 0)
6177  {
6178  --srcBlockIndex;
6179  srcAllocIndex = SIZE_MAX;
6180  }
6181  else
6182  {
6183  return VK_SUCCESS;
6184  }
6185  }
6186  }
6187 }
6188 
6189 VkResult VmaDefragmentator::Defragment(
6190  VkDeviceSize maxBytesToMove,
6191  uint32_t maxAllocationsToMove)
6192 {
6193  if(m_Allocations.empty())
6194  {
6195  return VK_SUCCESS;
6196  }
6197 
6198  // Create block info for each block.
6199  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6200  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6201  {
6202  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6203  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6204  m_Blocks.push_back(pBlockInfo);
6205  }
6206 
6207  // Sort them by m_pBlock pointer value.
6208  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6209 
6210  // Move allocation infos from m_Allocations into the m_Allocations of their owning BlockInfo (matched by block pointer).
6211  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6212  {
6213  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6214  // Now that we are inside VmaBlockVector::m_Mutex, we can do a final check whether this allocation was lost.
6215  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6216  {
6217  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6218  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6219  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6220  {
6221  (*it)->m_Allocations.push_back(allocInfo);
6222  }
6223  else
6224  {
6225  VMA_ASSERT(0);
6226  }
6227  }
6228  }
6229  m_Allocations.clear();
6230 
6231  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6232  {
6233  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6234  pBlockInfo->CalcHasNonMovableAllocations();
6235  pBlockInfo->SortAllocationsBySizeDescecnding();
6236  }
6237 
6238  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
6239  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6240 
6241  // Execute defragmentation rounds (the main part).
6242  VkResult result = VK_SUCCESS;
6243  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6244  {
6245  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6246  }
6247 
6248  // Unmap blocks that were mapped for defragmentation.
6249  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6250  {
6251  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6252  }
6253 
6254  return result;
6255 }
6256 
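// Illustrative application-side use (a sketch assuming the vmaDefragment()
// entry point declared earlier in this header; names such as `allocations` and
// ALLOC_COUNT are placeholders for your own data):
//
//   VmaDefragmentationStats stats = {};
//   VkBool32 changed[ALLOC_COUNT] = {};
//   vmaDefragment(allocator, allocations, ALLOC_COUNT, changed, VMA_NULL, &stats);
//   // Any allocation with changed[i] == VK_TRUE has moved: destroy and
//   // recreate the buffers/images bound to it before using them again.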
6257 bool VmaDefragmentator::MoveMakesSense(
6258  size_t dstBlockIndex, VkDeviceSize dstOffset,
6259  size_t srcBlockIndex, VkDeviceSize srcOffset)
6260 {
6261  if(dstBlockIndex < srcBlockIndex)
6262  {
6263  return true;
6264  }
6265  if(dstBlockIndex > srcBlockIndex)
6266  {
6267  return false;
6268  }
6269  if(dstOffset < srcOffset)
6270  {
6271  return true;
6272  }
6273  return false;
6274 }
6275 
6276 ////////////////////////////////////////////////////////////////////////////////
6277 // VmaAllocator_T
6278 
6279 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6280  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6281  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6282  m_PhysicalDevice(pCreateInfo->physicalDevice),
6283  m_hDevice(pCreateInfo->device),
6284  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6285  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6286  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6287  m_UnmapPersistentlyMappedMemoryCounter(0),
6288  m_PreferredLargeHeapBlockSize(0),
6289  m_PreferredSmallHeapBlockSize(0),
6290  m_CurrentFrameIndex(0),
6291  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6292 {
6293  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6294 
6295  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6296  memset(&m_MemProps, 0, sizeof(m_MemProps));
6297  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6298 
6299  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6300  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6301 
6302  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6303  {
6304  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6305  }
6306 
6307  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6308  {
6309  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6310  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6311  }
6312 
6313  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6314 
6315  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6316  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6317 
6318  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6319  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6320  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6321  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6322 
6323  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6324  {
6325  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6326  {
6327  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6328  if(limit != VK_WHOLE_SIZE)
6329  {
6330  m_HeapSizeLimit[heapIndex] = limit;
6331  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6332  {
6333  m_MemProps.memoryHeaps[heapIndex].size = limit;
6334  }
6335  }
6336  }
6337  }
6338 
6339  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6340  {
6341  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6342 
6343  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6344  {
6345  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6346  this,
6347  memTypeIndex,
6348  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6349  preferredBlockSize,
6350  0,
6351  SIZE_MAX,
6352  GetBufferImageGranularity(),
6353  pCreateInfo->frameInUseCount,
6354  false); // isCustomPool
6355  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6356  // because minBlockCount is 0.
6357  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6358  }
6359  }
6360 }
6361 
6362 VmaAllocator_T::~VmaAllocator_T()
6363 {
6364  VMA_ASSERT(m_Pools.empty());
6365 
6366  for(size_t i = GetMemoryTypeCount(); i--; )
6367  {
6368  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6369  {
6370  vma_delete(this, m_pDedicatedAllocations[i][j]);
6371  vma_delete(this, m_pBlockVectors[i][j]);
6372  }
6373  }
6374 }
6375 
6376 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6377 {
6378 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6379  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6380  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6381  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6382  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6383  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6384  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6385  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6386  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6387  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6388  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6389  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6390  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6391  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6392  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6393  // Ignoring vkGetBufferMemoryRequirements2KHR.
6394  // Ignoring vkGetImageMemoryRequirements2KHR.
6395 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6396 
6397 #define VMA_COPY_IF_NOT_NULL(funcName) \
6398  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6399 
6400  if(pVulkanFunctions != VMA_NULL)
6401  {
6402  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6403  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6404  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6405  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6406  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6407  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6408  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6409  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6410  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6411  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6412  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6413  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6414  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6415  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6416  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6417  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6418  }
6419 
6420 #undef VMA_COPY_IF_NOT_NULL
6421 
6422  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6423  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6424  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6425  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6426  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6427  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6428  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6429  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6430  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6431  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6432  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6433  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6434  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6435  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6436  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6437  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6438  if(m_UseKhrDedicatedAllocation)
6439  {
6440  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6441  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6442  }
6443 }
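// Application-side sketch of how these functions get wired up (illustrative;
// with VMA_STATIC_VULKAN_FUNCTIONS == 1, pVulkanFunctions may stay null):
//
//   VmaAllocatorCreateInfo createInfo = {};
//   createInfo.physicalDevice = myPhysicalDevice; // placeholder handle
//   createInfo.device = myDevice;                 // placeholder handle
//   VmaAllocator allocator = VK_NULL_HANDLE;
//   VkResult res = vmaCreateAllocator(&createInfo, &allocator);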
6444 
6445 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6446 {
6447  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6448  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6449  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6450  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6451 }
6452 
6453 VkResult VmaAllocator_T::AllocateMemoryOfType(
6454  const VkMemoryRequirements& vkMemReq,
6455  bool dedicatedAllocation,
6456  VkBuffer dedicatedBuffer,
6457  VkImage dedicatedImage,
6458  const VmaAllocationCreateInfo& createInfo,
6459  uint32_t memTypeIndex,
6460  VmaSuballocationType suballocType,
6461  VmaAllocation* pAllocation)
6462 {
6463  VMA_ASSERT(pAllocation != VMA_NULL);
6464  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6465 
6466  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6467  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6468  VMA_ASSERT(blockVector);
6469 
6470  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6471 
6472  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6473  bool preferDedicatedMemory =
6474  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6475  dedicatedAllocation ||
6476  // Heuristics: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
6477  vkMemReq.size > preferredBlockSize / 2;
6478 
6479  if(preferDedicatedMemory &&
6480  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6481  finalCreateInfo.pool == VK_NULL_HANDLE)
6482  {
6483  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6484  }
6485 
6486  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
6487  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
6488  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6489  {
6490  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
6491  }
6492 
6493  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6494  {
6495  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6496  {
6497  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6498  }
6499  else
6500  {
6501  return AllocateDedicatedMemory(
6502  vkMemReq.size,
6503  suballocType,
6504  memTypeIndex,
6505  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6506  finalCreateInfo.pUserData,
6507  dedicatedBuffer,
6508  dedicatedImage,
6509  pAllocation);
6510  }
6511  }
6512  else
6513  {
6514  VkResult res = blockVector->Allocate(
6515  VK_NULL_HANDLE, // hCurrentPool
6516  m_CurrentFrameIndex.load(),
6517  vkMemReq,
6518  finalCreateInfo,
6519  suballocType,
6520  pAllocation);
6521  if(res == VK_SUCCESS)
6522  {
6523  return res;
6524  }
6525 
6526  // Fall back to dedicated memory.
6527  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6528  {
6529  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6530  }
6531  else
6532  {
6533  res = AllocateDedicatedMemory(
6534  vkMemReq.size,
6535  suballocType,
6536  memTypeIndex,
6537  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6538  finalCreateInfo.pUserData,
6539  dedicatedBuffer,
6540  dedicatedImage,
6541  pAllocation);
6542  if(res == VK_SUCCESS)
6543  {
6544  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
6545  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6546  return VK_SUCCESS;
6547  }
6548  else
6549  {
6550  // Everything failed: Return error code.
6551  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6552  return res;
6553  }
6554  }
6555  }
6556 }
6557 
6558 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6559  VkDeviceSize size,
6560  VmaSuballocationType suballocType,
6561  uint32_t memTypeIndex,
6562  bool map,
6563  void* pUserData,
6564  VkBuffer dedicatedBuffer,
6565  VkImage dedicatedImage,
6566  VmaAllocation* pAllocation)
6567 {
6568  VMA_ASSERT(pAllocation);
6569 
6570  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6571  allocInfo.memoryTypeIndex = memTypeIndex;
6572  allocInfo.allocationSize = size;
6573 
6574  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6575  if(m_UseKhrDedicatedAllocation)
6576  {
6577  if(dedicatedBuffer != VK_NULL_HANDLE)
6578  {
6579  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
6580  dedicatedAllocInfo.buffer = dedicatedBuffer;
6581  allocInfo.pNext = &dedicatedAllocInfo;
6582  }
6583  else if(dedicatedImage != VK_NULL_HANDLE)
6584  {
6585  dedicatedAllocInfo.image = dedicatedImage;
6586  allocInfo.pNext = &dedicatedAllocInfo;
6587  }
6588  }
6589 
6590  // Allocate VkDeviceMemory.
6591  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6592  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6593  if(res < 0)
6594  {
6595  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6596  return res;
6597  }
6598 
6599  void* pMappedData = VMA_NULL;
6600  if(map)
6601  {
6602  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6603  {
6604  res = (*m_VulkanFunctions.vkMapMemory)(
6605  m_hDevice,
6606  hMemory,
6607  0,
6608  VK_WHOLE_SIZE,
6609  0,
6610  &pMappedData);
6611  if(res < 0)
6612  {
6613  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6614  FreeVulkanMemory(memTypeIndex, size, hMemory);
6615  return res;
6616  }
6617  }
6618  }
6619 
6620  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6621  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6622 
6623  // Register it in m_pDedicatedAllocations.
6624  {
6625  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6626  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6627  VMA_ASSERT(pDedicatedAllocations);
6628  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
6629  }
6630 
6631  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
6632 
6633  return VK_SUCCESS;
6634 }
6635 
6636 void VmaAllocator_T::GetBufferMemoryRequirements(
6637  VkBuffer hBuffer,
6638  VkMemoryRequirements& memReq,
6639  bool& requiresDedicatedAllocation,
6640  bool& prefersDedicatedAllocation) const
6641 {
6642  if(m_UseKhrDedicatedAllocation)
6643  {
6644  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
6645  memReqInfo.buffer = hBuffer;
6646 
6647  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6648 
6649  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6650  memReq2.pNext = &memDedicatedReq;
6651 
6652  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6653 
6654  memReq = memReq2.memoryRequirements;
6655  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6656  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6657  }
6658  else
6659  {
6660  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
6661  requiresDedicatedAllocation = false;
6662  prefersDedicatedAllocation = false;
6663  }
6664 }
6665 
6666 void VmaAllocator_T::GetImageMemoryRequirements(
6667  VkImage hImage,
6668  VkMemoryRequirements& memReq,
6669  bool& requiresDedicatedAllocation,
6670  bool& prefersDedicatedAllocation) const
6671 {
6672  if(m_UseKhrDedicatedAllocation)
6673  {
6674  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
6675  memReqInfo.image = hImage;
6676 
6677  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6678 
6679  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6680  memReq2.pNext = &memDedicatedReq;
6681 
6682  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6683 
6684  memReq = memReq2.memoryRequirements;
6685  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6686  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6687  }
6688  else
6689  {
6690  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
6691  requiresDedicatedAllocation = false;
6692  prefersDedicatedAllocation = false;
6693  }
6694 }
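
// GetBufferMemoryRequirements/GetImageMemoryRequirements take the
// VK_KHR_get_memory_requirements2 + VK_KHR_dedicated_allocation path only when
// the allocator was created with dedicated-allocation support enabled. A minimal
// sketch, assuming this snapshot exposes
// VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT and that both extensions
// were enabled on the VkDevice:
static VkResult ExampleCreateAllocatorWithDedicatedAllocation(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.flags = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT; // assumption, see above
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}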
6695 
6696 VkResult VmaAllocator_T::AllocateMemory(
6697  const VkMemoryRequirements& vkMemReq,
6698  bool requiresDedicatedAllocation,
6699  bool prefersDedicatedAllocation,
6700  VkBuffer dedicatedBuffer,
6701  VkImage dedicatedImage,
6702  const VmaAllocationCreateInfo& createInfo,
6703  VmaSuballocationType suballocType,
6704  VmaAllocation* pAllocation)
6705 {
6706  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
6707  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6708  {
6709  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6710  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6711  }
6712  if(requiresDedicatedAllocation)
6713  {
6714  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6715  {
6716  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
6717  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6718  }
6719  if(createInfo.pool != VK_NULL_HANDLE)
6720  {
6721  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
6722  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6723  }
6724  }
6725  if((createInfo.pool != VK_NULL_HANDLE) &&
6726  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
6727  {
6728  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
6729  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6730  }
6731 
6732  if(createInfo.pool != VK_NULL_HANDLE)
6733  {
6734  return createInfo.pool->m_BlockVector.Allocate(
6735  createInfo.pool,
6736  m_CurrentFrameIndex.load(),
6737  vkMemReq,
6738  createInfo,
6739  suballocType,
6740  pAllocation);
6741  }
6742  else
6743  {
6744  // Bit mask of Vulkan memory types acceptable for this allocation.
6745  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6746  uint32_t memTypeIndex = UINT32_MAX;
6747  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6748  if(res == VK_SUCCESS)
6749  {
6750  res = AllocateMemoryOfType(
6751  vkMemReq,
6752  requiresDedicatedAllocation || prefersDedicatedAllocation,
6753  dedicatedBuffer,
6754  dedicatedImage,
6755  createInfo,
6756  memTypeIndex,
6757  suballocType,
6758  pAllocation);
6759  // Succeeded on first try.
6760  if(res == VK_SUCCESS)
6761  {
6762  return res;
6763  }
6764  // Allocation from this memory type failed. Try other compatible memory types.
6765  else
6766  {
6767  for(;;)
6768  {
6769  // Remove old memTypeIndex from list of possibilities.
6770  memoryTypeBits &= ~(1u << memTypeIndex);
6771  // Find alternative memTypeIndex.
6772  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6773  if(res == VK_SUCCESS)
6774  {
6775  res = AllocateMemoryOfType(
6776  vkMemReq,
6777  requiresDedicatedAllocation || prefersDedicatedAllocation,
6778  dedicatedBuffer,
6779  dedicatedImage,
6780  createInfo,
6781  memTypeIndex,
6782  suballocType,
6783  pAllocation);
6784  // Allocation from this alternative memory type succeeded.
6785  if(res == VK_SUCCESS)
6786  {
6787  return res;
6788  }
6789  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6790  }
6791  // No other matching memory type index could be found.
6792  else
6793  {
6794  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6795  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6796  }
6797  }
6798  }
6799  }
6800  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6801  else
6802  return res;
6803  }
6804 }
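
// The fallback loop in AllocateMemory masks the failing type out of
// memoryTypeBits and queries vmaFindMemoryTypeIndex again. The same pattern,
// sketched against the public API; TryAllocate is a hypothetical caller-side
// helper standing in for an actual allocation attempt:
static VkResult ExampleAllocateWithFallback(
    VmaAllocator allocator,
    uint32_t memoryTypeBits, // from VkMemoryRequirements::memoryTypeBits
    const VmaAllocationCreateInfo* pCreateInfo)
{
    uint32_t memTypeIndex = UINT32_MAX;
    while(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, pCreateInfo, &memTypeIndex) == VK_SUCCESS)
    {
        if(TryAllocate(memTypeIndex) == VK_SUCCESS) // hypothetical helper
        {
            return VK_SUCCESS;
        }
        memoryTypeBits &= ~(1u << memTypeIndex); // exclude the failing type and retry
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}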
6805 
6806 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6807 {
6808  VMA_ASSERT(allocation);
6809 
6810  if(allocation->CanBecomeLost() == false ||
6811  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6812  {
6813  switch(allocation->GetType())
6814  {
6815  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6816  {
6817  VmaBlockVector* pBlockVector = VMA_NULL;
6818  VmaPool hPool = allocation->GetPool();
6819  if(hPool != VK_NULL_HANDLE)
6820  {
6821  pBlockVector = &hPool->m_BlockVector;
6822  }
6823  else
6824  {
6825  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6826  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6827  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6828  }
6829  pBlockVector->Free(allocation);
6830  }
6831  break;
6832  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
6833  FreeDedicatedMemory(allocation);
6834  break;
6835  default:
6836  VMA_ASSERT(0);
6837  }
6838  }
6839 
6840  vma_delete(this, allocation);
6841 }
6842 
6843 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6844 {
6845  // Initialize.
6846  InitStatInfo(pStats->total);
6847  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6848  InitStatInfo(pStats->memoryType[i]);
6849  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6850  InitStatInfo(pStats->memoryHeap[i]);
6851 
6852  // Process default pools.
6853  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6854  {
6855  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6856  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6857  {
6858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6859  VMA_ASSERT(pBlockVector);
6860  pBlockVector->AddStats(pStats);
6861  }
6862  }
6863 
6864  // Process custom pools.
6865  {
6866  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6867  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6868  {
6869  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6870  }
6871  }
6872 
6873  // Process dedicated allocations.
6874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6875  {
6876  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6877  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6878  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6879  {
6880  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
6881  VMA_ASSERT(pDedicatedAllocVector);
6882  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6883  {
6884  VmaStatInfo allocationStatInfo;
6885  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
6886  VmaAddStatInfo(pStats->total, allocationStatInfo);
6887  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6888  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6889  }
6890  }
6891  }
6892 
6893  // Postprocess.
6894  VmaPostprocessCalcStatInfo(pStats->total);
6895  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6896  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6897  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6898  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6899 }
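
// The matching public entry point is vmaCalculateStats, defined further below.
// Minimal usage sketch:
static VkDeviceSize ExampleTotalUnusedBytes(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.memoryType[] and stats.memoryHeap[] hold the same VmaStatInfo,
    // broken down per memory type and per heap, as aggregated above.
    return stats.total.unusedBytes;
}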
6900 
6901 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
6902 
6903 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6904 {
6905  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6906  {
6907  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6908  {
6909  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6910  {
6911  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6912  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6913  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6914  {
6915  // Process DedicatedAllocations.
6916  {
6917  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6918  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6919  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
6920  {
6921  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
6922  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
6923  }
6924  }
6925 
6926  // Process normal Allocations.
6927  {
6928  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6929  pBlockVector->UnmapPersistentlyMappedMemory();
6930  }
6931  }
6932  }
6933 
6934  // Process custom pools.
6935  {
6936  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6937  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6938  {
6939  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6940  }
6941  }
6942  }
6943  }
6944 }
6945 
6946 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6947 {
6948  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6949  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6950  {
6951  VkResult finalResult = VK_SUCCESS;
6952  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6953  {
6954  // Process custom pools.
6955  {
6956  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6957  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6958  {
6959  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6960  }
6961  }
6962 
6963  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6964  {
6965  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6966  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6967  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6968  {
6969  // Process DedicatedAllocations.
6970  {
6971  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6972  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6973  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
6974  {
6975  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
6976  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
6977  }
6978  }
6979 
6980  // Process normal Allocations.
6981  {
6982  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6983  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6984  if(localResult != VK_SUCCESS)
6985  {
6986  finalResult = localResult;
6987  }
6988  }
6989  }
6990  }
6991  }
6992  return finalResult;
6993  }
6994  else
6995  return VK_SUCCESS;
6996 }
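
// The counter makes Unmap/Map calls nestable: memory is actually unmapped on
// the first Unmap and remapped only when the matching Map balances it. Sketch
// of the intended pairing:
static VkResult ExampleUnmapMapPair(VmaAllocator allocator)
{
    vmaUnmapPersistentlyMappedMemory(allocator);
    // ... work during which persistently mapped HOST_VISIBLE | DEVICE_LOCAL
    // memory must stay unmapped, e.g. while the application is inactive ...
    return vmaMapPersistentlyMappedMemory(allocator);
}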
6997 
6998 VkResult VmaAllocator_T::Defragment(
6999  VmaAllocation* pAllocations,
7000  size_t allocationCount,
7001  VkBool32* pAllocationsChanged,
7002  const VmaDefragmentationInfo* pDefragmentationInfo,
7003  VmaDefragmentationStats* pDefragmentationStats)
7004 {
7005  if(pAllocationsChanged != VMA_NULL)
7006  {
7007  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7008  }
7009  if(pDefragmentationStats != VMA_NULL)
7010  {
7011  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7012  }
7013 
7014  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
7015  {
7016  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
7017  return VK_ERROR_MEMORY_MAP_FAILED;
7018  }
7019 
7020  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7021 
7022  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7023 
7024  const size_t poolCount = m_Pools.size();
7025 
7026  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7027  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7028  {
7029  VmaAllocation hAlloc = pAllocations[allocIndex];
7030  VMA_ASSERT(hAlloc);
7031  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7032  // DedicatedAlloc cannot be defragmented.
7033  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7034  // Only HOST_VISIBLE memory types can be defragmented.
7035  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7036  // Lost allocation cannot be defragmented.
7037  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7038  {
7039  VmaBlockVector* pAllocBlockVector = nullptr;
7040 
7041  const VmaPool hAllocPool = hAlloc->GetPool();
7042  // This allocation belongs to a custom pool.
7043  if(hAllocPool != VK_NULL_HANDLE)
7044  {
7045  pAllocBlockVector = &hAllocPool->GetBlockVector();
7046  }
7047  // This allocation belongs to the general pool.
7048  else
7049  {
7050  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
7051  }
7052 
7053  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7054 
7055  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7056  &pAllocationsChanged[allocIndex] : VMA_NULL;
7057  pDefragmentator->AddAllocation(hAlloc, pChanged);
7058  }
7059  }
7060 
7061  VkResult result = VK_SUCCESS;
7062 
7063  // ======== Main processing.
7064 
7065  VkDeviceSize maxBytesToMove = SIZE_MAX;
7066  uint32_t maxAllocationsToMove = UINT32_MAX;
7067  if(pDefragmentationInfo != VMA_NULL)
7068  {
7069  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7070  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7071  }
7072 
7073  // Process standard memory.
7074  for(uint32_t memTypeIndex = 0;
7075  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7076  ++memTypeIndex)
7077  {
7078  // Only HOST_VISIBLE memory types can be defragmented.
7079  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7080  {
7081  for(uint32_t blockVectorType = 0;
7082  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
7083  ++blockVectorType)
7084  {
7085  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
7086  pDefragmentationStats,
7087  maxBytesToMove,
7088  maxAllocationsToMove);
7089  }
7090  }
7091  }
7092 
7093  // Process custom pools.
7094  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7095  {
7096  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7097  pDefragmentationStats,
7098  maxBytesToMove,
7099  maxAllocationsToMove);
7100  }
7101 
7102  // ======== Destroy defragmentators.
7103 
7104  // Process custom pools.
7105  for(size_t poolIndex = poolCount; poolIndex--; )
7106  {
7107  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7108  }
7109 
7110  // Process standard memory.
7111  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7112  {
7113  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7114  {
7115  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
7116  {
7117  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
7118  }
7119  }
7120  }
7121 
7122  return result;
7123 }
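
// Usage sketch for the public wrapper vmaDefragment, defined below. The caller
// passes allocations it is willing to have moved; pAllocationsChanged reports
// which ones now live at a different VkDeviceMemory/offset and therefore need
// their buffers or images recreated and rebound:
static VkResult ExampleDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged,
    size_t allocationCount)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // effectively no limit
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no limit

    VmaDefragmentationStats stats = {};
    return vmaDefragment(allocator, pAllocations, allocationCount,
        pAllocationsChanged, &defragInfo, &stats);
}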
7124 
7125 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7126 {
7127  if(hAllocation->CanBecomeLost())
7128  {
7129  /*
7130  Warning: This is a carefully designed algorithm.
7131  Do not modify unless you really know what you're doing :)
7132  */
7133  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7134  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7135  for(;;)
7136  {
7137  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7138  {
7139  pAllocationInfo->memoryType = UINT32_MAX;
7140  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7141  pAllocationInfo->offset = 0;
7142  pAllocationInfo->size = hAllocation->GetSize();
7143  pAllocationInfo->pMappedData = VMA_NULL;
7144  pAllocationInfo->pUserData = hAllocation->GetUserData();
7145  return;
7146  }
7147  else if(localLastUseFrameIndex == localCurrFrameIndex)
7148  {
7149  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7150  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7151  pAllocationInfo->offset = hAllocation->GetOffset();
7152  pAllocationInfo->size = hAllocation->GetSize();
7153  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7154  pAllocationInfo->pUserData = hAllocation->GetUserData();
7155  return;
7156  }
7157  else // Last use time earlier than current time.
7158  {
7159  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7160  {
7161  localLastUseFrameIndex = localCurrFrameIndex;
7162  }
7163  }
7164  }
7165  }
7166  // The allocation cannot become lost here, so we can read its fields directly, without the atomic compare-exchange on LastUseFrameIndex.
7167  else
7168  {
7169  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7170  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7171  pAllocationInfo->offset = hAllocation->GetOffset();
7172  pAllocationInfo->size = hAllocation->GetSize();
7173  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7174  pAllocationInfo->pUserData = hAllocation->GetUserData();
7175  }
7176 }
7177 
7178 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7179 {
7180  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7181 
7182  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7183 
7184  if(newCreateInfo.maxBlockCount == 0)
7185  {
7186  newCreateInfo.maxBlockCount = SIZE_MAX;
7187  }
7188  if(newCreateInfo.blockSize == 0)
7189  {
7190  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7191  }
7192 
7193  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7194 
7195  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7196  if(res != VK_SUCCESS)
7197  {
7198  vma_delete(this, *pPool);
7199  *pPool = VMA_NULL;
7200  return res;
7201  }
7202 
7203  // Add to m_Pools.
7204  {
7205  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7206  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7207  }
7208 
7209  return VK_SUCCESS;
7210 }
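
// Usage sketch for the public wrapper vmaCreatePool. Zeros mean "use defaults":
// blockSize == 0 becomes CalcPreferredBlockSize() and maxBlockCount == 0 becomes
// SIZE_MAX, exactly as handled above.
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex
    poolInfo.blockSize = 0;
    poolInfo.minBlockCount = 0;
    poolInfo.maxBlockCount = 0;
    return vmaCreatePool(allocator, &poolInfo, pPool);
}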
7211 
7212 void VmaAllocator_T::DestroyPool(VmaPool pool)
7213 {
7214  // Remove from m_Pools.
7215  {
7216  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7217  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7218  VMA_ASSERT(success && "Pool not found in Allocator.");
7219  }
7220 
7221  vma_delete(this, pool);
7222 }
7223 
7224 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7225 {
7226  pool->m_BlockVector.GetPoolStats(pPoolStats);
7227 }
7228 
7229 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7230 {
7231  m_CurrentFrameIndex.store(frameIndex);
7232 }
7233 
7234 void VmaAllocator_T::MakePoolAllocationsLost(
7235  VmaPool hPool,
7236  size_t* pLostAllocationCount)
7237 {
7238  hPool->m_BlockVector.MakePoolAllocationsLost(
7239  m_CurrentFrameIndex.load(),
7240  pLostAllocationCount);
7241 }
7242 
7243 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7244 {
7245  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
7246  (*pAllocation)->InitLost();
7247 }
7248 
7249 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7250 {
7251  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7252 
7253  VkResult res;
7254  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7255  {
7256  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7257  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7258  {
7259  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7260  if(res == VK_SUCCESS)
7261  {
7262  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7263  }
7264  }
7265  else
7266  {
7267  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7268  }
7269  }
7270  else
7271  {
7272  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7273  }
7274 
7275  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7276  {
7277  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7278  }
7279 
7280  return res;
7281 }
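
// m_HeapSizeLimit is seeded from VmaAllocatorCreateInfo::pHeapSizeLimit: one
// entry per memory heap, VK_WHOLE_SIZE meaning "no limit". Illustrative sketch
// capping heap 0 at 256 MiB (placeholder handles):
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE;
    }
    heapSizeLimit[0] = 256ull * 1024 * 1024;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}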
7282 
7283 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7284 {
7285  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7286  {
7287  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7288  }
7289 
7290  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7291 
7292  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7293  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7294  {
7295  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7296  m_HeapSizeLimit[heapIndex] += size;
7297  }
7298 }
7299 
7300 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7301 {
7302  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7303 
7304  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7305  {
7306  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7307  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
7308  VMA_ASSERT(pDedicatedAllocations);
7309  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7310  VMA_ASSERT(success);
7311  }
7312 
7313  VkDeviceMemory hMemory = allocation->GetMemory();
7314 
7315  if(allocation->GetMappedData() != VMA_NULL)
7316  {
7317  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7318  }
7319 
7320  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7321 
7322  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7323 }
7324 
7325 #if VMA_STATS_STRING_ENABLED
7326 
7327 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7328 {
7329  bool dedicatedAllocationsStarted = false;
7330  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7331  {
7332  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7333  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7334  {
7335  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
7336  VMA_ASSERT(pDedicatedAllocVector);
7337  if(pDedicatedAllocVector->empty() == false)
7338  {
7339  if(dedicatedAllocationsStarted == false)
7340  {
7341  dedicatedAllocationsStarted = true;
7342  json.WriteString("DedicatedAllocations");
7343  json.BeginObject();
7344  }
7345 
7346  json.BeginString("Type ");
7347  json.ContinueString(memTypeIndex);
7348  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7349  {
7350  json.ContinueString(" Mapped");
7351  }
7352  json.EndString();
7353 
7354  json.BeginArray();
7355 
7356  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7357  {
7358  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7359  json.BeginObject(true);
7360 
7361  json.WriteString("Size");
7362  json.WriteNumber(hAlloc->GetSize());
7363 
7364  json.WriteString("Type");
7365  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7366 
7367  json.EndObject();
7368  }
7369 
7370  json.EndArray();
7371  }
7372  }
7373  }
7374  if(dedicatedAllocationsStarted)
7375  {
7376  json.EndObject();
7377  }
7378 
7379  {
7380  bool allocationsStarted = false;
7381  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7382  {
7383  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7384  {
7385  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7386  {
7387  if(allocationsStarted == false)
7388  {
7389  allocationsStarted = true;
7390  json.WriteString("DefaultPools");
7391  json.BeginObject();
7392  }
7393 
7394  json.BeginString("Type ");
7395  json.ContinueString(memTypeIndex);
7396  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7397  {
7398  json.ContinueString(" Mapped");
7399  }
7400  json.EndString();
7401 
7402  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7403  }
7404  }
7405  }
7406  if(allocationsStarted)
7407  {
7408  json.EndObject();
7409  }
7410  }
7411 
7412  {
7413  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7414  const size_t poolCount = m_Pools.size();
7415  if(poolCount > 0)
7416  {
7417  json.WriteString("Pools");
7418  json.BeginArray();
7419  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7420  {
7421  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7422  }
7423  json.EndArray();
7424  }
7425  }
7426 }
7427 
7428 #endif // #if VMA_STATS_STRING_ENABLED
7429 
7430 static VkResult AllocateMemoryForImage(
7431  VmaAllocator allocator,
7432  VkImage image,
7433  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7434  VmaSuballocationType suballocType,
7435  VmaAllocation* pAllocation)
7436 {
7437  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7438 
7439  VkMemoryRequirements vkMemReq = {};
7440  bool requiresDedicatedAllocation = false;
7441  bool prefersDedicatedAllocation = false;
7442  allocator->GetImageMemoryRequirements(image, vkMemReq,
7443  requiresDedicatedAllocation, prefersDedicatedAllocation);
7444 
7445  return allocator->AllocateMemory(
7446  vkMemReq,
7447  requiresDedicatedAllocation,
7448  prefersDedicatedAllocation,
7449  VK_NULL_HANDLE, // dedicatedBuffer
7450  image, // dedicatedImage
7451  *pAllocationCreateInfo,
7452  suballocType,
7453  pAllocation);
7454 }
7455 
7456 ////////////////////////////////////////////////////////////////////////////////
7457 // Public interface
7458 
7459 VkResult vmaCreateAllocator(
7460  const VmaAllocatorCreateInfo* pCreateInfo,
7461  VmaAllocator* pAllocator)
7462 {
7463  VMA_ASSERT(pCreateInfo && pAllocator);
7464  VMA_DEBUG_LOG("vmaCreateAllocator");
7465  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7466  return VK_SUCCESS;
7467 }
7468 
7469 void vmaDestroyAllocator(
7470  VmaAllocator allocator)
7471 {
7472  if(allocator != VK_NULL_HANDLE)
7473  {
7474  VMA_DEBUG_LOG("vmaDestroyAllocator");
7475  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7476  vma_delete(&allocationCallbacks, allocator);
7477  }
7478 }
7479 
7480 void vmaGetPhysicalDeviceProperties(
7481  VmaAllocator allocator,
7482  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7483 {
7484  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7485  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7486 }
7487 
7488 void vmaGetMemoryProperties(
7489  VmaAllocator allocator,
7490  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7491 {
7492  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7493  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7494 }
7495 
7496 void vmaGetMemoryTypeProperties(
7497  VmaAllocator allocator,
7498  uint32_t memoryTypeIndex,
7499  VkMemoryPropertyFlags* pFlags)
7500 {
7501  VMA_ASSERT(allocator && pFlags);
7502  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7503  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7504 }
7505 
7506 void vmaSetCurrentFrameIndex(
7507  VmaAllocator allocator,
7508  uint32_t frameIndex)
7509 {
7510  VMA_ASSERT(allocator);
7511  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7512 
7513  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7514 
7515  allocator->SetCurrentFrameIndex(frameIndex);
7516 }
7517 
7518 void vmaCalculateStats(
7519  VmaAllocator allocator,
7520  VmaStats* pStats)
7521 {
7522  VMA_ASSERT(allocator && pStats);
7523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7524  allocator->CalculateStats(pStats);
7525 }
7526 
7527 #if VMA_STATS_STRING_ENABLED
7528 
7529 void vmaBuildStatsString(
7530  VmaAllocator allocator,
7531  char** ppStatsString,
7532  VkBool32 detailedMap)
7533 {
7534  VMA_ASSERT(allocator && ppStatsString);
7535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7536 
7537  VmaStringBuilder sb(allocator);
7538  {
7539  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7540  json.BeginObject();
7541 
7542  VmaStats stats;
7543  allocator->CalculateStats(&stats);
7544 
7545  json.WriteString("Total");
7546  VmaPrintStatInfo(json, stats.total);
7547 
7548  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7549  {
7550  json.BeginString("Heap ");
7551  json.ContinueString(heapIndex);
7552  json.EndString();
7553  json.BeginObject();
7554 
7555  json.WriteString("Size");
7556  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7557 
7558  json.WriteString("Flags");
7559  json.BeginArray(true);
7560  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7561  {
7562  json.WriteString("DEVICE_LOCAL");
7563  }
7564  json.EndArray();
7565 
7566  if(stats.memoryHeap[heapIndex].blockCount > 0)
7567  {
7568  json.WriteString("Stats");
7569  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7570  }
7571 
7572  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7573  {
7574  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7575  {
7576  json.BeginString("Type ");
7577  json.ContinueString(typeIndex);
7578  json.EndString();
7579 
7580  json.BeginObject();
7581 
7582  json.WriteString("Flags");
7583  json.BeginArray(true);
7584  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7585  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7586  {
7587  json.WriteString("DEVICE_LOCAL");
7588  }
7589  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7590  {
7591  json.WriteString("HOST_VISIBLE");
7592  }
7593  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7594  {
7595  json.WriteString("HOST_COHERENT");
7596  }
7597  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7598  {
7599  json.WriteString("HOST_CACHED");
7600  }
7601  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7602  {
7603  json.WriteString("LAZILY_ALLOCATED");
7604  }
7605  json.EndArray();
7606 
7607  if(stats.memoryType[typeIndex].blockCount > 0)
7608  {
7609  json.WriteString("Stats");
7610  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7611  }
7612 
7613  json.EndObject();
7614  }
7615  }
7616 
7617  json.EndObject();
7618  }
7619  if(detailedMap == VK_TRUE)
7620  {
7621  allocator->PrintDetailedMap(json);
7622  }
7623 
7624  json.EndObject();
7625  }
7626 
7627  const size_t len = sb.GetLength();
7628  char* const pChars = vma_new_array(allocator, char, len + 1);
7629  if(len > 0)
7630  {
7631  memcpy(pChars, sb.GetData(), len);
7632  }
7633  pChars[len] = '\0';
7634  *ppStatsString = pChars;
7635 }
7636 
7637 void vmaFreeStatsString(
7638  VmaAllocator allocator,
7639  char* pStatsString)
7640 {
7641  if(pStatsString != VMA_NULL)
7642  {
7643  VMA_ASSERT(allocator);
7644  size_t len = strlen(pStatsString);
7645  vma_delete_array(allocator, pStatsString, len + 1);
7646  }
7647 }
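
// Usage sketch for the pair above: the string is allocated through the
// allocator's CPU allocation callbacks, so it must be released with
// vmaFreeStatsString and nothing else.
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap*/);
    // ... write statsString to a log or a file ...
    vmaFreeStatsString(allocator, statsString);
}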
7648 
7649 #endif // #if VMA_STATS_STRING_ENABLED
7650 
7653 VkResult vmaFindMemoryTypeIndex(
7654  VmaAllocator allocator,
7655  uint32_t memoryTypeBits,
7656  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7657  uint32_t* pMemoryTypeIndex)
7658 {
7659  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7660  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7661  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7662 
7663  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7664  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7665  if(preferredFlags == 0)
7666  {
7667  preferredFlags = requiredFlags;
7668  }
7669  // preferredFlags, if not 0, must be a superset of requiredFlags.
7670  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7671 
7672  // Convert usage to requiredFlags and preferredFlags.
7673  switch(pAllocationCreateInfo->usage)
7674  {
7675  case VMA_MEMORY_USAGE_UNKNOWN:
7676  break;
7677  case VMA_MEMORY_USAGE_GPU_ONLY:
7678  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7679  break;
7680  case VMA_MEMORY_USAGE_CPU_ONLY:
7681  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7682  break;
7683  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7684  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7685  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7686  break;
7687  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7688  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7689  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7690  break;
7691  default:
7692  break;
7693  }
7694 
7695  *pMemoryTypeIndex = UINT32_MAX;
7696  uint32_t minCost = UINT32_MAX;
7697  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7698  memTypeIndex < allocator->GetMemoryTypeCount();
7699  ++memTypeIndex, memTypeBit <<= 1)
7700  {
7701  // This memory type is acceptable according to memoryTypeBits bitmask.
7702  if((memTypeBit & memoryTypeBits) != 0)
7703  {
7704  const VkMemoryPropertyFlags currFlags =
7705  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7706  // This memory type contains requiredFlags.
7707  if((requiredFlags & ~currFlags) == 0)
7708  {
7709  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7710  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7711  // Remember memory type with lowest cost.
7712  if(currCost < minCost)
7713  {
7714  *pMemoryTypeIndex = memTypeIndex;
7715  if(currCost == 0)
7716  {
7717  return VK_SUCCESS;
7718  }
7719  minCost = currCost;
7720  }
7721  }
7722  }
7723  }
7724  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7725 }
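
// Example: choose a memory type for a staging buffer. memoryTypeBits would come
// from vkGetBufferMemoryRequirements on the actual buffer.
static VkResult ExampleFindStagingMemoryType(
    VmaAllocator allocator, uint32_t memoryTypeBits, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT
    // On success, *pMemTypeIndex is the acceptable type missing the fewest
    // preferred flags, per the cost loop above.
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemTypeIndex);
}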
7726 
7727 VkResult vmaCreatePool(
7728  VmaAllocator allocator,
7729  const VmaPoolCreateInfo* pCreateInfo,
7730  VmaPool* pPool)
7731 {
7732  VMA_ASSERT(allocator && pCreateInfo && pPool);
7733 
7734  VMA_DEBUG_LOG("vmaCreatePool");
7735 
7736  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7737 
7738  return allocator->CreatePool(pCreateInfo, pPool);
7739 }
7740 
7741 void vmaDestroyPool(
7742  VmaAllocator allocator,
7743  VmaPool pool)
7744 {
7745  VMA_ASSERT(allocator && pool);
7746 
7747  VMA_DEBUG_LOG("vmaDestroyPool");
7748 
7749  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7750 
7751  allocator->DestroyPool(pool);
7752 }
7753 
7754 void vmaGetPoolStats(
7755  VmaAllocator allocator,
7756  VmaPool pool,
7757  VmaPoolStats* pPoolStats)
7758 {
7759  VMA_ASSERT(allocator && pool && pPoolStats);
7760 
7761  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7762 
7763  allocator->GetPoolStats(pool, pPoolStats);
7764 }
7765 
7766 void vmaMakePoolAllocationsLost(
7767  VmaAllocator allocator,
7768  VmaPool pool,
7769  size_t* pLostAllocationCount)
7770 {
7771  VMA_ASSERT(allocator && pool);
7772 
7773  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7774 
7775  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7776 }
7777 
7778 VkResult vmaAllocateMemory(
7779  VmaAllocator allocator,
7780  const VkMemoryRequirements* pVkMemoryRequirements,
7781  const VmaAllocationCreateInfo* pCreateInfo,
7782  VmaAllocation* pAllocation,
7783  VmaAllocationInfo* pAllocationInfo)
7784 {
7785  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7786 
7787  VMA_DEBUG_LOG("vmaAllocateMemory");
7788 
7789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7790 
7791  VkResult result = allocator->AllocateMemory(
7792  *pVkMemoryRequirements,
7793  false, // requiresDedicatedAllocation
7794  false, // prefersDedicatedAllocation
7795  VK_NULL_HANDLE, // dedicatedBuffer
7796  VK_NULL_HANDLE, // dedicatedImage
7797  *pCreateInfo,
7798  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7799  pAllocation);
7800 
7801  if(pAllocationInfo && result == VK_SUCCESS)
7802  {
7803  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7804  }
7805 
7806  return result;
7807 }
7808 
7809 VkResult vmaAllocateMemoryForBuffer(
7810  VmaAllocator allocator,
7811  VkBuffer buffer,
7812  const VmaAllocationCreateInfo* pCreateInfo,
7813  VmaAllocation* pAllocation,
7814  VmaAllocationInfo* pAllocationInfo)
7815 {
7816  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7817 
7818  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7819 
7820  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7821 
7822  VkMemoryRequirements vkMemReq = {};
7823  bool requiresDedicatedAllocation = false;
7824  bool prefersDedicatedAllocation = false;
7825  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
7826  requiresDedicatedAllocation,
7827  prefersDedicatedAllocation);
7828 
7829  VkResult result = allocator->AllocateMemory(
7830  vkMemReq,
7831  requiresDedicatedAllocation,
7832  prefersDedicatedAllocation,
7833  buffer, // dedicatedBuffer
7834  VK_NULL_HANDLE, // dedicatedImage
7835  *pCreateInfo,
7836  VMA_SUBALLOCATION_TYPE_BUFFER,
7837  pAllocation);
7838 
7839  if(pAllocationInfo && result == VK_SUCCESS)
7840  {
7841  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7842  }
7843 
7844  return result;
7845 }
7846 
7847 VkResult vmaAllocateMemoryForImage(
7848  VmaAllocator allocator,
7849  VkImage image,
7850  const VmaAllocationCreateInfo* pCreateInfo,
7851  VmaAllocation* pAllocation,
7852  VmaAllocationInfo* pAllocationInfo)
7853 {
7854  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7855 
7856  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7857 
7858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7859 
7860  VkResult result = AllocateMemoryForImage(
7861  allocator,
7862  image,
7863  pCreateInfo,
7864  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7865  pAllocation);
7866 
7867  if(pAllocationInfo && result == VK_SUCCESS)
7868  {
7869  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7870  }
7871 
7872  return result;
7873 }
7874 
7875 void vmaFreeMemory(
7876  VmaAllocator allocator,
7877  VmaAllocation allocation)
7878 {
7879  VMA_ASSERT(allocator && allocation);
7880 
7881  VMA_DEBUG_LOG("vmaFreeMemory");
7882 
7883  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7884 
7885  allocator->FreeMemory(allocation);
7886 }
7887 
7888 void vmaGetAllocationInfo(
7889  VmaAllocator allocator,
7890  VmaAllocation allocation,
7891  VmaAllocationInfo* pAllocationInfo)
7892 {
7893  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7894 
7895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7896 
7897  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7898 }
7899 
7900 void vmaSetAllocationUserData(
7901  VmaAllocator allocator,
7902  VmaAllocation allocation,
7903  void* pUserData)
7904 {
7905  VMA_ASSERT(allocator && allocation);
7906 
7907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7908 
7909  allocation->SetUserData(pUserData);
7910 }
7911 
7912 void vmaCreateLostAllocation(
7913  VmaAllocator allocator,
7914  VmaAllocation* pAllocation)
7915 {
7916  VMA_ASSERT(allocator && pAllocation);
7917 
7918  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7919 
7920  allocator->CreateLostAllocation(pAllocation);
7921 }
7922 
7923 VkResult vmaMapMemory(
7924  VmaAllocator allocator,
7925  VmaAllocation allocation,
7926  void** ppData)
7927 {
7928  VMA_ASSERT(allocator && allocation && ppData);
7929 
7930  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7931 
7932  return (*allocator->GetVulkanFunctions().vkMapMemory)(
7933  allocator->m_hDevice,
7934  allocation->GetMemory(),
7935  allocation->GetOffset(),
7936  allocation->GetSize(),
7937  0,
7938  ppData);
7939 }
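
// vmaMapMemory is a thin wrapper over vkMapMemory for the allocation's exact
// offset and size, with no reference counting in this version. Vulkan does not
// allow mapping one VkDeviceMemory object twice, so avoid mapping two
// allocations from the same block at once; for long-lived mappings prefer
// VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT. Sketch:
static VkResult ExampleUpload(VmaAllocator allocator, VmaAllocation allocation,
    const void* srcData, size_t srcSize)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}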
7940 
7941 void vmaUnmapMemory(
7942  VmaAllocator allocator,
7943  VmaAllocation allocation)
7944 {
7945  VMA_ASSERT(allocator && allocation);
7946 
7947  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7948 
7949  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
7950 }
7951 
7952 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7953 {
7954  VMA_ASSERT(allocator);
7955 
7956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7957 
7958  allocator->UnmapPersistentlyMappedMemory();
7959 }
7960 
7961 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7962 {
7963  VMA_ASSERT(allocator);
7964 
7965  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7966 
7967  return allocator->MapPersistentlyMappedMemory();
7968 }
7969 
7970 VkResult vmaDefragment(
7971  VmaAllocator allocator,
7972  VmaAllocation* pAllocations,
7973  size_t allocationCount,
7974  VkBool32* pAllocationsChanged,
7975  const VmaDefragmentationInfo *pDefragmentationInfo,
7976  VmaDefragmentationStats* pDefragmentationStats)
7977 {
7978  VMA_ASSERT(allocator && pAllocations);
7979 
7980  VMA_DEBUG_LOG("vmaDefragment");
7981 
7982  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7983 
7984  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7985 }
7986 
7987 VkResult vmaCreateBuffer(
7988  VmaAllocator allocator,
7989  const VkBufferCreateInfo* pBufferCreateInfo,
7990  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7991  VkBuffer* pBuffer,
7992  VmaAllocation* pAllocation,
7993  VmaAllocationInfo* pAllocationInfo)
7994 {
7995  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7996 
7997  VMA_DEBUG_LOG("vmaCreateBuffer");
7998 
7999  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8000 
8001  *pBuffer = VK_NULL_HANDLE;
8002  *pAllocation = VK_NULL_HANDLE;
8003 
8004  // 1. Create VkBuffer.
8005  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8006  allocator->m_hDevice,
8007  pBufferCreateInfo,
8008  allocator->GetAllocationCallbacks(),
8009  pBuffer);
8010  if(res >= 0)
8011  {
8012  // 2. vkGetBufferMemoryRequirements.
8013  VkMemoryRequirements vkMemReq = {};
8014  bool requiresDedicatedAllocation = false;
8015  bool prefersDedicatedAllocation = false;
8016  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8017  requiresDedicatedAllocation, prefersDedicatedAllocation);
8018 
8019  // 3. Allocate memory using allocator.
8020  res = allocator->AllocateMemory(
8021  vkMemReq,
8022  requiresDedicatedAllocation,
8023  prefersDedicatedAllocation,
8024  *pBuffer, // dedicatedBuffer
8025  VK_NULL_HANDLE, // dedicatedImage
8026  *pAllocationCreateInfo,
8027  VMA_SUBALLOCATION_TYPE_BUFFER,
8028  pAllocation);
8029  if(res >= 0)
8030  {
8031  // 4. Bind buffer with memory.
8032  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8033  allocator->m_hDevice,
8034  *pBuffer,
8035  (*pAllocation)->GetMemory(),
8036  (*pAllocation)->GetOffset());
8037  if(res >= 0)
8038  {
8039  // All steps succeeded.
8040  if(pAllocationInfo != VMA_NULL)
8041  {
8042  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8043  }
8044  return VK_SUCCESS;
8045  }
8046  allocator->FreeMemory(*pAllocation);
8047  *pAllocation = VK_NULL_HANDLE;
8048  return res;
8049  }
8050  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8051  *pBuffer = VK_NULL_HANDLE;
8052  return res;
8053  }
8054  return res;
8055 }
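
// Typical call, matching the create/allocate/bind steps above:
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator, VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // Destroy later with vmaDestroyBuffer(allocator, *pBuffer, *pAllocation).
    return vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}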
8056 
8057 void vmaDestroyBuffer(
8058  VmaAllocator allocator,
8059  VkBuffer buffer,
8060  VmaAllocation allocation)
8061 {
8062  if(buffer != VK_NULL_HANDLE)
8063  {
8064  VMA_ASSERT(allocator);
8065 
8066  VMA_DEBUG_LOG("vmaDestroyBuffer");
8067 
8068  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8069 
8070  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8071 
8072  allocator->FreeMemory(allocation);
8073  }
8074 }
8075 
8076 VkResult vmaCreateImage(
8077  VmaAllocator allocator,
8078  const VkImageCreateInfo* pImageCreateInfo,
8079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8080  VkImage* pImage,
8081  VmaAllocation* pAllocation,
8082  VmaAllocationInfo* pAllocationInfo)
8083 {
8084  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8085 
8086  VMA_DEBUG_LOG("vmaCreateImage");
8087 
8088  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8089 
8090  *pImage = VK_NULL_HANDLE;
8091  *pAllocation = VK_NULL_HANDLE;
8092 
8093  // 1. Create VkImage.
8094  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8095  allocator->m_hDevice,
8096  pImageCreateInfo,
8097  allocator->GetAllocationCallbacks(),
8098  pImage);
8099  if(res >= 0)
8100  {
8101  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8102  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8103  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8104 
8105  // 2. Allocate memory using allocator.
8106  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8107  if(res >= 0)
8108  {
8109  // 3. Bind image with memory.
8110  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8111  allocator->m_hDevice,
8112  *pImage,
8113  (*pAllocation)->GetMemory(),
8114  (*pAllocation)->GetOffset());
8115  if(res >= 0)
8116  {
8117  // All steps succeeded.
8118  if(pAllocationInfo != VMA_NULL)
8119  {
8120  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8121  }
8122  return VK_SUCCESS;
8123  }
8124  allocator->FreeMemory(*pAllocation);
8125  *pAllocation = VK_NULL_HANDLE;
8126  return res;
8127  }
8128  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8129  *pImage = VK_NULL_HANDLE;
8130  return res;
8131  }
8132  return res;
8133 }
8134 
8135 void vmaDestroyImage(
8136  VmaAllocator allocator,
8137  VkImage image,
8138  VmaAllocation allocation)
8139 {
8140  if(image != VK_NULL_HANDLE)
8141  {
8142  VMA_ASSERT(allocator);
8143 
8144  VMA_DEBUG_LOG("vmaDestroyImage");
8145 
8146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8147 
8148  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8149 
8150  allocator->FreeMemory(allocation);
8151  }
8152 }
8153 
8154 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:551
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:768
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:576
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:561
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:742
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:555
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1027
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:573
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1180
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:897
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:951
Definition: vk_mem_alloc.h:806
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:544
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:839
Definition: vk_mem_alloc.h:752
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:588
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:635
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:570
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:585
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:756
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:700
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:558
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:699
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:566
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1184
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:605
VmaStatInfo total
Definition: vk_mem_alloc.h:709
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1192
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:822
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1175
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:559
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:480
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:579
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:905
Definition: vk_mem_alloc.h:899
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1037
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:556
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:841
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:921
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:957
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:542
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:908
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:737
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1170
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1188
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:748
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:557
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:705
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:486
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:507
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:512
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1190
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:833
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:967
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:552
Calculated statistics of memory usage in the entire allocator.
Definition: vk_mem_alloc.h:688
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:916
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:499
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:813
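A sketch of vmaCreateBuffer() above, which creates a buffer together with its memory in one call; the size and usage flags are illustrative. Pair it with vmaDestroyBuffer() when the buffer is no longer needed.

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
    VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // device-local memory

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, NULL); // pAllocationInfo is optional
// ...
vmaDestroyBuffer(allocator, buffer, allocation);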
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:701
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:503
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:911
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:751
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:828
Definition: vk_mem_alloc.h:819
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:691
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:554
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:929
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:591
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:960
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
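A hedged sketch of the pairing with vmaUnmapPersistentlyMappedMemory() (listed further below): temporarily unmap persistently mapped memory, do the work that requires it unmapped, then map it back. After mapping back, any cached VmaAllocationInfo::pMappedData pointers should be re-queried, since the new mapping may land at a different address.

vmaUnmapPersistentlyMappedMemory(allocator);
// ... e.g. run vmaDefragment() or other work requiring unmapped memory ...
VkResult res = vmaMapPersistentlyMappedMemory(allocator);
// Re-fetch pMappedData for affected allocations via vmaGetAllocationInfo().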
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:817
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:846
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on the maximum number of bytes that can be allocated out of a particular Vulkan memory heap.
Definition: vk_mem_alloc.h:623
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:707
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:700
Definition: vk_mem_alloc.h:879
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:563
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:501
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:562
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
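A sketch of a map/write/unmap round trip; vmaUnmapMemory() is the matching unmap entry point, the allocation is assumed to live in host-visible memory, and <string.h> is assumed for memcpy.

void* mappedData = NULL;
if (vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcSize); // srcData/srcSize: application data
    vmaUnmapMemory(allocator, allocation);
}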
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:943
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:1048
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps.
Definition: vk_mem_alloc.h:582
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:700
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:697
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:948
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. The (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1032
Definition: vk_mem_alloc.h:815
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1186
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:550
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
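A minimal setup/teardown sketch; physicalDevice and device are assumed to come from the usual Vulkan initialization, and unset members keep their zero-initialized defaults.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);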
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:565
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:695
No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
Definition: vk_mem_alloc.h:740
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:901
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:693
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:560
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:564
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:779
Memory will be mapped on host. Could be used for transfer to/from device.
Definition: vk_mem_alloc.h:745
void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
Definition: vk_mem_alloc.h:1043
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:540
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:553
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1013
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General-purpose memory allocation.
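A sketch of allocating from raw VkMemoryRequirements (values illustrative); this path is for memory not tied to a specific buffer or image. vmaFreeMemory() is assumed as the matching release call.

VkMemoryRequirements memReq = {};
memReq.size = 1024 * 1024;           // 1 MiB
memReq.alignment = 256;
memReq.memoryTypeBits = 0xFFFFFFFFu; // any memory type acceptable

VmaAllocationCreateInfo createInfo = {};
createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // host-mappable memory

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo,
    &allocation, &allocInfo);
// ...
vmaFreeMemory(allocator, allocation);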
Set this flag to use a memory that will be persistently mapped and retrieve a pointer to it.
Definition: vk_mem_alloc.h:795
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
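A sketch of advancing the frame index once per rendered frame, which is what lets the allocator age allocations that can become lost (see frameInUseCount above); the loop condition is hypothetical.

uint32_t frameIndex = 0;
while (!windowShouldClose) // hypothetical loop condition
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit the frame ...
    ++frameIndex;
}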
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
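A sketch of allocating memory for an existing buffer and binding it manually from the returned VmaAllocationInfo (deviceMemory plus offset); the buffer and device handles are assumed to exist.

VmaAllocationCreateInfo createInfo = {};
createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &createInfo,
    &allocation, &allocInfo);
if (res == VK_SUCCESS)
    vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);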
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:870
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:701
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:708
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:954
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:701
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1018