//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    /// Allocator and all objects created from it will not be synchronized internally,
    /// so you must guarantee they are used from only one thread at a time or
    /// synchronized externally by you.
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /// Enables usage of the VK_KHR_dedicated_allocation extension.
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for the created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional, zero means default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Preferred size of a single VkDeviceMemory block to be allocated from "small" heaps. Optional, zero means default.
    VkDeviceSize preferredSmallHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

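/*
Example - a minimal usage sketch, not part of the API. It assumes `physicalDevice`
and `device` are a valid VkPhysicalDevice and VkDevice created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create and destroy buffers, images, allocations ...
    vmaDestroyAllocator(allocator);
*/
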
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns the property flags of this memory type.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets the index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as a string in JSON format.
/// @param[out] ppStatsString Must be freed using vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

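/*
Example - a sketch of dumping statistics, assuming `allocator` is a valid
VmaAllocator and <cstdio> is available:

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
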
#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped and used on host. Could be used for transfer to device.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for writing on device and readback on host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own dedicated memory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create a new such block.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use memory that will be persistently mapped and retrieve a pointer to it.
    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another allocation
    /// made with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating an allocation using this flag, other allocations that were created
    /// with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Leave VMA_MEMORY_USAGE_UNKNOWN if you specify requiredFlags.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Custom general-purpose pointer that will be stored in the VmaAllocation and can be changed using vmaSetAllocationUserData().
    void* pUserData;
    /// Pool that this allocation should be created in. Leave VK_NULL_HANDLE to allocate from general memory.
    VmaPool pool;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

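/*
Example - a sketch of choosing a memory type for host-visible memory. The
`memoryTypeBits` value would normally come from vkGetBufferMemoryRequirements
or vkGetImageMemoryRequirements; here `memReq` is a hypothetical
VkMemoryRequirements filled by one of those calls:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/
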
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /// Use this flag if you always allocate only buffers and linear images, or only
    /// optimal images, out of this pool, so Buffer-Image Granularity can be ignored.
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /// Set this flag to make the pool use memory that is persistently mapped.
    VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a created VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Zero means no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

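/*
Example - a sketch of creating and destroying a custom pool, assuming
`memTypeIndex` was obtained e.g. from vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Pass `pool` as VmaAllocationCreateInfo::pool when allocating. When all
    // allocations made from it have been freed:
    vmaDestroyPool(allocator, pool);
*/
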
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in the given pool as lost if they are not used in the
/// current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object, that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemory(), but takes requirements from the given VkBuffer.
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/// Creates a new allocation that is in the lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

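/*
Example - a sketch of filling a host-visible allocation through a temporary
mapping; `data` and `dataSize` are provided by the caller:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, data, (size_t)dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
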
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. VK_WHOLE_SIZE means no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place. UINT32_MAX means no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

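/*
Example - a sketch of defragmenting an application-managed array of allocations.
`allocations` and `ALLOCATION_COUNT` are hypothetical; buffers or images bound to
moved allocations must be destroyed, recreated and rebound by the caller
afterwards:

    VkBool32 changed[ALLOCATION_COUNT];
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOCATION_COUNT, changed,
        NULL, // Null means default VmaDefragmentationInfo (no limits).
        &stats);
*/
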
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

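/*
Example - a minimal sketch of creating a buffer together with its memory:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ... and when no longer needed:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
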
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio>  // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/* Define this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
//#define VMA_USE_STL_CONTAINERS 1

#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures, e.g. operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// VMA_STATS_STRING_ENABLED set to 1 enables functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    /// Set to 1 to use the best-fit strategy when searching for a free suballocation.
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    /// Set to 1 to make each allocation use its own dedicated VkDeviceMemory, for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /// Minimum alignment of all suballocations, in bytes. Set to more than 1 for debugging purposes only.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /// Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /// Set to 1 to enable a single mutex protecting all entry calls to the library, for debugging purposes only.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /// Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity, for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
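// The function above is the classic SWAR ("parallel") popcount: the first line
// leaves the count of set bits of each 2-bit group in that group, and each
// following line merges adjacent group counts (2 -> 4 -> 8 -> 16 -> 32 bits).
// For example, CountBitsSet(0x0000000B) == 3, since 0xB is binary 1011.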

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest whole number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

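// Worked example for the check above, with pageSize = 4096: a resource A that
// occupies [0, 4000) has its last byte at offset 3999, i.e. on page 0
// (3999 & ~4095 == 0), while a resource B starting at offset 4100 begins on
// page 1 (4100 & ~4095 == 4096). The pages differ, so the function returns
// false and bufferImageGranularity cannot be violated by this pair.
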
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other one is an optimal image. If the type is unknown,
behave conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};
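// Usage sketch (not from the original source): the lock is scope-bound and can
// be disabled at runtime, e.g. when the allocator was created with the
// VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT flag. `m_Mutex` and
// `m_UseMutex` below are hypothetical members of an owning class:
//
//     {
//         VmaMutexLock lock(m_Mutex, m_UseMutex);
//         // ... access state shared between threads ...
//     } // Unlocked automatically when `lock` goes out of scope.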

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

1810 
1811 /*
1812 Performs binary search and returns iterator to first element that is greater or
1813 equal to (key), according to comparison (cmp).
1814 
1815 Cmp should return true if first argument is less than second argument.
1816 
1817 Returned value is the found element, if present in the collection or place where
1818 new element with value (key) should be inserted.
1819 */
1820 template <typename IterT, typename KeyT, typename CmpT>
1821 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1822 {
1823  size_t down = 0, up = (end - beg);
1824  while(down < up)
1825  {
1826  const size_t mid = (down + up) / 2;
1827  if(cmp(*(beg+mid), key))
1828  {
1829  down = mid + 1;
1830  }
1831  else
1832  {
1833  up = mid;
1834  }
1835  }
1836  return beg + down;
1837 }
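// Usage sketch with hypothetical data and a standard less-than comparison:
//
//     uint32_t arr[] = { 1, 3, 3, 7 };
//     const uint32_t* it = VmaBinaryFindFirstNotLess(
//         arr, arr + 4, 3u, [](uint32_t a, uint32_t b) { return a < b; });
//     // it now points to arr[1] - the first element not less than 3.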

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

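// Usage sketch: VmaStlAllocator routes container allocations through
// VkAllocationCallbacks. For example, with the VmaVector defined below
// (`pAllocationCallbacks` is a hypothetical, possibly null pointer):
//
//     typedef VmaVector< uint32_t, VmaStlAllocator<uint32_t> > U32Vector;
//     U32Vector v = U32Vector(VmaStlAllocator<uint32_t>(pAllocationCallbacks));
//     v.push_back(42);
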
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

1953 
1954 /* Class with interface compatible with subset of std::vector.
1955 T must be POD because constructors and destructors are not called and memcpy is
1956 used for these objects. */
1957 template<typename T, typename AllocatorT>
1958 class VmaVector
1959 {
1960 public:
1961  typedef T value_type;
1962 
1963  VmaVector(const AllocatorT& allocator) :
1964  m_Allocator(allocator),
1965  m_pArray(VMA_NULL),
1966  m_Count(0),
1967  m_Capacity(0)
1968  {
1969  }
1970 
1971  VmaVector(size_t count, const AllocatorT& allocator) :
1972  m_Allocator(allocator),
1973  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1974  m_Count(count),
1975  m_Capacity(count)
1976  {
1977  }
1978 
1979  VmaVector(const VmaVector<T, AllocatorT>& src) :
1980  m_Allocator(src.m_Allocator),
1981  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1982  m_Count(src.m_Count),
1983  m_Capacity(src.m_Count)
1984  {
1985  if(m_Count != 0)
1986  {
1987  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1988  }
1989  }
1990 
1991  ~VmaVector()
1992  {
1993  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1994  }
1995 
1996  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1997  {
1998  if(&rhs != this)
1999  {
2000  resize(rhs.m_Count);
2001  if(m_Count != 0)
2002  {
2003  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2004  }
2005  }
2006  return *this;
2007  }
2008 
2009  bool empty() const { return m_Count == 0; }
2010  size_t size() const { return m_Count; }
2011  T* data() { return m_pArray; }
2012  const T* data() const { return m_pArray; }
2013 
2014  T& operator[](size_t index)
2015  {
2016  VMA_HEAVY_ASSERT(index < m_Count);
2017  return m_pArray[index];
2018  }
2019  const T& operator[](size_t index) const
2020  {
2021  VMA_HEAVY_ASSERT(index < m_Count);
2022  return m_pArray[index];
2023  }
2024 
2025  T& front()
2026  {
2027  VMA_HEAVY_ASSERT(m_Count > 0);
2028  return m_pArray[0];
2029  }
2030  const T& front() const
2031  {
2032  VMA_HEAVY_ASSERT(m_Count > 0);
2033  return m_pArray[0];
2034  }
2035  T& back()
2036  {
2037  VMA_HEAVY_ASSERT(m_Count > 0);
2038  return m_pArray[m_Count - 1];
2039  }
2040  const T& back() const
2041  {
2042  VMA_HEAVY_ASSERT(m_Count > 0);
2043  return m_pArray[m_Count - 1];
2044  }
2045 
    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

2067 
2068  void resize(size_t newCount, bool freeMemory = false)
2069  {
2070  size_t newCapacity = m_Capacity;
2071  if(newCount > m_Capacity)
2072  {
2073  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2074  }
2075  else if(freeMemory)
2076  {
2077  newCapacity = newCount;
2078  }
2079 
2080  if(newCapacity != m_Capacity)
2081  {
2082  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2083  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2084  if(elementsToCopy != 0)
2085  {
2086  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2087  }
2088  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2089  m_Capacity = newCapacity;
2090  m_pArray = newArray;
2091  }
2092 
2093  m_Count = newCount;
2094  }
2095 
2096  void clear(bool freeMemory = false)
2097  {
2098  resize(0, freeMemory);
2099  }
2100 
2101  void insert(size_t index, const T& src)
2102  {
2103  VMA_HEAVY_ASSERT(index <= m_Count);
2104  const size_t oldCount = size();
2105  resize(oldCount + 1);
2106  if(index < oldCount)
2107  {
2108  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2109  }
2110  m_pArray[index] = src;
2111  }
2112 
2113  void remove(size_t index)
2114  {
2115  VMA_HEAVY_ASSERT(index < m_Count);
2116  const size_t oldCount = size();
2117  if(index < oldCount - 1)
2118  {
2119  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2120  }
2121  resize(oldCount - 1);
2122  }
2123 
2124  void push_back(const T& src)
2125  {
2126  const size_t newIndex = size();
2127  resize(newIndex + 1);
2128  m_pArray[newIndex] = src;
2129  }
2130 
2131  void pop_back()
2132  {
2133  VMA_HEAVY_ASSERT(m_Count > 0);
2134  resize(size() - 1);
2135  }
2136 
2137  void push_front(const T& src)
2138  {
2139  insert(0, src);
2140  }
2141 
2142  void pop_front()
2143  {
2144  VMA_HEAVY_ASSERT(m_Count > 0);
2145  remove(0);
2146  }
2147 
2148  typedef T* iterator;
2149 
2150  iterator begin() { return m_pArray; }
2151  iterator end() { return m_pArray + m_Count; }
2152 
2153 private:
2154  AllocatorT m_Allocator;
2155  T* m_pArray;
2156  size_t m_Count;
2157  size_t m_Capacity;
2158 };
2159 
2160 template<typename T, typename allocatorT>
2161 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2162 {
2163  vec.insert(index, item);
2164 }
2165 
2166 template<typename T, typename allocatorT>
2167 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2168 {
2169  vec.remove(index);
2170 }
2171 
2172 #endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded, because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

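// Usage sketch: the pool hands out fixed-size items without a heap allocation
// per item; VmaRawList below uses it for its list nodes. Note that Alloc() does
// not construct T (items live in a union), so construction is the caller's job.
// `MyItem` and `pAllocationCallbacks` are hypothetical:
//
//     VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128);
//     MyItem* const item = itemAllocator.Alloc();
//     // ... use *item ...
//     itemAllocator.Free(item);
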
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste time returning
    // all items to m_ItemAllocator as free - the item allocator is destroyed anyway.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with a subset of the interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
{
    return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_Type(ALLOCATION_TYPE_NONE),
        m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_LastUseFrameIndex(currentFrameIndex)
    {
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_PersistentMap = persistentMap;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };
};

3114 /*
3115 Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
3116 and returned to the user as an allocated memory block.
3117 */
3118 struct VmaSuballocation
3119 {
3120  VkDeviceSize offset;
3121  VkDeviceSize size;
3122  VmaAllocation hAllocation;
3123  VmaSuballocationType type;
3124 };
3125 
3126 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3127 
3128 // Cost of making one additional allocation lost, expressed in bytes.
3129 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3130 
3131 /*
3132 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3133 
3134 If canMakeOtherLost was false:
3135 - item points to a FREE suballocation.
3136 - itemsToMakeLostCount is 0.
3137 
3138 If canMakeOtherLost was true:
3139 - item points to first of sequence of suballocations, which are either FREE,
3140  or point to VmaAllocations that can become lost.
3141 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3142  the requested allocation to succeed.
3143 */
3144 struct VmaAllocationRequest
3145 {
3146  VkDeviceSize offset;
3147  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3148  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3149  VmaSuballocationList::iterator item;
3150  size_t itemsToMakeLostCount;
3151 
3152  VkDeviceSize CalcCost() const
3153  {
3154  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3155  }
3156 };
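// Worked example (illustrative): suppose the proposed allocation overlaps two
// lost-able allocations of 256 KiB each, so sumItemSize = 524288 and
// itemsToMakeLostCount = 2. Then:
//   CalcCost() = 524288 + 2 * VMA_LOST_ALLOCATION_COST
//              = 524288 + 2 * 1048576 = 2621440 bytes.
// Among candidate placements, the one with the smallest cost is preferred.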
3157 
3158 /*
3159 Data structure used for bookkeeping of allocations and unused ranges of memory
3160 in a single VkDeviceMemory block.
3161 */
3162 class VmaBlockMetadata
3163 {
3164 public:
3165  VmaBlockMetadata(VmaAllocator hAllocator);
3166  ~VmaBlockMetadata();
3167  void Init(VkDeviceSize size);
3168 
3169  // Validates all data structures inside this object. If not valid, returns false.
3170  bool Validate() const;
3171  VkDeviceSize GetSize() const { return m_Size; }
3172  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3173  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3174  VkDeviceSize GetUnusedRangeSizeMax() const;
3175  // Returns true if this block is empty - contains only a single free suballocation.
3176  bool IsEmpty() const;
3177 
3178  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3179  void AddPoolStats(VmaPoolStats& inoutStats) const;
3180 
3181 #if VMA_STATS_STRING_ENABLED
3182  void PrintDetailedMap(class VmaJsonWriter& json) const;
3183 #endif
3184 
3185  // Creates a trivial request for the case when the block is empty.
3186  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3187 
3188  // Tries to find a place for suballocation with given parameters inside this block.
3189  // If succeeded, fills pAllocationRequest and returns true.
3190  // If failed, returns false.
3191  bool CreateAllocationRequest(
3192  uint32_t currentFrameIndex,
3193  uint32_t frameInUseCount,
3194  VkDeviceSize bufferImageGranularity,
3195  VkDeviceSize allocSize,
3196  VkDeviceSize allocAlignment,
3197  VmaSuballocationType allocType,
3198  bool canMakeOtherLost,
3199  VmaAllocationRequest* pAllocationRequest);
3200 
3201  bool MakeRequestedAllocationsLost(
3202  uint32_t currentFrameIndex,
3203  uint32_t frameInUseCount,
3204  VmaAllocationRequest* pAllocationRequest);
3205 
3206  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3207 
3208  // Makes actual allocation based on request. Request must already be checked and valid.
3209  void Alloc(
3210  const VmaAllocationRequest& request,
3211  VmaSuballocationType type,
3212  VkDeviceSize allocSize,
3213  VmaAllocation hAllocation);
3214 
3215  // Frees the suballocation assigned to the given allocation.
3216  void Free(const VmaAllocation allocation);
3217 
3218 private:
3219  VkDeviceSize m_Size;
3220  uint32_t m_FreeCount;
3221  VkDeviceSize m_SumFreeSize;
3222  VmaSuballocationList m_Suballocations;
3223  // Suballocations that are free and have size greater than a certain threshold.
3224  // Sorted by size, ascending.
3225  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3226 
3227  bool ValidateFreeSuballocationList() const;
3228 
3229  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
3230  // If yes, fills pOffset and returns true. If no, returns false.
3231  bool CheckAllocation(
3232  uint32_t currentFrameIndex,
3233  uint32_t frameInUseCount,
3234  VkDeviceSize bufferImageGranularity,
3235  VkDeviceSize allocSize,
3236  VkDeviceSize allocAlignment,
3237  VmaSuballocationType allocType,
3238  VmaSuballocationList::const_iterator suballocItem,
3239  bool canMakeOtherLost,
3240  VkDeviceSize* pOffset,
3241  size_t* itemsToMakeLostCount,
3242  VkDeviceSize* pSumFreeSize,
3243  VkDeviceSize* pSumItemSize) const;
3244  // Given a free suballocation, merges it with the following one, which must also be free.
3245  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3246  // Releases given suballocation, making it free.
3247  // Merges it with adjacent free suballocations if applicable.
3248  // Returns iterator to new free suballocation at this place.
3249  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3250  // Given a free suballocation, inserts it into the sorted list
3251  // m_FreeSuballocationsBySize if it is large enough to qualify.
3252  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3253  // Given a free suballocation, removes it from the sorted list
3254  // m_FreeSuballocationsBySize if it was registered there.
3255  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3256 };
3257 
3258 /*
3259 Represents a single block of device memory (`VkDeviceMemory`) with all the
3260 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3261 
3262 Thread-safety: This class must be externally synchronized.
3263 */
3264 class VmaDeviceMemoryBlock
3265 {
3266 public:
3267  uint32_t m_MemoryTypeIndex;
3268  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3269  VkDeviceMemory m_hMemory;
3270  bool m_PersistentMap;
3271  void* m_pMappedData;
3272  VmaBlockMetadata m_Metadata;
3273 
3274  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3275 
3276  ~VmaDeviceMemoryBlock()
3277  {
3278  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3279  }
3280 
3281  // Always call after construction.
3282  void Init(
3283  uint32_t newMemoryTypeIndex,
3284  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3285  VkDeviceMemory newMemory,
3286  VkDeviceSize newSize,
3287  bool persistentMap,
3288  void* pMappedData);
3289  // Always call before destruction.
3290  void Destroy(VmaAllocator allocator);
3291 
3292  // Validates all data structures inside this object. If not valid, returns false.
3293  bool Validate() const;
3294 };
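// Lifecycle sketch (illustrative, not a verbatim caller from this file; the
// handles are assumed to come from the surrounding code): because the
// destructor asserts m_hMemory == VK_NULL_HANDLE, Init() must follow
// construction and Destroy() must precede destruction:
//
//   VmaDeviceMemoryBlock block(hAllocator);
//   block.Init(memTypeIndex, blockVectorType, hMemory, size, persistentMap, pMappedData);
//   // ... suballocate via block.m_Metadata ...
//   block.Destroy(hAllocator); // frees m_hMemory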
3295 
3296 struct VmaPointerLess
3297 {
3298  bool operator()(const void* lhs, const void* rhs) const
3299  {
3300  return lhs < rhs;
3301  }
3302 };
3303 
3304 class VmaDefragmentator;
3305 
3306 /*
3307 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3308 Vulkan memory type.
3309 
3310 Synchronized internally with a mutex.
3311 */
3312 struct VmaBlockVector
3313 {
3314  VmaBlockVector(
3315  VmaAllocator hAllocator,
3316  uint32_t memoryTypeIndex,
3317  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3318  VkDeviceSize preferredBlockSize,
3319  size_t minBlockCount,
3320  size_t maxBlockCount,
3321  VkDeviceSize bufferImageGranularity,
3322  uint32_t frameInUseCount,
3323  bool isCustomPool);
3324  ~VmaBlockVector();
3325 
3326  VkResult CreateMinBlocks();
3327 
3328  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3329  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3330  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3331  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3332  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3333 
3334  void GetPoolStats(VmaPoolStats* pStats);
3335 
3336  bool IsEmpty() const { return m_Blocks.empty(); }
3337 
3338  VkResult Allocate(
3339  VmaPool hCurrentPool,
3340  uint32_t currentFrameIndex,
3341  const VkMemoryRequirements& vkMemReq,
3342  const VmaAllocationCreateInfo& createInfo,
3343  VmaSuballocationType suballocType,
3344  VmaAllocation* pAllocation);
3345 
3346  void Free(
3347  VmaAllocation hAllocation);
3348 
3349  // Adds statistics of this BlockVector to pStats.
3350  void AddStats(VmaStats* pStats);
3351 
3352 #if VMA_STATS_STRING_ENABLED
3353  void PrintDetailedMap(class VmaJsonWriter& json);
3354 #endif
3355 
3356  void UnmapPersistentlyMappedMemory();
3357  VkResult MapPersistentlyMappedMemory();
3358 
3359  void MakePoolAllocationsLost(
3360  uint32_t currentFrameIndex,
3361  size_t* pLostAllocationCount);
3362 
3363  VmaDefragmentator* EnsureDefragmentator(
3364  VmaAllocator hAllocator,
3365  uint32_t currentFrameIndex);
3366 
3367  VkResult Defragment(
3368  VmaDefragmentationStats* pDefragmentationStats,
3369  VkDeviceSize& maxBytesToMove,
3370  uint32_t& maxAllocationsToMove);
3371 
3372  void DestroyDefragmentator();
3373 
3374 private:
3375  friend class VmaDefragmentator;
3376 
3377  const VmaAllocator m_hAllocator;
3378  const uint32_t m_MemoryTypeIndex;
3379  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3380  const VkDeviceSize m_PreferredBlockSize;
3381  const size_t m_MinBlockCount;
3382  const size_t m_MaxBlockCount;
3383  const VkDeviceSize m_BufferImageGranularity;
3384  const uint32_t m_FrameInUseCount;
3385  const bool m_IsCustomPool;
3386  VMA_MUTEX m_Mutex;
3387  // Incrementally sorted by sumFreeSize, ascending.
3388  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3389  /* There can be at most one block that is completely empty - a hysteresis
3390  to avoid the pessimistic case of alternating creation and destruction of
3391  a VkDeviceMemory. */
3392  bool m_HasEmptyBlock;
3393  VmaDefragmentator* m_pDefragmentator;
3394 
3395  // Finds and removes given block from vector.
3396  void Remove(VmaDeviceMemoryBlock* pBlock);
3397 
3398  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3399  // after this call.
3400  void IncrementallySortBlocks();
3401 
3402  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3403 };
3404 
3405 struct VmaPool_T
3406 {
3407 public:
3408  VmaBlockVector m_BlockVector;
3409 
3410  // Takes ownership.
3411  VmaPool_T(
3412  VmaAllocator hAllocator,
3413  const VmaPoolCreateInfo& createInfo);
3414  ~VmaPool_T();
3415 
3416  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3417 
3418 #if VMA_STATS_STRING_ENABLED
3419  //void PrintDetailedMap(class VmaStringBuilder& sb);
3420 #endif
3421 };
3422 
3423 class VmaDefragmentator
3424 {
3425  const VmaAllocator m_hAllocator;
3426  VmaBlockVector* const m_pBlockVector;
3427  uint32_t m_CurrentFrameIndex;
3428  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3429  VkDeviceSize m_BytesMoved;
3430  uint32_t m_AllocationsMoved;
3431 
3432  struct AllocationInfo
3433  {
3434  VmaAllocation m_hAllocation;
3435  VkBool32* m_pChanged;
3436 
3437  AllocationInfo() :
3438  m_hAllocation(VK_NULL_HANDLE),
3439  m_pChanged(VMA_NULL)
3440  {
3441  }
3442  };
3443 
3444  struct AllocationInfoSizeGreater
3445  {
3446  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3447  {
3448  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3449  }
3450  };
3451 
3452  // Used between AddAllocation and Defragment.
3453  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3454 
3455  struct BlockInfo
3456  {
3457  VmaDeviceMemoryBlock* m_pBlock;
3458  bool m_HasNonMovableAllocations;
3459  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3460 
3461  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3462  m_pBlock(VMA_NULL),
3463  m_HasNonMovableAllocations(true),
3464  m_Allocations(pAllocationCallbacks),
3465  m_pMappedDataForDefragmentation(VMA_NULL)
3466  {
3467  }
3468 
3469  void CalcHasNonMovableAllocations()
3470  {
3471  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3472  const size_t defragmentAllocCount = m_Allocations.size();
3473  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3474  }
3475 
3476  void SortAllocationsBySizeDescecnding()
3477  {
3478  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3479  }
3480 
3481  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3482  void Unmap(VmaAllocator hAllocator);
3483 
3484  private:
3485  // Not null if mapped for defragmentation only, not persistently mapped.
3486  void* m_pMappedDataForDefragmentation;
3487  };
3488 
3489  struct BlockPointerLess
3490  {
3491  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3492  {
3493  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3494  }
3495  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3496  {
3497  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3498  }
3499  };
3500 
3501  // 1. Blocks with some non-movable allocations go first.
3502  // 2. Blocks with smaller sumFreeSize go first.
3503  struct BlockInfoCompareMoveDestination
3504  {
3505  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3506  {
3507  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3508  {
3509  return true;
3510  }
3511  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3512  {
3513  return false;
3514  }
3515  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3516  {
3517  return true;
3518  }
3519  return false;
3520  }
3521  };
3522 
3523  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3524  BlockInfoVector m_Blocks;
3525 
3526  VkResult DefragmentRound(
3527  VkDeviceSize maxBytesToMove,
3528  uint32_t maxAllocationsToMove);
3529 
3530  static bool MoveMakesSense(
3531  size_t dstBlockIndex, VkDeviceSize dstOffset,
3532  size_t srcBlockIndex, VkDeviceSize srcOffset);
3533 
3534 public:
3535  VmaDefragmentator(
3536  VmaAllocator hAllocator,
3537  VmaBlockVector* pBlockVector,
3538  uint32_t currentFrameIndex);
3539 
3540  ~VmaDefragmentator();
3541 
3542  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3543  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3544 
3545  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3546 
3547  VkResult Defragment(
3548  VkDeviceSize maxBytesToMove,
3549  uint32_t maxAllocationsToMove);
3550 };
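// Usage sketch (illustrative; hAllocator, frameIndex, hAlloc and the budget
// variables are assumed to be in scope): allocations are registered first,
// then moved within the given budget, and the results queried afterwards:
//
//   VmaDefragmentator* pDefrag = blockVector.EnsureDefragmentator(hAllocator, frameIndex);
//   VkBool32 changed = VK_FALSE;
//   pDefrag->AddAllocation(hAlloc, &changed);
//   VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
//   VkDeviceSize bytesMoved = pDefrag->GetBytesMoved();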
3551 
3552 // Main allocator object.
3553 struct VmaAllocator_T
3554 {
3555  bool m_UseMutex;
3556  bool m_UseKhrDedicatedAllocation;
3557  VkDevice m_hDevice;
3558  bool m_AllocationCallbacksSpecified;
3559  VkAllocationCallbacks m_AllocationCallbacks;
3560  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3561  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3562  // Counter to allow nested calls to these functions.
3563  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3564 
3565  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3566  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3567  VMA_MUTEX m_HeapSizeLimitMutex;
3568 
3569  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3570  VkPhysicalDeviceMemoryProperties m_MemProps;
3571 
3572  // Default pools.
3573  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3574 
3575  // Each vector is sorted by memory (handle value).
3576  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3577  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3578  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3579 
3580  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3581  ~VmaAllocator_T();
3582 
3583  const VkAllocationCallbacks* GetAllocationCallbacks() const
3584  {
3585  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3586  }
3587  const VmaVulkanFunctions& GetVulkanFunctions() const
3588  {
3589  return m_VulkanFunctions;
3590  }
3591 
3592  VkDeviceSize GetBufferImageGranularity() const
3593  {
3594  return VMA_MAX(
3595  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3596  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3597  }
3598 
3599  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3600  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3601 
3602  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3603  {
3604  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3605  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3606  }
3607 
3608  void GetBufferMemoryRequirements(
3609  VkBuffer hBuffer,
3610  VkMemoryRequirements& memReq,
3611  bool& requiresDedicatedAllocation,
3612  bool& prefersDedicatedAllocation) const;
3613  void GetImageMemoryRequirements(
3614  VkImage hImage,
3615  VkMemoryRequirements& memReq,
3616  bool& requiresDedicatedAllocation,
3617  bool& prefersDedicatedAllocation) const;
3618 
3619  // Main allocation function.
3620  VkResult AllocateMemory(
3621  const VkMemoryRequirements& vkMemReq,
3622  bool requiresDedicatedAllocation,
3623  bool prefersDedicatedAllocation,
3624  VkBuffer dedicatedBuffer,
3625  VkImage dedicatedImage,
3626  const VmaAllocationCreateInfo& createInfo,
3627  VmaSuballocationType suballocType,
3628  VmaAllocation* pAllocation);
3629 
3630  // Main deallocation function.
3631  void FreeMemory(const VmaAllocation allocation);
3632 
3633  void CalculateStats(VmaStats* pStats);
3634 
3635 #if VMA_STATS_STRING_ENABLED
3636  void PrintDetailedMap(class VmaJsonWriter& json);
3637 #endif
3638 
3639  void UnmapPersistentlyMappedMemory();
3640  VkResult MapPersistentlyMappedMemory();
3641 
3642  VkResult Defragment(
3643  VmaAllocation* pAllocations,
3644  size_t allocationCount,
3645  VkBool32* pAllocationsChanged,
3646  const VmaDefragmentationInfo* pDefragmentationInfo,
3647  VmaDefragmentationStats* pDefragmentationStats);
3648 
3649  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3650 
3651  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3652  void DestroyPool(VmaPool pool);
3653  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3654 
3655  void SetCurrentFrameIndex(uint32_t frameIndex);
3656 
3657  void MakePoolAllocationsLost(
3658  VmaPool hPool,
3659  size_t* pLostAllocationCount);
3660 
3661  void CreateLostAllocation(VmaAllocation* pAllocation);
3662 
3663  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3664  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3665 
3666 private:
3667  VkDeviceSize m_PreferredLargeHeapBlockSize;
3668  VkDeviceSize m_PreferredSmallHeapBlockSize;
3669 
3670  VkPhysicalDevice m_PhysicalDevice;
3671  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3672 
3673  VMA_MUTEX m_PoolsMutex;
3674  // Protected by m_PoolsMutex. Sorted by pointer value.
3675  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3676 
3677  VmaVulkanFunctions m_VulkanFunctions;
3678 
3679  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3680 
3681  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3682 
3683  VkResult AllocateMemoryOfType(
3684  const VkMemoryRequirements& vkMemReq,
3685  bool dedicatedAllocation,
3686  VkBuffer dedicatedBuffer,
3687  VkImage dedicatedImage,
3688  const VmaAllocationCreateInfo& createInfo,
3689  uint32_t memTypeIndex,
3690  VmaSuballocationType suballocType,
3691  VmaAllocation* pAllocation);
3692 
3693  // Allocates and registers a new VkDeviceMemory dedicated to a single allocation.
3694  VkResult AllocateDedicatedMemory(
3695  VkDeviceSize size,
3696  VmaSuballocationType suballocType,
3697  uint32_t memTypeIndex,
3698  bool map,
3699  void* pUserData,
3700  VkBuffer dedicatedBuffer,
3701  VkImage dedicatedImage,
3702  VmaAllocation* pAllocation);
3703 
3704  // Frees the given allocation as dedicated memory and unregisters it.
3705  void FreeDedicatedMemory(VmaAllocation allocation);
3706 };
3707 
3709 // Memory allocation #2 after VmaAllocator_T definition
3710 
3711 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3712 {
3713  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3714 }
3715 
3716 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3717 {
3718  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3719 }
3720 
3721 template<typename T>
3722 static T* VmaAllocate(VmaAllocator hAllocator)
3723 {
3724  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3725 }
3726 
3727 template<typename T>
3728 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3729 {
3730  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3731 }
3732 
3733 template<typename T>
3734 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3735 {
3736  if(ptr != VMA_NULL)
3737  {
3738  ptr->~T();
3739  VmaFree(hAllocator, ptr);
3740  }
3741 }
3742 
3743 template<typename T>
3744 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3745 {
3746  if(ptr != VMA_NULL)
3747  {
3748  for(size_t i = count; i--; )
3749  ptr[i].~T();
3750  VmaFree(hAllocator, ptr);
3751  }
3752 }
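// Usage sketch (illustrative): these helpers route the library's own CPU-side
// allocations through the VkAllocationCallbacks stored in the allocator.
// `Foo` is a hypothetical internal type:
//
//   Foo* p = new(VmaAllocate<Foo>(hAllocator)) Foo(); // placement-new into VMA memory
//   // ...
//   vma_delete(hAllocator, p); // calls ~Foo() and returns the memory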
3753 
3755 // VmaStringBuilder
3756 
3757 #if VMA_STATS_STRING_ENABLED
3758 
3759 class VmaStringBuilder
3760 {
3761 public:
3762  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3763  size_t GetLength() const { return m_Data.size(); }
3764  const char* GetData() const { return m_Data.data(); }
3765 
3766  void Add(char ch) { m_Data.push_back(ch); }
3767  void Add(const char* pStr);
3768  void AddNewLine() { Add('\n'); }
3769  void AddNumber(uint32_t num);
3770  void AddNumber(uint64_t num);
3771  void AddPointer(const void* ptr);
3772 
3773 private:
3774  VmaVector< char, VmaStlAllocator<char> > m_Data;
3775 };
3776 
3777 void VmaStringBuilder::Add(const char* pStr)
3778 {
3779  const size_t strLen = strlen(pStr);
3780  if(strLen > 0)
3781  {
3782  const size_t oldCount = m_Data.size();
3783  m_Data.resize(oldCount + strLen);
3784  memcpy(m_Data.data() + oldCount, pStr, strLen);
3785  }
3786 }
3787 
3788 void VmaStringBuilder::AddNumber(uint32_t num)
3789 {
4790  char buf[11]; // Max uint32_t has 10 decimal digits, +1 for the terminating null.
3791  VmaUint32ToStr(buf, sizeof(buf), num);
3792  Add(buf);
3793 }
3794 
3795 void VmaStringBuilder::AddNumber(uint64_t num)
3796 {
4797  char buf[21]; // Max uint64_t has 20 decimal digits, +1 for the terminating null.
3798  VmaUint64ToStr(buf, sizeof(buf), num);
3799  Add(buf);
3800 }
3801 
3802 void VmaStringBuilder::AddPointer(const void* ptr)
3803 {
4804  char buf[21]; // Large enough for a 64-bit pointer rendered as text.
3805  VmaPtrToStr(buf, sizeof(buf), ptr);
3806  Add(buf);
3807 }
3808 
3809 #endif // #if VMA_STATS_STRING_ENABLED
3810 
3812 // VmaJsonWriter
3813 
3814 #if VMA_STATS_STRING_ENABLED
3815 
3816 class VmaJsonWriter
3817 {
3818 public:
3819  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3820  ~VmaJsonWriter();
3821 
3822  void BeginObject(bool singleLine = false);
3823  void EndObject();
3824 
3825  void BeginArray(bool singleLine = false);
3826  void EndArray();
3827 
3828  void WriteString(const char* pStr);
3829  void BeginString(const char* pStr = VMA_NULL);
3830  void ContinueString(const char* pStr);
3831  void ContinueString(uint32_t n);
3832  void ContinueString(uint64_t n);
3833  void EndString(const char* pStr = VMA_NULL);
3834 
3835  void WriteNumber(uint32_t n);
3836  void WriteNumber(uint64_t n);
3837  void WriteBool(bool b);
3838  void WriteNull();
3839 
3840 private:
3841  static const char* const INDENT;
3842 
3843  enum COLLECTION_TYPE
3844  {
3845  COLLECTION_TYPE_OBJECT,
3846  COLLECTION_TYPE_ARRAY,
3847  };
3848  struct StackItem
3849  {
3850  COLLECTION_TYPE type;
3851  uint32_t valueCount;
3852  bool singleLineMode;
3853  };
3854 
3855  VmaStringBuilder& m_SB;
3856  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3857  bool m_InsideString;
3858 
3859  void BeginValue(bool isString);
3860  void WriteIndent(bool oneLess = false);
3861 };
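// Usage sketch (illustrative; hAllocator is assumed to be in scope): inside an
// object, keys and values are written with alternating calls and the writer
// inserts ':' and ',' itself. Roughly:
//
//   VmaStringBuilder sb(hAllocator);
//   {
//       VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Name");     // key
//       json.WriteString("Block 0");  // value
//       json.WriteString("Sizes");    // key
//       json.BeginArray(true);        // single-line array
//       json.WriteNumber(256u);
//       json.WriteNumber(1024u);
//       json.EndArray();
//       json.EndObject();
//   } // ~VmaJsonWriter asserts all collections were closed
//   // sb.GetData() now holds something like:
//   // { "Name": "Block 0", "Sizes": [256, 1024] }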
3862 
3863 const char* const VmaJsonWriter::INDENT = "  ";
3864 
3865 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3866  m_SB(sb),
3867  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3868  m_InsideString(false)
3869 {
3870 }
3871 
3872 VmaJsonWriter::~VmaJsonWriter()
3873 {
3874  VMA_ASSERT(!m_InsideString);
3875  VMA_ASSERT(m_Stack.empty());
3876 }
3877 
3878 void VmaJsonWriter::BeginObject(bool singleLine)
3879 {
3880  VMA_ASSERT(!m_InsideString);
3881 
3882  BeginValue(false);
3883  m_SB.Add('{');
3884 
3885  StackItem item;
3886  item.type = COLLECTION_TYPE_OBJECT;
3887  item.valueCount = 0;
3888  item.singleLineMode = singleLine;
3889  m_Stack.push_back(item);
3890 }
3891 
3892 void VmaJsonWriter::EndObject()
3893 {
3894  VMA_ASSERT(!m_InsideString);
3895 
3896  WriteIndent(true);
3897  m_SB.Add('}');
3898 
3899  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3900  m_Stack.pop_back();
3901 }
3902 
3903 void VmaJsonWriter::BeginArray(bool singleLine)
3904 {
3905  VMA_ASSERT(!m_InsideString);
3906 
3907  BeginValue(false);
3908  m_SB.Add('[');
3909 
3910  StackItem item;
3911  item.type = COLLECTION_TYPE_ARRAY;
3912  item.valueCount = 0;
3913  item.singleLineMode = singleLine;
3914  m_Stack.push_back(item);
3915 }
3916 
3917 void VmaJsonWriter::EndArray()
3918 {
3919  VMA_ASSERT(!m_InsideString);
3920 
3921  WriteIndent(true);
3922  m_SB.Add(']');
3923 
3924  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3925  m_Stack.pop_back();
3926 }
3927 
3928 void VmaJsonWriter::WriteString(const char* pStr)
3929 {
3930  BeginString(pStr);
3931  EndString();
3932 }
3933 
3934 void VmaJsonWriter::BeginString(const char* pStr)
3935 {
3936  VMA_ASSERT(!m_InsideString);
3937 
3938  BeginValue(true);
3939  m_SB.Add('"');
3940  m_InsideString = true;
3941  if(pStr != VMA_NULL && pStr[0] != '\0')
3942  {
3943  ContinueString(pStr);
3944  }
3945 }
3946 
3947 void VmaJsonWriter::ContinueString(const char* pStr)
3948 {
3949  VMA_ASSERT(m_InsideString);
3950 
3951  const size_t strLen = strlen(pStr);
3952  for(size_t i = 0; i < strLen; ++i)
3953  {
3954  char ch = pStr[i];
3955  if(ch == '\\')
3956  {
3957  m_SB.Add("\\\\");
3958  }
3959  else if(ch == '"')
3960  {
3961  m_SB.Add("\\\"");
3962  }
3963  else if(ch >= 32)
3964  {
3965  m_SB.Add(ch);
3966  }
3967  else switch(ch)
3968  {
3969  case '\n':
3970  m_SB.Add("\\n");
3971  break;
3972  case '\r':
3973  m_SB.Add("\\r");
3974  break;
3975  case '\t':
3976  m_SB.Add("\\t");
3977  break;
3978  default:
3979  VMA_ASSERT(0 && "Character not currently supported.");
3980  break;
3981  }
3982  }
3983 }
3984 
3985 void VmaJsonWriter::ContinueString(uint32_t n)
3986 {
3987  VMA_ASSERT(m_InsideString);
3988  m_SB.AddNumber(n);
3989 }
3990 
3991 void VmaJsonWriter::ContinueString(uint64_t n)
3992 {
3993  VMA_ASSERT(m_InsideString);
3994  m_SB.AddNumber(n);
3995 }
3996 
3997 void VmaJsonWriter::EndString(const char* pStr)
3998 {
3999  VMA_ASSERT(m_InsideString);
4000  if(pStr != VMA_NULL && pStr[0] != '\0')
4001  {
4002  ContinueString(pStr);
4003  }
4004  m_SB.Add('"');
4005  m_InsideString = false;
4006 }
4007 
4008 void VmaJsonWriter::WriteNumber(uint32_t n)
4009 {
4010  VMA_ASSERT(!m_InsideString);
4011  BeginValue(false);
4012  m_SB.AddNumber(n);
4013 }
4014 
4015 void VmaJsonWriter::WriteNumber(uint64_t n)
4016 {
4017  VMA_ASSERT(!m_InsideString);
4018  BeginValue(false);
4019  m_SB.AddNumber(n);
4020 }
4021 
4022 void VmaJsonWriter::WriteBool(bool b)
4023 {
4024  VMA_ASSERT(!m_InsideString);
4025  BeginValue(false);
4026  m_SB.Add(b ? "true" : "false");
4027 }
4028 
4029 void VmaJsonWriter::WriteNull()
4030 {
4031  VMA_ASSERT(!m_InsideString);
4032  BeginValue(false);
4033  m_SB.Add("null");
4034 }
4035 
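// BeginValue handles the separators. Inside an object, entries alternate
// key/value: an even valueCount means a key (a string) is expected next
// (hence the assert), while an odd one means the preceding key still needs
// ": " before this value; any other non-first entry is preceded by ", "
// and fresh indentation.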
4036 void VmaJsonWriter::BeginValue(bool isString)
4037 {
4038  if(!m_Stack.empty())
4039  {
4040  StackItem& currItem = m_Stack.back();
4041  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4042  currItem.valueCount % 2 == 0)
4043  {
4044  VMA_ASSERT(isString);
4045  }
4046 
4047  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4048  currItem.valueCount % 2 != 0)
4049  {
4050  m_SB.Add(": ");
4051  }
4052  else if(currItem.valueCount > 0)
4053  {
4054  m_SB.Add(", ");
4055  WriteIndent();
4056  }
4057  else
4058  {
4059  WriteIndent();
4060  }
4061  ++currItem.valueCount;
4062  }
4063 }
4064 
4065 void VmaJsonWriter::WriteIndent(bool oneLess)
4066 {
4067  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4068  {
4069  m_SB.AddNewLine();
4070 
4071  size_t count = m_Stack.size();
4072  if(count > 0 && oneLess)
4073  {
4074  --count;
4075  }
4076  for(size_t i = 0; i < count; ++i)
4077  {
4078  m_SB.Add(INDENT);
4079  }
4080  }
4081 }
4082 
4083 #endif // #if VMA_STATS_STRING_ENABLED
4084 
4086 
4087 VkDeviceSize VmaAllocation_T::GetOffset() const
4088 {
4089  switch(m_Type)
4090  {
4091  case ALLOCATION_TYPE_BLOCK:
4092  return m_BlockAllocation.m_Offset;
4093  case ALLOCATION_TYPE_DEDICATED:
4094  return 0;
4095  default:
4096  VMA_ASSERT(0);
4097  return 0;
4098  }
4099 }
4100 
4101 VkDeviceMemory VmaAllocation_T::GetMemory() const
4102 {
4103  switch(m_Type)
4104  {
4105  case ALLOCATION_TYPE_BLOCK:
4106  return m_BlockAllocation.m_Block->m_hMemory;
4107  case ALLOCATION_TYPE_DEDICATED:
4108  return m_DedicatedAllocation.m_hMemory;
4109  default:
4110  VMA_ASSERT(0);
4111  return VK_NULL_HANDLE;
4112  }
4113 }
4114 
4115 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4116 {
4117  switch(m_Type)
4118  {
4119  case ALLOCATION_TYPE_BLOCK:
4120  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4121  case ALLOCATION_TYPE_DEDICATED:
4122  return m_DedicatedAllocation.m_MemoryTypeIndex;
4123  default:
4124  VMA_ASSERT(0);
4125  return UINT32_MAX;
4126  }
4127 }
4128 
4129 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
4130 {
4131  switch(m_Type)
4132  {
4133  case ALLOCATION_TYPE_BLOCK:
4134  return m_BlockAllocation.m_Block->m_BlockVectorType;
4135  case ALLOCATION_TYPE_DEDICATED:
4136  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
4137  default:
4138  VMA_ASSERT(0);
4139  return VMA_BLOCK_VECTOR_TYPE_COUNT;
4140  }
4141 }
4142 
4143 void* VmaAllocation_T::GetMappedData() const
4144 {
4145  switch(m_Type)
4146  {
4147  case ALLOCATION_TYPE_BLOCK:
4148  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
4149  {
4150  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
4151  }
4152  else
4153  {
4154  return VMA_NULL;
4155  }
4156  break;
4157  case ALLOCATION_TYPE_DEDICATED:
4158  return m_DedicatedAllocation.m_pMappedData;
4159  default:
4160  VMA_ASSERT(0);
4161  return VMA_NULL;
4162  }
4163 }
4164 
4165 bool VmaAllocation_T::CanBecomeLost() const
4166 {
4167  switch(m_Type)
4168  {
4169  case ALLOCATION_TYPE_BLOCK:
4170  return m_BlockAllocation.m_CanBecomeLost;
4171  case ALLOCATION_TYPE_DEDICATED:
4172  return false;
4173  default:
4174  VMA_ASSERT(0);
4175  return false;
4176  }
4177 }
4178 
4179 VmaPool VmaAllocation_T::GetPool() const
4180 {
4181  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4182  return m_BlockAllocation.m_hPool;
4183 }
4184 
4185 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4186 {
4187  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4188  if(m_DedicatedAllocation.m_PersistentMap)
4189  {
4190  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4191  hAllocator->m_hDevice,
4192  m_DedicatedAllocation.m_hMemory,
4193  0,
4194  VK_WHOLE_SIZE,
4195  0,
4196  &m_DedicatedAllocation.m_pMappedData);
4197  }
4198  return VK_SUCCESS;
4199 }
4200 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4201 {
4202  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4203  if(m_DedicatedAllocation.m_pMappedData)
4204  {
4205  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
4206  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
4207  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4208  }
4209 }
4210 
4211 
4212 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4213 {
4214  VMA_ASSERT(CanBecomeLost());
4215 
4216  /*
4217  Warning: This is a carefully designed algorithm.
4218  Do not modify unless you really know what you're doing :)
4219  */
4220  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4221  for(;;)
4222  {
4223  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4224  {
4225  VMA_ASSERT(0);
4226  return false;
4227  }
4228  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4229  {
4230  return false;
4231  }
4232  else // Last use time earlier than current time.
4233  {
4234  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4235  {
4236  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4237  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4238  return true;
4239  }
4240  }
4241  }
4242 }
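// Worked example (illustrative): with currentFrameIndex = 120 and
// frameInUseCount = 2, an allocation last used in frame 100 satisfies
// 100 + 2 < 120, so the compare-exchange above flips its LastUseFrameIndex to
// VMA_FRAME_INDEX_LOST and MakeLost() returns true. One last used in frame 118
// gives 118 + 2 >= 120, so it is still considered in use and MakeLost()
// returns false.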
4243 
4244 #if VMA_STATS_STRING_ENABLED
4245 
4246 // Names correspond to the values of enum VmaSuballocationType.
4247 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4248  "FREE",
4249  "UNKNOWN",
4250  "BUFFER",
4251  "IMAGE_UNKNOWN",
4252  "IMAGE_LINEAR",
4253  "IMAGE_OPTIMAL",
4254 };
4255 
4256 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4257 {
4258  json.BeginObject();
4259 
4260  json.WriteString("Blocks");
4261  json.WriteNumber(stat.blockCount);
4262 
4263  json.WriteString("Allocations");
4264  json.WriteNumber(stat.allocationCount);
4265 
4266  json.WriteString("UnusedRanges");
4267  json.WriteNumber(stat.unusedRangeCount);
4268 
4269  json.WriteString("UsedBytes");
4270  json.WriteNumber(stat.usedBytes);
4271 
4272  json.WriteString("UnusedBytes");
4273  json.WriteNumber(stat.unusedBytes);
4274 
4275  if(stat.allocationCount > 1)
4276  {
4277  json.WriteString("AllocationSize");
4278  json.BeginObject(true);
4279  json.WriteString("Min");
4280  json.WriteNumber(stat.allocationSizeMin);
4281  json.WriteString("Avg");
4282  json.WriteNumber(stat.allocationSizeAvg);
4283  json.WriteString("Max");
4284  json.WriteNumber(stat.allocationSizeMax);
4285  json.EndObject();
4286  }
4287 
4288  if(stat.unusedRangeCount > 1)
4289  {
4290  json.WriteString("UnusedRangeSize");
4291  json.BeginObject(true);
4292  json.WriteString("Min");
4293  json.WriteNumber(stat.unusedRangeSizeMin);
4294  json.WriteString("Avg");
4295  json.WriteNumber(stat.unusedRangeSizeAvg);
4296  json.WriteString("Max");
4297  json.WriteNumber(stat.unusedRangeSizeMax);
4298  json.EndObject();
4299  }
4300 
4301  json.EndObject();
4302 }
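// Example output (illustrative; values are made up and formatting is
// approximate):
//
//   {
//     "Blocks": 1,
//     "Allocations": 3,
//     "UnusedRanges": 2,
//     "UsedBytes": 1572864,
//     "UnusedBytes": 524288,
//     "AllocationSize": { "Min": 262144, "Avg": 524288, "Max": 786432 },
//     "UnusedRangeSize": { "Min": 131072, "Avg": 262144, "Max": 393216 }
//   }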
4303 
4304 #endif // #if VMA_STATS_STRING_ENABLED
4305 
4306 struct VmaSuballocationItemSizeLess
4307 {
4308  bool operator()(
4309  const VmaSuballocationList::iterator lhs,
4310  const VmaSuballocationList::iterator rhs) const
4311  {
4312  return lhs->size < rhs->size;
4313  }
4314  bool operator()(
4315  const VmaSuballocationList::iterator lhs,
4316  VkDeviceSize rhsSize) const
4317  {
4318  return lhs->size < rhsSize;
4319  }
4320 };
4321 
4323 // class VmaBlockMetadata
4324 
4325 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4326  m_Size(0),
4327  m_FreeCount(0),
4328  m_SumFreeSize(0),
4329  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4330  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4331 {
4332 }
4333 
4334 VmaBlockMetadata::~VmaBlockMetadata()
4335 {
4336 }
4337 
4338 void VmaBlockMetadata::Init(VkDeviceSize size)
4339 {
4340  m_Size = size;
4341  m_FreeCount = 1;
4342  m_SumFreeSize = size;
4343 
4344  VmaSuballocation suballoc = {};
4345  suballoc.offset = 0;
4346  suballoc.size = size;
4347  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4348  suballoc.hAllocation = VK_NULL_HANDLE;
4349 
4350  m_Suballocations.push_back(suballoc);
4351  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4352  --suballocItem;
4353  m_FreeSuballocationsBySize.push_back(suballocItem);
4354 }
4355 
4356 bool VmaBlockMetadata::Validate() const
4357 {
4358  if(m_Suballocations.empty())
4359  {
4360  return false;
4361  }
4362 
4363  // Expected offset of a new suballocation, as calculated from the previous ones.
4364  VkDeviceSize calculatedOffset = 0;
4365  // Expected number of free suballocations as calculated from traversing their list.
4366  uint32_t calculatedFreeCount = 0;
4367  // Expected sum size of free suballocations as calculated from traversing their list.
4368  VkDeviceSize calculatedSumFreeSize = 0;
4369  // Expected number of free suballocations that should be registered in
4370  // m_FreeSuballocationsBySize, as calculated from traversing their list.
4371  size_t freeSuballocationsToRegister = 0;
4372  // True if the previously visited suballocation was free.
4373  bool prevFree = false;
4374 
4375  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4376  suballocItem != m_Suballocations.cend();
4377  ++suballocItem)
4378  {
4379  const VmaSuballocation& subAlloc = *suballocItem;
4380 
4381  // Actual offset of this suballocation doesn't match expected one.
4382  if(subAlloc.offset != calculatedOffset)
4383  {
4384  return false;
4385  }
4386 
4387  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4388  // Two adjacent free suballocations are invalid. They should be merged.
4389  if(prevFree && currFree)
4390  {
4391  return false;
4392  }
4393  prevFree = currFree;
4394 
4395  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4396  {
4397  return false;
4398  }
4399 
4400  if(currFree)
4401  {
4402  calculatedSumFreeSize += subAlloc.size;
4403  ++calculatedFreeCount;
4404  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4405  {
4406  ++freeSuballocationsToRegister;
4407  }
4408  }
4409 
4410  calculatedOffset += subAlloc.size;
4411  }
4412 
4413  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4414  // match expected one.
4415  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4416  {
4417  return false;
4418  }
4419 
4420  VkDeviceSize lastSize = 0;
4421  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4422  {
4423  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4424 
4425  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4426  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4427  {
4428  return false;
4429  }
4430  // They must be sorted by size ascending.
4431  if(suballocItem->size < lastSize)
4432  {
4433  return false;
4434  }
4435 
4436  lastSize = suballocItem->size;
4437  }
4438 
4439  // Check if totals match the calculated values.
4440  return
4441  ValidateFreeSuballocationList() &&
4442  (calculatedOffset == m_Size) &&
4443  (calculatedSumFreeSize == m_SumFreeSize) &&
4444  (calculatedFreeCount == m_FreeCount);
4445 }
4446 
4447 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4448 {
4449  if(!m_FreeSuballocationsBySize.empty())
4450  {
4451  return m_FreeSuballocationsBySize.back()->size;
4452  }
4453  else
4454  {
4455  return 0;
4456  }
4457 }
4458 
4459 bool VmaBlockMetadata::IsEmpty() const
4460 {
4461  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4462 }
4463 
4464 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4465 {
4466  outInfo.blockCount = 1;
4467 
4468  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4469  outInfo.allocationCount = rangeCount - m_FreeCount;
4470  outInfo.unusedRangeCount = m_FreeCount;
4471 
4472  outInfo.unusedBytes = m_SumFreeSize;
4473  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4474 
4475  outInfo.allocationSizeMin = UINT64_MAX;
4476  outInfo.allocationSizeMax = 0;
4477  outInfo.unusedRangeSizeMin = UINT64_MAX;
4478  outInfo.unusedRangeSizeMax = 0;
4479 
4480  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4481  suballocItem != m_Suballocations.cend();
4482  ++suballocItem)
4483  {
4484  const VmaSuballocation& suballoc = *suballocItem;
4485  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4486  {
4487  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4488  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4489  }
4490  else
4491  {
4492  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4493  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4494  }
4495  }
4496 }
4497 
4498 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4499 {
4500  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4501 
4502  inoutStats.size += m_Size;
4503  inoutStats.unusedSize += m_SumFreeSize;
4504  inoutStats.allocationCount += rangeCount - m_FreeCount;
4505  inoutStats.unusedRangeCount += m_FreeCount;
4506  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4507 }
4508 
4509 #if VMA_STATS_STRING_ENABLED
4510 
4511 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4512 {
4513  json.BeginObject();
4514 
4515  json.WriteString("TotalBytes");
4516  json.WriteNumber(m_Size);
4517 
4518  json.WriteString("UnusedBytes");
4519  json.WriteNumber(m_SumFreeSize);
4520 
4521  json.WriteString("Allocations");
4522  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4523 
4524  json.WriteString("UnusedRanges");
4525  json.WriteNumber(m_FreeCount);
4526 
4527  json.WriteString("Suballocations");
4528  json.BeginArray();
4529  size_t i = 0;
4530  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4531  suballocItem != m_Suballocations.cend();
4532  ++suballocItem, ++i)
4533  {
4534  json.BeginObject(true);
4535 
4536  json.WriteString("Type");
4537  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4538 
4539  json.WriteString("Size");
4540  json.WriteNumber(suballocItem->size);
4541 
4542  json.WriteString("Offset");
4543  json.WriteNumber(suballocItem->offset);
4544 
4545  json.EndObject();
4546  }
4547  json.EndArray();
4548 
4549  json.EndObject();
4550 }
4551 
4552 #endif // #if VMA_STATS_STRING_ENABLED
4553 
4554 /*
4555 How many suitable free suballocations to analyze before choosing the best one.
4556 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
4557  will be chosen.
4558 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4559  suballocations will be analyzed and the best one will be chosen.
4560 - Any other value is also acceptable.
4561 */
4562 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4563 
4564 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4565 {
4566  VMA_ASSERT(IsEmpty());
4567  pAllocationRequest->offset = 0;
4568  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4569  pAllocationRequest->sumItemSize = 0;
4570  pAllocationRequest->item = m_Suballocations.begin();
4571  pAllocationRequest->itemsToMakeLostCount = 0;
4572 }
4573 
4574 bool VmaBlockMetadata::CreateAllocationRequest(
4575  uint32_t currentFrameIndex,
4576  uint32_t frameInUseCount,
4577  VkDeviceSize bufferImageGranularity,
4578  VkDeviceSize allocSize,
4579  VkDeviceSize allocAlignment,
4580  VmaSuballocationType allocType,
4581  bool canMakeOtherLost,
4582  VmaAllocationRequest* pAllocationRequest)
4583 {
4584  VMA_ASSERT(allocSize > 0);
4585  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4586  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4587  VMA_HEAVY_ASSERT(Validate());
4588 
4589  // There is not enough total free space in this block to fulfill the request: Early return.
4590  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4591  {
4592  return false;
4593  }
4594 
4595  // Efficient search through m_FreeSuballocationsBySize, which is sorted by size.
4596  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4597  if(freeSuballocCount > 0)
4598  {
4599  if(VMA_BEST_FIT)
4600  {
4601  // Find first free suballocation with size not less than allocSize.
4602  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4603  m_FreeSuballocationsBySize.data(),
4604  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4605  allocSize,
4606  VmaSuballocationItemSizeLess());
4607  size_t index = it - m_FreeSuballocationsBySize.data();
4608  for(; index < freeSuballocCount; ++index)
4609  {
4610  if(CheckAllocation(
4611  currentFrameIndex,
4612  frameInUseCount,
4613  bufferImageGranularity,
4614  allocSize,
4615  allocAlignment,
4616  allocType,
4617  m_FreeSuballocationsBySize[index],
4618  false, // canMakeOtherLost
4619  &pAllocationRequest->offset,
4620  &pAllocationRequest->itemsToMakeLostCount,
4621  &pAllocationRequest->sumFreeSize,
4622  &pAllocationRequest->sumItemSize))
4623  {
4624  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4625  return true;
4626  }
4627  }
4628  }
4629  else
4630  {
4631  // Search starting from the biggest suballocations.
4632  for(size_t index = freeSuballocCount; index--; )
4633  {
4634  if(CheckAllocation(
4635  currentFrameIndex,
4636  frameInUseCount,
4637  bufferImageGranularity,
4638  allocSize,
4639  allocAlignment,
4640  allocType,
4641  m_FreeSuballocationsBySize[index],
4642  false, // canMakeOtherLost
4643  &pAllocationRequest->offset,
4644  &pAllocationRequest->itemsToMakeLostCount,
4645  &pAllocationRequest->sumFreeSize,
4646  &pAllocationRequest->sumItemSize))
4647  {
4648  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4649  return true;
4650  }
4651  }
4652  }
4653  }
4654 
4655  if(canMakeOtherLost)
4656  {
4657  // Brute-force algorithm. TODO: Come up with something better.
4658 
4659  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4660  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4661 
4662  VmaAllocationRequest tmpAllocRequest = {};
4663  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4664  suballocIt != m_Suballocations.end();
4665  ++suballocIt)
4666  {
4667  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4668  suballocIt->hAllocation->CanBecomeLost())
4669  {
4670  if(CheckAllocation(
4671  currentFrameIndex,
4672  frameInUseCount,
4673  bufferImageGranularity,
4674  allocSize,
4675  allocAlignment,
4676  allocType,
4677  suballocIt,
4678  canMakeOtherLost,
4679  &tmpAllocRequest.offset,
4680  &tmpAllocRequest.itemsToMakeLostCount,
4681  &tmpAllocRequest.sumFreeSize,
4682  &tmpAllocRequest.sumItemSize))
4683  {
4684  tmpAllocRequest.item = suballocIt;
4685 
4686  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4687  {
4688  *pAllocationRequest = tmpAllocRequest;
4689  }
4690  }
4691  }
4692  }
4693 
4694  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4695  {
4696  return true;
4697  }
4698  }
4699 
4700  return false;
4701 }
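// Call-flow sketch (illustrative; not a verbatim copy of the block vector's
// internal code, and the frame/granularity variables are assumed in scope):
// how the three-step protocol around this function is typically driven:
//
//   VmaAllocationRequest request;
//   if(metadata.CreateAllocationRequest(
//       currentFrameIndex, frameInUseCount, bufferImageGranularity,
//       vkMemReq.size, vkMemReq.alignment, suballocType,
//       canMakeOtherLost, &request))
//   {
//       if(metadata.MakeRequestedAllocationsLost(
//           currentFrameIndex, frameInUseCount, &request))
//       {
//           metadata.Alloc(request, suballocType, vkMemReq.size, hAllocation);
//       }
//   }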
4702 
4703 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4704  uint32_t currentFrameIndex,
4705  uint32_t frameInUseCount,
4706  VmaAllocationRequest* pAllocationRequest)
4707 {
4708  while(pAllocationRequest->itemsToMakeLostCount > 0)
4709  {
4710  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4711  {
4712  ++pAllocationRequest->item;
4713  }
4714  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4715  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4716  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4717  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4718  {
4719  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4720  --pAllocationRequest->itemsToMakeLostCount;
4721  }
4722  else
4723  {
4724  return false;
4725  }
4726  }
4727 
4728  VMA_HEAVY_ASSERT(Validate());
4729  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4730  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4731 
4732  return true;
4733 }
4734 
4735 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4736 {
4737  uint32_t lostAllocationCount = 0;
4738  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4739  it != m_Suballocations.end();
4740  ++it)
4741  {
4742  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4743  it->hAllocation->CanBecomeLost() &&
4744  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4745  {
4746  it = FreeSuballocation(it);
4747  ++lostAllocationCount;
4748  }
4749  }
4750  return lostAllocationCount;
4751 }
4752 
4753 void VmaBlockMetadata::Alloc(
4754  const VmaAllocationRequest& request,
4755  VmaSuballocationType type,
4756  VkDeviceSize allocSize,
4757  VmaAllocation hAllocation)
4758 {
4759  VMA_ASSERT(request.item != m_Suballocations.end());
4760  VmaSuballocation& suballoc = *request.item;
4761  // Given suballocation is a free block.
4762  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4763  // Given offset is inside this suballocation.
4764  VMA_ASSERT(request.offset >= suballoc.offset);
4765  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4766  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4767  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4768 
4769  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4770  // it to become used.
4771  UnregisterFreeSuballocation(request.item);
4772 
4773  suballoc.offset = request.offset;
4774  suballoc.size = allocSize;
4775  suballoc.type = type;
4776  suballoc.hAllocation = hAllocation;
4777 
4778  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4779  if(paddingEnd)
4780  {
4781  VmaSuballocation paddingSuballoc = {};
4782  paddingSuballoc.offset = request.offset + allocSize;
4783  paddingSuballoc.size = paddingEnd;
4784  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4785  VmaSuballocationList::iterator next = request.item;
4786  ++next;
4787  const VmaSuballocationList::iterator paddingEndItem =
4788  m_Suballocations.insert(next, paddingSuballoc);
4789  RegisterFreeSuballocation(paddingEndItem);
4790  }
4791 
4792  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4793  if(paddingBegin)
4794  {
4795  VmaSuballocation paddingSuballoc = {};
4796  paddingSuballoc.offset = request.offset - paddingBegin;
4797  paddingSuballoc.size = paddingBegin;
4798  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4799  const VmaSuballocationList::iterator paddingBeginItem =
4800  m_Suballocations.insert(request.item, paddingSuballoc);
4801  RegisterFreeSuballocation(paddingBeginItem);
4802  }
4803 
4804  // Update totals.
4805  m_FreeCount = m_FreeCount - 1;
4806  if(paddingBegin > 0)
4807  {
4808  ++m_FreeCount;
4809  }
4810  if(paddingEnd > 0)
4811  {
4812  ++m_FreeCount;
4813  }
4814  m_SumFreeSize -= allocSize;
4815 }
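// Worked example (illustrative): allocating 256 bytes at request.offset 128
// inside a 1024-byte free suballocation starting at offset 0 yields
// paddingBegin = 128 and paddingEnd = 1024 - 128 - 256 = 640. Two new FREE
// suballocations are inserted around the used one, so m_FreeCount changes by
// -1 + 2 = net +1, and m_SumFreeSize drops by exactly allocSize (256).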
4816 
4817 void VmaBlockMetadata::Free(const VmaAllocation allocation)
4818 {
4819  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4820  suballocItem != m_Suballocations.end();
4821  ++suballocItem)
4822  {
4823  VmaSuballocation& suballoc = *suballocItem;
4824  if(suballoc.hAllocation == allocation)
4825  {
4826  FreeSuballocation(suballocItem);
4827  VMA_HEAVY_ASSERT(Validate());
4828  return;
4829  }
4830  }
4831  VMA_ASSERT(0 && "Not found!");
4832 }
4833 
4834 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
4835 {
4836  VkDeviceSize lastSize = 0;
4837  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
4838  {
4839  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
4840 
4841  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
4842  {
4843  VMA_ASSERT(0);
4844  return false;
4845  }
4846  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4847  {
4848  VMA_ASSERT(0);
4849  return false;
4850  }
4851  if(it->size < lastSize)
4852  {
4853  VMA_ASSERT(0);
4854  return false;
4855  }
4856 
4857  lastSize = it->size;
4858  }
4859  return true;
4860 }
4861 
4862 bool VmaBlockMetadata::CheckAllocation(
4863  uint32_t currentFrameIndex,
4864  uint32_t frameInUseCount,
4865  VkDeviceSize bufferImageGranularity,
4866  VkDeviceSize allocSize,
4867  VkDeviceSize allocAlignment,
4868  VmaSuballocationType allocType,
4869  VmaSuballocationList::const_iterator suballocItem,
4870  bool canMakeOtherLost,
4871  VkDeviceSize* pOffset,
4872  size_t* itemsToMakeLostCount,
4873  VkDeviceSize* pSumFreeSize,
4874  VkDeviceSize* pSumItemSize) const
4875 {
4876  VMA_ASSERT(allocSize > 0);
4877  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4878  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4879  VMA_ASSERT(pOffset != VMA_NULL);
4880 
4881  *itemsToMakeLostCount = 0;
4882  *pSumFreeSize = 0;
4883  *pSumItemSize = 0;
4884 
4885  if(canMakeOtherLost)
4886  {
4887  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4888  {
4889  *pSumFreeSize = suballocItem->size;
4890  }
4891  else
4892  {
4893  if(suballocItem->hAllocation->CanBecomeLost() &&
4894  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4895  {
4896  ++*itemsToMakeLostCount;
4897  *pSumItemSize = suballocItem->size;
4898  }
4899  else
4900  {
4901  return false;
4902  }
4903  }
4904 
4905  // Space from this suballocation to the end of the block is too small for this request: Early return.
4906  if(m_Size - suballocItem->offset < allocSize)
4907  {
4908  return false;
4909  }
4910 
4911  // Start from offset equal to beginning of this suballocation.
4912  *pOffset = suballocItem->offset;
4913 
4914  // Apply VMA_DEBUG_MARGIN at the beginning.
4915  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4916  {
4917  *pOffset += VMA_DEBUG_MARGIN;
4918  }
4919 
4920  // Apply alignment.
4921  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4922  *pOffset = VmaAlignUp(*pOffset, alignment);
4923 
4924  // Check previous suballocations for BufferImageGranularity conflicts.
4925  // Make bigger alignment if necessary.
4926  if(bufferImageGranularity > 1)
4927  {
4928  bool bufferImageGranularityConflict = false;
4929  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4930  while(prevSuballocItem != m_Suballocations.cbegin())
4931  {
4932  --prevSuballocItem;
4933  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4934  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4935  {
4936  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4937  {
4938  bufferImageGranularityConflict = true;
4939  break;
4940  }
4941  }
4942  else
4943  // Already on previous page.
4944  break;
4945  }
4946  if(bufferImageGranularityConflict)
4947  {
4948  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4949  }
4950  }
4951 
4952  // Now that we have final *pOffset, check if we are past suballocItem.
4953  // If yes, return false - this function should be called for another suballocItem as starting point.
4954  if(*pOffset >= suballocItem->offset + suballocItem->size)
4955  {
4956  return false;
4957  }
4958 
4959  // Calculate padding at the beginning based on current offset.
4960  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4961 
4962  // Calculate required margin at the end if this is not last suballocation.
4963  VmaSuballocationList::const_iterator next = suballocItem;
4964  ++next;
4965  const VkDeviceSize requiredEndMargin =
4966  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4967 
4968  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4969  // Another early return check.
4970  if(suballocItem->offset + totalSize > m_Size)
4971  {
4972  return false;
4973  }
4974 
4975  // Advance lastSuballocItem until desired size is reached.
4976  // Update itemsToMakeLostCount.
4977  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4978  if(totalSize > suballocItem->size)
4979  {
4980  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4981  while(remainingSize > 0)
4982  {
4983  ++lastSuballocItem;
4984  if(lastSuballocItem == m_Suballocations.cend())
4985  {
4986  return false;
4987  }
4988  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4989  {
4990  *pSumFreeSize += lastSuballocItem->size;
4991  }
4992  else
4993  {
4994  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4995  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4996  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4997  {
4998  ++*itemsToMakeLostCount;
4999  *pSumItemSize += lastSuballocItem->size;
5000  }
5001  else
5002  {
5003  return false;
5004  }
5005  }
5006  remainingSize = (lastSuballocItem->size < remainingSize) ?
5007  remainingSize - lastSuballocItem->size : 0;
5008  }
5009  }
5010 
5011  // Check next suballocations for BufferImageGranularity conflicts.
5012  // If conflict exists, we must mark more allocations lost or fail.
5013  if(bufferImageGranularity > 1)
5014  {
5015  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5016  ++nextSuballocItem;
5017  while(nextSuballocItem != m_Suballocations.cend())
5018  {
5019  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5020  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5021  {
5022  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5023  {
5024  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5025  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5026  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5027  {
5028  ++*itemsToMakeLostCount;
5029  }
5030  else
5031  {
5032  return false;
5033  }
5034  }
5035  }
5036  else
5037  {
5038  // Already on next page.
5039  break;
5040  }
5041  ++nextSuballocItem;
5042  }
5043  }
5044  }
5045  else
5046  {
5047  const VmaSuballocation& suballoc = *suballocItem;
5048  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5049 
5050  *pSumFreeSize = suballoc.size;
5051 
5052  // Size of this suballocation is too small for this request: Early return.
5053  if(suballoc.size < allocSize)
5054  {
5055  return false;
5056  }
5057 
5058  // Start from offset equal to beginning of this suballocation.
5059  *pOffset = suballoc.offset;
5060 
5061  // Apply VMA_DEBUG_MARGIN at the beginning.
5062  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5063  {
5064  *pOffset += VMA_DEBUG_MARGIN;
5065  }
5066 
5067  // Apply alignment.
5068  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5069  *pOffset = VmaAlignUp(*pOffset, alignment);
5070 
5071  // Check previous suballocations for BufferImageGranularity conflicts.
5072  // Make bigger alignment if necessary.
5073  if(bufferImageGranularity > 1)
5074  {
5075  bool bufferImageGranularityConflict = false;
5076  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5077  while(prevSuballocItem != m_Suballocations.cbegin())
5078  {
5079  --prevSuballocItem;
5080  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5081  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5082  {
5083  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5084  {
5085  bufferImageGranularityConflict = true;
5086  break;
5087  }
5088  }
5089  else
5090  // Already on previous page.
5091  break;
5092  }
5093  if(bufferImageGranularityConflict)
5094  {
5095  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5096  }
5097  }
5098 
5099  // Calculate padding at the beginning based on current offset.
5100  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5101 
5102  // Calculate required margin at the end if this is not last suballocation.
5103  VmaSuballocationList::const_iterator next = suballocItem;
5104  ++next;
5105  const VkDeviceSize requiredEndMargin =
5106  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5107 
5108  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5109  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5110  {
5111  return false;
5112  }
5113 
5114  // Check next suballocations for BufferImageGranularity conflicts.
5115  // If conflict exists, allocation cannot be made here.
5116  if(bufferImageGranularity > 1)
5117  {
5118  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5119  ++nextSuballocItem;
5120  while(nextSuballocItem != m_Suballocations.cend())
5121  {
5122  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5123  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5124  {
5125  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5126  {
5127  return false;
5128  }
5129  }
5130  else
5131  {
5132  // Already on next page.
5133  break;
5134  }
5135  ++nextSuballocItem;
5136  }
5137  }
5138  }
5139 
5140  // All tests passed: Success. pOffset is already filled.
5141  return true;
5142 }
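// A minimal sketch of the offset computation performed by CheckAllocation
// above, assuming power-of-two alignments (which Vulkan guarantees). The names
// below are illustrative, not part of this library:
static inline VkDeviceSize ExampleAlignUp(VkDeviceSize offset, VkDeviceSize alignment)
{
    // Equivalent to VmaAlignUp: round offset up to a multiple of alignment.
    return (offset + alignment - 1) & ~(alignment - 1);
}

static inline VkDeviceSize ExampleComputeAllocOffset(
    VkDeviceSize suballocOffset,           // start of the free suballocation
    VkDeviceSize debugMargin,              // VMA_DEBUG_MARGIN, 0 in release builds
    VkDeviceSize allocAlignment,           // max(requested alignment, VMA_DEBUG_ALIGNMENT)
    VkDeviceSize bufferImageGranularity,
    bool hasGranularityConflictWithPrev)
{
    VkDeviceSize offset = suballocOffset + debugMargin;
    offset = ExampleAlignUp(offset, allocAlignment);
    if(hasGranularityConflictWithPrev)
    {
        // A linear resource would follow an optimal one (or vice versa) on the
        // same "page": bump the alignment up to bufferImageGranularity.
        offset = ExampleAlignUp(offset, bufferImageGranularity);
    }
    return offset;
}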
5143 
5144 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5145 {
5146  VMA_ASSERT(item != m_Suballocations.end());
5147  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5148 
5149  VmaSuballocationList::iterator nextItem = item;
5150  ++nextItem;
5151  VMA_ASSERT(nextItem != m_Suballocations.end());
5152  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5153 
5154  item->size += nextItem->size;
5155  --m_FreeCount;
5156  m_Suballocations.erase(nextItem);
5157 }
5158 
5159 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5160 {
5161  // Change this suballocation to be marked as free.
5162  VmaSuballocation& suballoc = *suballocItem;
5163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5164  suballoc.hAllocation = VK_NULL_HANDLE;
5165 
5166  // Update totals.
5167  ++m_FreeCount;
5168  m_SumFreeSize += suballoc.size;
5169 
5170  // Merge with previous and/or next suballocation if it's also free.
5171  bool mergeWithNext = false;
5172  bool mergeWithPrev = false;
5173 
5174  VmaSuballocationList::iterator nextItem = suballocItem;
5175  ++nextItem;
5176  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5177  {
5178  mergeWithNext = true;
5179  }
5180 
5181  VmaSuballocationList::iterator prevItem = suballocItem;
5182  if(suballocItem != m_Suballocations.begin())
5183  {
5184  --prevItem;
5185  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5186  {
5187  mergeWithPrev = true;
5188  }
5189  }
5190 
5191  if(mergeWithNext)
5192  {
5193  UnregisterFreeSuballocation(nextItem);
5194  MergeFreeWithNext(suballocItem);
5195  }
5196 
5197  if(mergeWithPrev)
5198  {
5199  UnregisterFreeSuballocation(prevItem);
5200  MergeFreeWithNext(prevItem);
5201  RegisterFreeSuballocation(prevItem);
5202  return prevItem;
5203  }
5204  else
5205  {
5206  RegisterFreeSuballocation(suballocItem);
5207  return suballocItem;
5208  }
5209 }
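// A condensed sketch of the coalescing policy implemented by FreeSuballocation
// above, on a plain std::list. ExampleRange and ExampleFree are hypothetical,
// illustration-only names; the real code additionally maintains
// m_FreeSuballocationsBySize via Register/UnregisterFreeSuballocation.
#include <iterator>
#include <list>

struct ExampleRange { VkDeviceSize offset; VkDeviceSize size; bool isFree; };

static std::list<ExampleRange>::iterator ExampleFree(
    std::list<ExampleRange>& ranges,
    std::list<ExampleRange>::iterator it)
{
    it->isFree = true;

    // Merge with the next range if it is also free.
    std::list<ExampleRange>::iterator next = std::next(it);
    if(next != ranges.end() && next->isFree)
    {
        it->size += next->size;
        ranges.erase(next);
    }

    // Merge with the previous range if it is also free.
    if(it != ranges.begin())
    {
        std::list<ExampleRange>::iterator prev = std::prev(it);
        if(prev->isFree)
        {
            prev->size += it->size;
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}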
5210 
5211 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5212 {
5213  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5214  VMA_ASSERT(item->size > 0);
5215 
5216  // You may want to enable this validation at the beginning or at the end of
5217  // this function, depending on what you want to check.
5218  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5219 
5220  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5221  {
5222  if(m_FreeSuballocationsBySize.empty())
5223  {
5224  m_FreeSuballocationsBySize.push_back(item);
5225  }
5226  else
5227  {
5228  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5229  }
5230  }
5231 
5232  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5233 }
5234 
5235 
5236 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5237 {
5238  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5239  VMA_ASSERT(item->size > 0);
5240 
5241  // You may want to enable this validation at the beginning or at the end of
5242  // this function, depending on what you want to check.
5243  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5244 
5245  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5246  {
5247  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5248  m_FreeSuballocationsBySize.data(),
5249  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5250  item,
5251  VmaSuballocationItemSizeLess());
5252  for(size_t index = it - m_FreeSuballocationsBySize.data();
5253  index < m_FreeSuballocationsBySize.size();
5254  ++index)
5255  {
5256  if(m_FreeSuballocationsBySize[index] == item)
5257  {
5258  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5259  return;
5260  }
5261  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5262  }
5263  VMA_ASSERT(0 && "Not found.");
5264  }
5265 
5266  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5267 }
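// Why the linear scan above is needed: m_FreeSuballocationsBySize is sorted by
// size only, so the binary search lands on the first entry of a run of
// equal-sized items and the loop walks that run until it hits the exact one.
// A sketch of the same idiom with std::lower_bound on hypothetical data, where
// the exact element is identified by its address among equal keys:
#include <algorithm>
#include <vector>

static bool ExampleRemoveExact(std::vector<int>& sortedKeys, const int* pExact)
{
    std::vector<int>::iterator it = std::lower_bound(
        sortedKeys.begin(), sortedKeys.end(), *pExact); // first not-less
    while(it != sortedKeys.end() && *it == *pExact)
    {
        if(&*it == pExact) // identity match within the run of equal keys
        {
            sortedKeys.erase(it);
            return true;
        }
        ++it;
    }
    return false; // Not found - corresponds to VMA_ASSERT(0 && "Not found.") above.
}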
5268 
5269 ////////////////////////////////////////////////////////////////////////////////
5270 // class VmaDeviceMemoryBlock
5271 
5272 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5273  m_MemoryTypeIndex(UINT32_MAX),
5274  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
5275  m_hMemory(VK_NULL_HANDLE),
5276  m_PersistentMap(false),
5277  m_pMappedData(VMA_NULL),
5278  m_Metadata(hAllocator)
5279 {
5280 }
5281 
5282 void VmaDeviceMemoryBlock::Init(
5283  uint32_t newMemoryTypeIndex,
5284  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
5285  VkDeviceMemory newMemory,
5286  VkDeviceSize newSize,
5287  bool persistentMap,
5288  void* pMappedData)
5289 {
5290  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5291 
5292  m_MemoryTypeIndex = newMemoryTypeIndex;
5293  m_BlockVectorType = newBlockVectorType;
5294  m_hMemory = newMemory;
5295  m_PersistentMap = persistentMap;
5296  m_pMappedData = pMappedData;
5297 
5298  m_Metadata.Init(newSize);
5299 }
5300 
5301 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5302 {
5303  // This is the most important assert in the entire library.
5304  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
5305  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5306 
5307  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5308  if(m_pMappedData != VMA_NULL)
5309  {
5310  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
5311  m_pMappedData = VMA_NULL;
5312  }
5313 
5314  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5315  m_hMemory = VK_NULL_HANDLE;
5316 }
5317 
5318 bool VmaDeviceMemoryBlock::Validate() const
5319 {
5320  if((m_hMemory == VK_NULL_HANDLE) ||
5321  (m_Metadata.GetSize() == 0))
5322  {
5323  return false;
5324  }
5325 
5326  return m_Metadata.Validate();
5327 }
5328 
5329 static void InitStatInfo(VmaStatInfo& outInfo)
5330 {
5331  memset(&outInfo, 0, sizeof(outInfo));
5332  outInfo.allocationSizeMin = UINT64_MAX;
5333  outInfo.unusedRangeSizeMin = UINT64_MAX;
5334 }
5335 
5336 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5337 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5338 {
5339  inoutInfo.blockCount += srcInfo.blockCount;
5340  inoutInfo.allocationCount += srcInfo.allocationCount;
5341  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5342  inoutInfo.usedBytes += srcInfo.usedBytes;
5343  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5344  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5345  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5346  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5347  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5348 }
5349 
5350 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5351 {
5352  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5353  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5354  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5355  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5356 }
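// VmaRoundDiv used above is integer division rounded to nearest rather than
// truncated. A minimal equivalent sketch, assuming an unsigned integer T,
// e.g. ExampleRoundDiv<uint32_t>(10, 4) == 3 while 10 / 4 == 2:
template<typename T>
static inline T ExampleRoundDiv(T x, T y)
{
    return (x + (y / static_cast<T>(2))) / y;
}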
5357 
5358 VmaPool_T::VmaPool_T(
5359  VmaAllocator hAllocator,
5360  const VmaPoolCreateInfo& createInfo) :
5361  m_BlockVector(
5362  hAllocator,
5363  createInfo.memoryTypeIndex,
5364  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5365  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5366  createInfo.blockSize,
5367  createInfo.minBlockCount,
5368  createInfo.maxBlockCount,
5369  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5370  createInfo.frameInUseCount,
5371  true) // isCustomPool
5372 {
5373 }
5374 
5375 VmaPool_T::~VmaPool_T()
5376 {
5377 }
5378 
5379 #if VMA_STATS_STRING_ENABLED
5380 
5381 #endif // #if VMA_STATS_STRING_ENABLED
5382 
5383 VmaBlockVector::VmaBlockVector(
5384  VmaAllocator hAllocator,
5385  uint32_t memoryTypeIndex,
5386  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5387  VkDeviceSize preferredBlockSize,
5388  size_t minBlockCount,
5389  size_t maxBlockCount,
5390  VkDeviceSize bufferImageGranularity,
5391  uint32_t frameInUseCount,
5392  bool isCustomPool) :
5393  m_hAllocator(hAllocator),
5394  m_MemoryTypeIndex(memoryTypeIndex),
5395  m_BlockVectorType(blockVectorType),
5396  m_PreferredBlockSize(preferredBlockSize),
5397  m_MinBlockCount(minBlockCount),
5398  m_MaxBlockCount(maxBlockCount),
5399  m_BufferImageGranularity(bufferImageGranularity),
5400  m_FrameInUseCount(frameInUseCount),
5401  m_IsCustomPool(isCustomPool),
5402  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5403  m_HasEmptyBlock(false),
5404  m_pDefragmentator(VMA_NULL)
5405 {
5406 }
5407 
5408 VmaBlockVector::~VmaBlockVector()
5409 {
5410  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5411 
5412  for(size_t i = m_Blocks.size(); i--; )
5413  {
5414  m_Blocks[i]->Destroy(m_hAllocator);
5415  vma_delete(m_hAllocator, m_Blocks[i]);
5416  }
5417 }
5418 
5419 VkResult VmaBlockVector::CreateMinBlocks()
5420 {
5421  for(size_t i = 0; i < m_MinBlockCount; ++i)
5422  {
5423  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5424  if(res != VK_SUCCESS)
5425  {
5426  return res;
5427  }
5428  }
5429  return VK_SUCCESS;
5430 }
5431 
5432 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5433 {
5434  pStats->size = 0;
5435  pStats->unusedSize = 0;
5436  pStats->allocationCount = 0;
5437  pStats->unusedRangeCount = 0;
5438  pStats->unusedRangeSizeMax = 0;
5439 
5440  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5441 
5442  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5443  {
5444  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5445  VMA_ASSERT(pBlock);
5446  VMA_HEAVY_ASSERT(pBlock->Validate());
5447  pBlock->m_Metadata.AddPoolStats(*pStats);
5448  }
5449 }
5450 
5451 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5452 
5453 VkResult VmaBlockVector::Allocate(
5454  VmaPool hCurrentPool,
5455  uint32_t currentFrameIndex,
5456  const VkMemoryRequirements& vkMemReq,
5457  const VmaAllocationCreateInfo& createInfo,
5458  VmaSuballocationType suballocType,
5459  VmaAllocation* pAllocation)
5460 {
5461  // Validate flags.
5462  if(createInfo.pool != VK_NULL_HANDLE &&
5463  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5464  {
5465  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5467  }
5468 
5469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5470 
5471  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5472  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5473  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5474  {
5475  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5476  VMA_ASSERT(pCurrBlock);
5477  VmaAllocationRequest currRequest = {};
5478  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5479  currentFrameIndex,
5480  m_FrameInUseCount,
5481  m_BufferImageGranularity,
5482  vkMemReq.size,
5483  vkMemReq.alignment,
5484  suballocType,
5485  false, // canMakeOtherLost
5486  &currRequest))
5487  {
5488  // Allocate from pCurrBlock.
5489  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5490 
5491  // We no longer have an empty block.
5492  if(pCurrBlock->m_Metadata.IsEmpty())
5493  {
5494  m_HasEmptyBlock = false;
5495  }
5496 
5497  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5498  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5499  (*pAllocation)->InitBlockAllocation(
5500  hCurrentPool,
5501  pCurrBlock,
5502  currRequest.offset,
5503  vkMemReq.alignment,
5504  vkMemReq.size,
5505  suballocType,
5506  createInfo.pUserData,
5507  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5508  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5509  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5510  return VK_SUCCESS;
5511  }
5512  }
5513 
5514  const bool canCreateNewBlock =
5515  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5516  (m_Blocks.size() < m_MaxBlockCount);
5517 
5518  // 2. Try to create new block.
5519  if(canCreateNewBlock)
5520  {
5521  // 2.1. Start with full preferredBlockSize.
5522  VkDeviceSize blockSize = m_PreferredBlockSize;
5523  size_t newBlockIndex = 0;
5524  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5525  // Allocating blocks of other sizes is allowed only in default pools.
5526  // In custom pools block size is fixed.
5527  if(res < 0 && m_IsCustomPool == false)
5528  {
5529  // 2.2. Try half the size.
5530  blockSize /= 2;
5531  if(blockSize >= vkMemReq.size)
5532  {
5533  res = CreateBlock(blockSize, &newBlockIndex);
5534  if(res < 0)
5535  {
5536  // 2.3. Try quarter the size.
5537  blockSize /= 2;
5538  if(blockSize >= vkMemReq.size)
5539  {
5540  res = CreateBlock(blockSize, &newBlockIndex);
5541  }
5542  }
5543  }
5544  }
5545  if(res == VK_SUCCESS)
5546  {
5547  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5548  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5549 
5550  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5551  VmaAllocationRequest allocRequest;
5552  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5553  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5554  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5555  (*pAllocation)->InitBlockAllocation(
5556  hCurrentPool,
5557  pBlock,
5558  allocRequest.offset,
5559  vkMemReq.alignment,
5560  vkMemReq.size,
5561  suballocType,
5562  createInfo.pUserData,
5563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5564  VMA_HEAVY_ASSERT(pBlock->Validate());
5565  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
5566 
5567  return VK_SUCCESS;
5568  }
5569  }
5570 
5571  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5572 
5573  // 3. Try to allocate from existing blocks with making other allocations lost.
5574  if(canMakeOtherLost)
5575  {
5576  uint32_t tryIndex = 0;
5577  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5578  {
5579  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5580  VmaAllocationRequest bestRequest = {};
5581  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5582 
5583  // 1. Search existing allocations.
5584  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5585  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5586  {
5587  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5588  VMA_ASSERT(pCurrBlock);
5589  VmaAllocationRequest currRequest = {};
5590  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5591  currentFrameIndex,
5592  m_FrameInUseCount,
5593  m_BufferImageGranularity,
5594  vkMemReq.size,
5595  vkMemReq.alignment,
5596  suballocType,
5597  canMakeOtherLost,
5598  &currRequest))
5599  {
5600  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5601  if(pBestRequestBlock == VMA_NULL ||
5602  currRequestCost < bestRequestCost)
5603  {
5604  pBestRequestBlock = pCurrBlock;
5605  bestRequest = currRequest;
5606  bestRequestCost = currRequestCost;
5607 
5608  if(bestRequestCost == 0)
5609  {
5610  break;
5611  }
5612  }
5613  }
5614  }
5615 
5616  if(pBestRequestBlock != VMA_NULL)
5617  {
5618  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5619  currentFrameIndex,
5620  m_FrameInUseCount,
5621  &bestRequest))
5622  {
5623  // We no longer have an empty block.
5624  if(pBestRequestBlock->m_Metadata.IsEmpty())
5625  {
5626  m_HasEmptyBlock = false;
5627  }
5628  // Allocate from this pBlock.
5629  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5630  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5631  (*pAllocation)->InitBlockAllocation(
5632  hCurrentPool,
5633  pBestRequestBlock,
5634  bestRequest.offset,
5635  vkMemReq.alignment,
5636  vkMemReq.size,
5637  suballocType,
5638  createInfo.pUserData,
5639  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5640  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5641  VMA_DEBUG_LOG(" Returned from existing block");
5642  return VK_SUCCESS;
5643  }
5644  // else: Some allocations must have been touched in the meantime. Next try.
5645  }
5646  else
5647  {
5648  // Could not find place in any of the blocks - break outer loop.
5649  break;
5650  }
5651  }
5652  /* Maximum number of tries exceeded - a very unlikely event: many other
5653  threads are simultaneously touching the allocations, making it impossible
5654  to mark them as lost while we try to allocate. */
5655  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5656  {
5657  return VK_ERROR_TOO_MANY_OBJECTS;
5658  }
5659  }
5660 
5661  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5662 }
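// A sketch of the new-block sizing policy from step 2 of Allocate above: start
// from the preferred block size and, in default (non-custom) pools, halve it
// at most twice, one halving after each failed vkAllocateMemory, as long as
// the request still fits. ExampleChooseNextBlockSize is an illustrative name:
static VkDeviceSize ExampleChooseNextBlockSize(
    VkDeviceSize lastFailedBlockSize,
    VkDeviceSize allocSize,
    bool isCustomPool)
{
    // Custom pools have a fixed block size - no retry with a smaller one.
    if(isCustomPool)
    {
        return 0; // 0 = give up
    }
    const VkDeviceSize halved = lastFailedBlockSize / 2;
    return (halved >= allocSize) ? halved : 0;
}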
5663 
5664 void VmaBlockVector::Free(
5665  VmaAllocation hAllocation)
5666 {
5667  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5668 
5669  // Scope for lock.
5670  {
5671  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5672 
5673  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5674 
5675  pBlock->m_Metadata.Free(hAllocation);
5676  VMA_HEAVY_ASSERT(pBlock->Validate());
5677 
5678  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5679 
5680  // pBlock became empty after this deallocation.
5681  if(pBlock->m_Metadata.IsEmpty())
5682  {
5683  // We already have an empty block - we don't want two, so delete this one.
5684  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5685  {
5686  pBlockToDelete = pBlock;
5687  Remove(pBlock);
5688  }
5689  // We now have our first empty block.
5690  else
5691  {
5692  m_HasEmptyBlock = true;
5693  }
5694  }
5695  // pBlock didn't become empty, but we have another empty block - find and free that one.
5696  // (This is optional, heuristics.)
5697  else if(m_HasEmptyBlock)
5698  {
5699  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
5700  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
5701  {
5702  pBlockToDelete = pLastBlock;
5703  m_Blocks.pop_back();
5704  m_HasEmptyBlock = false;
5705  }
5706  }
5707 
5708  IncrementallySortBlocks();
5709  }
5710 
5711  // Destruction of a free block. Deferred until this point, outside of the mutex
5712  // lock, for performance reasons.
5713  if(pBlockToDelete != VMA_NULL)
5714  {
5715  VMA_DEBUG_LOG(" Deleted empty block");
5716  pBlockToDelete->Destroy(m_hAllocator);
5717  vma_delete(m_hAllocator, pBlockToDelete);
5718  }
5719 }
5720 
5721 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5722 {
5723  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5724  {
5725  if(m_Blocks[blockIndex] == pBlock)
5726  {
5727  VmaVectorRemove(m_Blocks, blockIndex);
5728  return;
5729  }
5730  }
5731  VMA_ASSERT(0);
5732 }
5733 
5734 void VmaBlockVector::IncrementallySortBlocks()
5735 {
5736  // Bubble sort only until first swap.
5737  for(size_t i = 1; i < m_Blocks.size(); ++i)
5738  {
5739  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
5740  {
5741  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5742  return;
5743  }
5744  }
5745 }
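// The incremental sort above amortizes sorting cost: a single bubble-sort pass
// that stops at the first swap is O(n), and calling it after every free keeps
// an almost-sorted m_Blocks close to fully sorted (ascending by free space)
// without paying for a full O(n log n) sort on the hot path. A generic sketch:
#include <utility>
#include <vector>

template<typename T, typename Less>
static void ExampleIncrementalSort(std::vector<T>& v, Less less)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(less(v[i], v[i - 1]))
        {
            std::swap(v[i - 1], v[i]);
            return; // At most one swap per call.
        }
    }
}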
5746 
5747 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5748 {
5749  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5750  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5751  allocInfo.allocationSize = blockSize;
5752  VkDeviceMemory mem = VK_NULL_HANDLE;
5753  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5754  if(res < 0)
5755  {
5756  return res;
5757  }
5758 
5759  // New VkDeviceMemory successfully created.
5760 
5761  // Map memory if needed.
5762  void* pMappedData = VMA_NULL;
5763  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5764  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5765  {
5766  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5767  m_hAllocator->m_hDevice,
5768  mem,
5769  0,
5770  VK_WHOLE_SIZE,
5771  0,
5772  &pMappedData);
5773  if(res < 0)
5774  {
5775  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5776  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5777  return res;
5778  }
5779  }
5780 
5781  // Create a new block object for it.
5782  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5783  pBlock->Init(
5784  m_MemoryTypeIndex,
5785  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5786  mem,
5787  allocInfo.allocationSize,
5788  persistentMap,
5789  pMappedData);
5790 
5791  m_Blocks.push_back(pBlock);
5792  if(pNewBlockIndex != VMA_NULL)
5793  {
5794  *pNewBlockIndex = m_Blocks.size() - 1;
5795  }
5796 
5797  return VK_SUCCESS;
5798 }
5799 
5800 #if VMA_STATS_STRING_ENABLED
5801 
5802 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5803 {
5804  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5805 
5806  json.BeginObject();
5807 
5808  if(m_IsCustomPool)
5809  {
5810  json.WriteString("MemoryTypeIndex");
5811  json.WriteNumber(m_MemoryTypeIndex);
5812 
5813  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5814  {
5815  json.WriteString("Mapped");
5816  json.WriteBool(true);
5817  }
5818 
5819  json.WriteString("BlockSize");
5820  json.WriteNumber(m_PreferredBlockSize);
5821 
5822  json.WriteString("BlockCount");
5823  json.BeginObject(true);
5824  if(m_MinBlockCount > 0)
5825  {
5826  json.WriteString("Min");
5827  json.WriteNumber(m_MinBlockCount);
5828  }
5829  if(m_MaxBlockCount < SIZE_MAX)
5830  {
5831  json.WriteString("Max");
5832  json.WriteNumber(m_MaxBlockCount);
5833  }
5834  json.WriteString("Cur");
5835  json.WriteNumber(m_Blocks.size());
5836  json.EndObject();
5837 
5838  if(m_FrameInUseCount > 0)
5839  {
5840  json.WriteString("FrameInUseCount");
5841  json.WriteNumber(m_FrameInUseCount);
5842  }
5843  }
5844  else
5845  {
5846  json.WriteString("PreferredBlockSize");
5847  json.WriteNumber(m_PreferredBlockSize);
5848  }
5849 
5850  json.WriteString("Blocks");
5851  json.BeginArray();
5852  for(size_t i = 0; i < m_Blocks.size(); ++i)
5853  {
5854  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
5855  }
5856  json.EndArray();
5857 
5858  json.EndObject();
5859 }
5860 
5861 #endif // #if VMA_STATS_STRING_ENABLED
5862 
5863 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5864 {
5865  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5866 
5867  for(size_t i = m_Blocks.size(); i--; )
5868  {
5869  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5870  if(pBlock->m_pMappedData != VMA_NULL)
5871  {
5872  VMA_ASSERT(pBlock->m_PersistentMap);
5873  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5874  pBlock->m_pMappedData = VMA_NULL;
5875  }
5876  }
5877 }
5878 
5879 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5880 {
5881  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5882 
5883  VkResult finalResult = VK_SUCCESS;
5884  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5885  {
5886  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5887  if(pBlock->m_PersistentMap)
5888  {
5889  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5890  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5891  m_hAllocator->m_hDevice,
5892  pBlock->m_hMemory,
5893  0,
5894  VK_WHOLE_SIZE,
5895  0,
5896  &pBlock->m_pMappedData);
5897  if(localResult != VK_SUCCESS)
5898  {
5899  finalResult = localResult;
5900  }
5901  }
5902  }
5903  return finalResult;
5904 }
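// The pair above presumably backs the public functions
// vmaUnmapPersistentlyMappedMemory() / vmaMapPersistentlyMappedMemory() of this
// library. A sketch of the intended usage pattern around a period when device
// memory must not stay mapped; 'allocator' is assumed to be a valid
// VmaAllocator:
//
//     vmaUnmapPersistentlyMappedMemory(allocator);
//     // ... pointers previously returned for persistently mapped allocations
//     // must not be accessed here ...
//     vmaMapPersistentlyMappedMemory(allocator);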
5905 
5906 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5907  VmaAllocator hAllocator,
5908  uint32_t currentFrameIndex)
5909 {
5910  if(m_pDefragmentator == VMA_NULL)
5911  {
5912  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5913  hAllocator,
5914  this,
5915  currentFrameIndex);
5916  }
5917 
5918  return m_pDefragmentator;
5919 }
5920 
5921 VkResult VmaBlockVector::Defragment(
5922  VmaDefragmentationStats* pDefragmentationStats,
5923  VkDeviceSize& maxBytesToMove,
5924  uint32_t& maxAllocationsToMove)
5925 {
5926  if(m_pDefragmentator == VMA_NULL)
5927  {
5928  return VK_SUCCESS;
5929  }
5930 
5931  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5932 
5933  // Defragment.
5934  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5935 
5936  // Accumulate statistics.
5937  if(pDefragmentationStats != VMA_NULL)
5938  {
5939  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5940  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5941  pDefragmentationStats->bytesMoved += bytesMoved;
5942  pDefragmentationStats->allocationsMoved += allocationsMoved;
5943  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5944  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5945  maxBytesToMove -= bytesMoved;
5946  maxAllocationsToMove -= allocationsMoved;
5947  }
5948 
5949  // Free empty blocks.
5950  m_HasEmptyBlock = false;
5951  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5952  {
5953  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5954  if(pBlock->m_Metadata.IsEmpty())
5955  {
5956  if(m_Blocks.size() > m_MinBlockCount)
5957  {
5958  if(pDefragmentationStats != VMA_NULL)
5959  {
5960  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5961  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
5962  }
5963 
5964  VmaVectorRemove(m_Blocks, blockIndex);
5965  pBlock->Destroy(m_hAllocator);
5966  vma_delete(m_hAllocator, pBlock);
5967  }
5968  else
5969  {
5970  m_HasEmptyBlock = true;
5971  }
5972  }
5973  }
5974 
5975  return result;
5976 }
5977 
5978 void VmaBlockVector::DestroyDefragmentator()
5979 {
5980  if(m_pDefragmentator != VMA_NULL)
5981  {
5982  vma_delete(m_hAllocator, m_pDefragmentator);
5983  m_pDefragmentator = VMA_NULL;
5984  }
5985 }
5986 
5987 void VmaBlockVector::MakePoolAllocationsLost(
5988  uint32_t currentFrameIndex,
5989  size_t* pLostAllocationCount)
5990 {
5991  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5992 
5993  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5994  {
5995  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5996  VMA_ASSERT(pBlock);
5997  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5998  }
5999 }
6000 
6001 void VmaBlockVector::AddStats(VmaStats* pStats)
6002 {
6003  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6004  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6005 
6006  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6007 
6008  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6009  {
6010  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6011  VMA_ASSERT(pBlock);
6012  VMA_HEAVY_ASSERT(pBlock->Validate());
6013  VmaStatInfo allocationStatInfo;
6014  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6015  VmaAddStatInfo(pStats->total, allocationStatInfo);
6016  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6017  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6018  }
6019 }
6020 
6021 ////////////////////////////////////////////////////////////////////////////////
6022 // VmaDefragmentator members definition
6023 
6024 VmaDefragmentator::VmaDefragmentator(
6025  VmaAllocator hAllocator,
6026  VmaBlockVector* pBlockVector,
6027  uint32_t currentFrameIndex) :
6028  m_hAllocator(hAllocator),
6029  m_pBlockVector(pBlockVector),
6030  m_CurrentFrameIndex(currentFrameIndex),
6031  m_BytesMoved(0),
6032  m_AllocationsMoved(0),
6033  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6034  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6035 {
6036 }
6037 
6038 VmaDefragmentator::~VmaDefragmentator()
6039 {
6040  for(size_t i = m_Blocks.size(); i--; )
6041  {
6042  vma_delete(m_hAllocator, m_Blocks[i]);
6043  }
6044 }
6045 
6046 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6047 {
6048  AllocationInfo allocInfo;
6049  allocInfo.m_hAllocation = hAlloc;
6050  allocInfo.m_pChanged = pChanged;
6051  m_Allocations.push_back(allocInfo);
6052 }
6053 
6054 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6055 {
6056  // It has already been mapped for defragmentation.
6057  if(m_pMappedDataForDefragmentation)
6058  {
6059  *ppMappedData = m_pMappedDataForDefragmentation;
6060  return VK_SUCCESS;
6061  }
6062 
6063  // It is persistently mapped.
6064  if(m_pBlock->m_PersistentMap)
6065  {
6066  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
6067  *ppMappedData = m_pBlock->m_pMappedData;
6068  return VK_SUCCESS;
6069  }
6070 
6071  // Map on first usage.
6072  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6073  hAllocator->m_hDevice,
6074  m_pBlock->m_hMemory,
6075  0,
6076  VK_WHOLE_SIZE,
6077  0,
6078  &m_pMappedDataForDefragmentation);
6079  *ppMappedData = m_pMappedDataForDefragmentation;
6080  return res;
6081 }
6082 
6083 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6084 {
6085  if(m_pMappedDataForDefragmentation != VMA_NULL)
6086  {
6087  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
6088  }
6089 }
6090 
6091 VkResult VmaDefragmentator::DefragmentRound(
6092  VkDeviceSize maxBytesToMove,
6093  uint32_t maxAllocationsToMove)
6094 {
6095  if(m_Blocks.empty())
6096  {
6097  return VK_SUCCESS;
6098  }
6099 
6100  size_t srcBlockIndex = m_Blocks.size() - 1;
6101  size_t srcAllocIndex = SIZE_MAX;
6102  for(;;)
6103  {
6104  // 1. Find next allocation to move.
6105  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6106  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6107  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6108  {
6109  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6110  {
6111  // Finished: no more allocations to process.
6112  if(srcBlockIndex == 0)
6113  {
6114  return VK_SUCCESS;
6115  }
6116  else
6117  {
6118  --srcBlockIndex;
6119  srcAllocIndex = SIZE_MAX;
6120  }
6121  }
6122  else
6123  {
6124  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6125  }
6126  }
6127 
6128  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6129  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6130 
6131  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6132  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6133  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6134  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6135 
6136  // 2. Try to find new place for this allocation in preceding or current block.
6137  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6138  {
6139  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6140  VmaAllocationRequest dstAllocRequest;
6141  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6142  m_CurrentFrameIndex,
6143  m_pBlockVector->GetFrameInUseCount(),
6144  m_pBlockVector->GetBufferImageGranularity(),
6145  size,
6146  alignment,
6147  suballocType,
6148  false, // canMakeOtherLost
6149  &dstAllocRequest) &&
6150  MoveMakesSense(
6151  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6152  {
6153  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6154 
6155  // Reached limit on number of allocations or bytes to move.
6156  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6157  (m_BytesMoved + size > maxBytesToMove))
6158  {
6159  return VK_INCOMPLETE;
6160  }
6161 
6162  void* pDstMappedData = VMA_NULL;
6163  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6164  if(res != VK_SUCCESS)
6165  {
6166  return res;
6167  }
6168 
6169  void* pSrcMappedData = VMA_NULL;
6170  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6171  if(res != VK_SUCCESS)
6172  {
6173  return res;
6174  }
6175 
6176  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6177  memcpy(
6178  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6179  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6180  static_cast<size_t>(size));
6181 
6182  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6183  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6184 
6185  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6186 
6187  if(allocInfo.m_pChanged != VMA_NULL)
6188  {
6189  *allocInfo.m_pChanged = VK_TRUE;
6190  }
6191 
6192  ++m_AllocationsMoved;
6193  m_BytesMoved += size;
6194 
6195  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6196 
6197  break;
6198  }
6199  }
6200 
6201  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6202 
6203  if(srcAllocIndex > 0)
6204  {
6205  --srcAllocIndex;
6206  }
6207  else
6208  {
6209  if(srcBlockIndex > 0)
6210  {
6211  --srcBlockIndex;
6212  srcAllocIndex = SIZE_MAX;
6213  }
6214  else
6215  {
6216  return VK_SUCCESS;
6217  }
6218  }
6219  }
6220 }
6221 
6222 VkResult VmaDefragmentator::Defragment(
6223  VkDeviceSize maxBytesToMove,
6224  uint32_t maxAllocationsToMove)
6225 {
6226  if(m_Allocations.empty())
6227  {
6228  return VK_SUCCESS;
6229  }
6230 
6231  // Create block info for each block.
6232  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6233  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6234  {
6235  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6236  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6237  m_Blocks.push_back(pBlockInfo);
6238  }
6239 
6240  // Sort them by m_pBlock pointer value.
6241  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6242 
6243  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
6244  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6245  {
6246  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6247  // Now that we are inside VmaBlockVector::m_Mutex, we can do a final check whether this allocation was lost.
6248  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6249  {
6250  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6251  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6252  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6253  {
6254  (*it)->m_Allocations.push_back(allocInfo);
6255  }
6256  else
6257  {
6258  VMA_ASSERT(0);
6259  }
6260  }
6261  }
6262  m_Allocations.clear();
6263 
6264  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6265  {
6266  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6267  pBlockInfo->CalcHasNonMovableAllocations();
6268  pBlockInfo->SortAllocationsBySizeDescecnding();
6269  }
6270 
6271  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6272  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6273 
6274  // Execute defragmentation rounds (the main part).
6275  VkResult result = VK_SUCCESS;
6276  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6277  {
6278  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6279  }
6280 
6281  // Unmap blocks that were mapped for defragmentation.
6282  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6283  {
6284  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6285  }
6286 
6287  return result;
6288 }
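// The "sort owners by pointer value, then binary-search each item's owner"
// idiom used in Defragment above, sketched with std::lower_bound (the
// standard-library counterpart of VmaBinaryFindFirstNotLess). All names below
// are illustrative:
#include <algorithm>
#include <vector>

struct ExampleGroup { const void* pBlock; std::vector<int> items; };

static bool ExampleAddToGroup(
    std::vector<ExampleGroup>& groups, // must be sorted by pBlock
    const void* pBlock,
    int item)
{
    std::vector<ExampleGroup>::iterator it = std::lower_bound(
        groups.begin(), groups.end(), pBlock,
        [](const ExampleGroup& g, const void* key) { return g.pBlock < key; });
    if(it != groups.end() && it->pBlock == pBlock)
    {
        it->items.push_back(item);
        return true;
    }
    return false; // Owner not found - corresponds to VMA_ASSERT(0) above.
}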
6289 
6290 bool VmaDefragmentator::MoveMakesSense(
6291  size_t dstBlockIndex, VkDeviceSize dstOffset,
6292  size_t srcBlockIndex, VkDeviceSize srcOffset)
6293 {
6294  if(dstBlockIndex < srcBlockIndex)
6295  {
6296  return true;
6297  }
6298  if(dstBlockIndex > srcBlockIndex)
6299  {
6300  return false;
6301  }
6302  if(dstOffset < srcOffset)
6303  {
6304  return true;
6305  }
6306  return false;
6307 }
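// MoveMakesSense is simply a lexicographic "strictly earlier" comparison on the
// pair (blockIndex, offset): a move is worthwhile only if it transports data
// toward the front of the block vector. An equivalent sketch using std::pair:
#include <utility>

static bool ExampleMoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    return std::make_pair(dstBlockIndex, dstOffset) <
           std::make_pair(srcBlockIndex, srcOffset);
}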
6308 
6309 ////////////////////////////////////////////////////////////////////////////////
6310 // VmaAllocator_T
6311 
6312 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6313  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6314  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6315  m_PhysicalDevice(pCreateInfo->physicalDevice),
6316  m_hDevice(pCreateInfo->device),
6317  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6318  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6319  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6320  m_UnmapPersistentlyMappedMemoryCounter(0),
6321  m_PreferredLargeHeapBlockSize(0),
6322  m_PreferredSmallHeapBlockSize(0),
6323  m_CurrentFrameIndex(0),
6324  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6325 {
6326  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6327 
6328  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6329  memset(&m_MemProps, 0, sizeof(m_MemProps));
6330  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6331 
6332  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6333  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6334 
6335  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6336  {
6337  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6338  }
6339 
6340  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6341  {
6342  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6343  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6344  }
6345 
6346  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6347 
6348  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6349  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6350 
6351  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6352  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6353  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6354  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6355 
6356  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6357  {
6358  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6359  {
6360  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6361  if(limit != VK_WHOLE_SIZE)
6362  {
6363  m_HeapSizeLimit[heapIndex] = limit;
6364  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6365  {
6366  m_MemProps.memoryHeaps[heapIndex].size = limit;
6367  }
6368  }
6369  }
6370  }
6371 
6372  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6373  {
6374  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6375 
6376  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6377  {
6378  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6379  this,
6380  memTypeIndex,
6381  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6382  preferredBlockSize,
6383  0,
6384  SIZE_MAX,
6385  GetBufferImageGranularity(),
6386  pCreateInfo->frameInUseCount,
6387  false); // isCustomPool
6388  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6389  // because minBlockCount is 0.
6390  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6391  }
6392  }
6393 }
6394 
6395 VmaAllocator_T::~VmaAllocator_T()
6396 {
6397  VMA_ASSERT(m_Pools.empty());
6398 
6399  for(size_t i = GetMemoryTypeCount(); i--; )
6400  {
6401  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6402  {
6403  vma_delete(this, m_pDedicatedAllocations[i][j]);
6404  vma_delete(this, m_pBlockVectors[i][j]);
6405  }
6406  }
6407 }
6408 
6409 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6410 {
6411 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6412  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6413  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6414  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6415  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6416  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6417  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6418  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6419  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6420  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6421  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6422  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6423  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6424  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6425  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6426  // Ignoring vkGetBufferMemoryRequirements2KHR.
6427  // Ignoring vkGetImageMemoryRequirements2KHR.
6428 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6429 
6430 #define VMA_COPY_IF_NOT_NULL(funcName) \
6431  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6432 
6433  if(pVulkanFunctions != VMA_NULL)
6434  {
6435  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6436  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6437  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6438  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6439  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6440  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6441  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6442  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6443  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6444  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6445  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6446  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6447  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6448  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6449  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6450  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6451  }
6452 
6453 #undef VMA_COPY_IF_NOT_NULL
6454 
6455  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6456  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6457  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6458  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6459  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6460  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6461  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6462  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6463  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6464  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6465  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6466  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6467  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6468  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6469  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6470  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6471  if(m_UseKhrDedicatedAllocation)
6472  {
6473  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6474  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6475  }
6476 }
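// When VMA_STATIC_VULKAN_FUNCTIONS is 0, these pointers must be supplied
// through VmaAllocatorCreateInfo::pVulkanFunctions. A sketch of filling them
// from loader entry points - 'instance' and 'device' are assumed to be valid
// handles, and only two members are shown; the rest follow the same pattern:
static void ExampleFillVulkanFunctions(
    VkInstance instance, VkDevice device, VmaVulkanFunctions* pFuncs)
{
    pFuncs->vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)
        vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
    pFuncs->vkAllocateMemory = (PFN_vkAllocateMemory)
        vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ...and so on for every remaining member of VmaVulkanFunctions.
}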
6477 
6478 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6479 {
6480  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6481  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6482  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6483  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6484 }
6485 
6486 VkResult VmaAllocator_T::AllocateMemoryOfType(
6487  const VkMemoryRequirements& vkMemReq,
6488  bool dedicatedAllocation,
6489  VkBuffer dedicatedBuffer,
6490  VkImage dedicatedImage,
6491  const VmaAllocationCreateInfo& createInfo,
6492  uint32_t memTypeIndex,
6493  VmaSuballocationType suballocType,
6494  VmaAllocation* pAllocation)
6495 {
6496  VMA_ASSERT(pAllocation != VMA_NULL);
6497  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6498 
6499  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6500  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6501  VMA_ASSERT(blockVector);
6502 
6503  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6504 
6505  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6506  bool preferDedicatedMemory =
6507  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6508  dedicatedAllocation ||
6509  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
6510  vkMemReq.size > preferredBlockSize / 2;
6511 
6512  if(preferDedicatedMemory &&
6513  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6514  finalCreateInfo.pool == VK_NULL_HANDLE)
6515  {
6516  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6517  }
6518 
6519  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
6520  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
6521  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6522  {
6523  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
6524  }
6525 
6526  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6527  {
6528  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6529  {
6530  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6531  }
6532  else
6533  {
6534  return AllocateDedicatedMemory(
6535  vkMemReq.size,
6536  suballocType,
6537  memTypeIndex,
6538  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6539  finalCreateInfo.pUserData,
6540  dedicatedBuffer,
6541  dedicatedImage,
6542  pAllocation);
6543  }
6544  }
6545  else
6546  {
6547  VkResult res = blockVector->Allocate(
6548  VK_NULL_HANDLE, // hCurrentPool
6549  m_CurrentFrameIndex.load(),
6550  vkMemReq,
6551  finalCreateInfo,
6552  suballocType,
6553  pAllocation);
6554  if(res == VK_SUCCESS)
6555  {
6556  return res;
6557  }
6558 
6559  // Fallback: Try dedicated memory.
6560  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6561  {
6562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6563  }
6564  else
6565  {
6566  res = AllocateDedicatedMemory(
6567  vkMemReq.size,
6568  suballocType,
6569  memTypeIndex,
6570  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6571  finalCreateInfo.pUserData,
6572  dedicatedBuffer,
6573  dedicatedImage,
6574  pAllocation);
6575  if(res == VK_SUCCESS)
6576  {
6577  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
6578  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6579  return VK_SUCCESS;
6580  }
6581  else
6582  {
6583  // Everything failed: Return error code.
6584  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6585  return res;
6586  }
6587  }
6588  }
6589 }
6590 
6591 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6592  VkDeviceSize size,
6593  VmaSuballocationType suballocType,
6594  uint32_t memTypeIndex,
6595  bool map,
6596  void* pUserData,
6597  VkBuffer dedicatedBuffer,
6598  VkImage dedicatedImage,
6599  VmaAllocation* pAllocation)
6600 {
6601  VMA_ASSERT(pAllocation);
6602 
6603  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6604  allocInfo.memoryTypeIndex = memTypeIndex;
6605  allocInfo.allocationSize = size;
6606 
6607  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6608  if(m_UseKhrDedicatedAllocation)
6609  {
6610  if(dedicatedBuffer != VK_NULL_HANDLE)
6611  {
6612  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
6613  dedicatedAllocInfo.buffer = dedicatedBuffer;
6614  allocInfo.pNext = &dedicatedAllocInfo;
6615  }
6616  else if(dedicatedImage != VK_NULL_HANDLE)
6617  {
6618  dedicatedAllocInfo.image = dedicatedImage;
6619  allocInfo.pNext = &dedicatedAllocInfo;
6620  }
6621  }
6622 
6623  // Allocate VkDeviceMemory.
6624  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6625  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6626  if(res < 0)
6627  {
6628  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6629  return res;
6630  }
6631 
6632  void* pMappedData = VMA_NULL;
6633  if(map)
6634  {
6635  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6636  {
6637  res = (*m_VulkanFunctions.vkMapMemory)(
6638  m_hDevice,
6639  hMemory,
6640  0,
6641  VK_WHOLE_SIZE,
6642  0,
6643  &pMappedData);
6644  if(res < 0)
6645  {
6646  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6647  FreeVulkanMemory(memTypeIndex, size, hMemory);
6648  return res;
6649  }
6650  }
6651  }
6652 
6653  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6654  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6655 
6656  // Register it in m_pDedicatedAllocations.
6657  {
6658  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6659  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6660  VMA_ASSERT(pDedicatedAllocations);
6661  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
6662  }
6663 
6664  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
6665 
6666  return VK_SUCCESS;
6667 }
6668 
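// --- Illustrative sketch (not part of the original file) ---
// How AllocateDedicatedMemory is reached from the public API: requesting
// VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT gives the allocation its own
// VkDeviceMemory object, and when the request comes through vmaCreateBuffer() or
// vmaCreateImage() the handle is passed down so VK_KHR_dedicated_allocation can be
// used via the VkMemoryDedicatedAllocateInfoKHR pNext chain seen above. A sketch,
// assuming `allocator` and a filled `bufCreateInfo` (hypothetical names):
static VkResult ExampleCreateDedicatedBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo& bufCreateInfo,
    VkBuffer* pBuf,
    VmaAllocation* pAlloc)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuf, pAlloc, VMA_NULL);
}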
6669 void VmaAllocator_T::GetBufferMemoryRequirements(
6670  VkBuffer hBuffer,
6671  VkMemoryRequirements& memReq,
6672  bool& requiresDedicatedAllocation,
6673  bool& prefersDedicatedAllocation) const
6674 {
6675  if(m_UseKhrDedicatedAllocation)
6676  {
6677  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
6678  memReqInfo.buffer = hBuffer;
6679 
6680  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6681 
6682  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6683  memReq2.pNext = &memDedicatedReq;
6684 
6685  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6686 
6687  memReq = memReq2.memoryRequirements;
6688  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6689  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6690  }
6691  else
6692  {
6693  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
6694  requiresDedicatedAllocation = false;
6695  prefersDedicatedAllocation = false;
6696  }
6697 }
6698 
6699 void VmaAllocator_T::GetImageMemoryRequirements(
6700  VkImage hImage,
6701  VkMemoryRequirements& memReq,
6702  bool& requiresDedicatedAllocation,
6703  bool& prefersDedicatedAllocation) const
6704 {
6705  if(m_UseKhrDedicatedAllocation)
6706  {
6707  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
6708  memReqInfo.image = hImage;
6709 
6710  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6711 
6712  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6713  memReq2.pNext = &memDedicatedReq;
6714 
6715  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6716 
6717  memReq = memReq2.memoryRequirements;
6718  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6719  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6720  }
6721  else
6722  {
6723  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
6724  requiresDedicatedAllocation = false;
6725  prefersDedicatedAllocation = false;
6726  }
6727 }
6728 
6729 VkResult VmaAllocator_T::AllocateMemory(
6730  const VkMemoryRequirements& vkMemReq,
6731  bool requiresDedicatedAllocation,
6732  bool prefersDedicatedAllocation,
6733  VkBuffer dedicatedBuffer,
6734  VkImage dedicatedImage,
6735  const VmaAllocationCreateInfo& createInfo,
6736  VmaSuballocationType suballocType,
6737  VmaAllocation* pAllocation)
6738 {
6739  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
6740  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6741  {
6742  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6743  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6744  }
6745  if(requiresDedicatedAllocation)
6746  {
6747  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6748  {
6749  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
6750  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6751  }
6752  if(createInfo.pool != VK_NULL_HANDLE)
6753  {
6754  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
6755  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6756  }
6757  }
6758  if((createInfo.pool != VK_NULL_HANDLE) &&
6759  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
6760  {
6761  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
6762  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6763  }
6764 
6765  if(createInfo.pool != VK_NULL_HANDLE)
6766  {
6767  return createInfo.pool->m_BlockVector.Allocate(
6768  createInfo.pool,
6769  m_CurrentFrameIndex.load(),
6770  vkMemReq,
6771  createInfo,
6772  suballocType,
6773  pAllocation);
6774  }
6775  else
6776  {
6777  // Bit mask of Vulkan memory types acceptable for this allocation.
6778  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6779  uint32_t memTypeIndex = UINT32_MAX;
6780  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6781  if(res == VK_SUCCESS)
6782  {
6783  res = AllocateMemoryOfType(
6784  vkMemReq,
6785  requiresDedicatedAllocation || prefersDedicatedAllocation,
6786  dedicatedBuffer,
6787  dedicatedImage,
6788  createInfo,
6789  memTypeIndex,
6790  suballocType,
6791  pAllocation);
6792  // Succeeded on first try.
6793  if(res == VK_SUCCESS)
6794  {
6795  return res;
6796  }
6797  // Allocation from this memory type failed. Try other compatible memory types.
6798  else
6799  {
6800  for(;;)
6801  {
6802  // Remove old memTypeIndex from list of possibilities.
6803  memoryTypeBits &= ~(1u << memTypeIndex);
6804  // Find alternative memTypeIndex.
6805  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6806  if(res == VK_SUCCESS)
6807  {
6808  res = AllocateMemoryOfType(
6809  vkMemReq,
6810  requiresDedicatedAllocation || prefersDedicatedAllocation,
6811  dedicatedBuffer,
6812  dedicatedImage,
6813  createInfo,
6814  memTypeIndex,
6815  suballocType,
6816  pAllocation);
6817  // Allocation from this alternative memory type succeeded.
6818  if(res == VK_SUCCESS)
6819  {
6820  return res;
6821  }
6822  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6823  }
6824  // No other matching memory type index could be found.
6825  else
6826  {
6827  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6828  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6829  }
6830  }
6831  }
6832  }
6833  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6834  else
6835  return res;
6836  }
6837 }
6838 
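// --- Illustrative sketch (not part of the original file) ---
// Note the retry loop above: a single AllocateMemory() call may probe every memory
// type compatible with vkMemReq.memoryTypeBits before giving up. From the public
// API this is typically driven through vmaAllocateMemoryForBuffer(); a sketch with
// hypothetical names `allocator` and `buf`:
static VkResult ExampleAllocateForBuffer(VmaAllocator allocator, VkBuffer buf, VmaAllocation* pAlloc)
{
    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VmaAllocationInfo info = {};
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &createInfo, pAlloc, &info);
    // On success the caller still binds the buffer itself:
    // vkBindBufferMemory(device, buf, info.deviceMemory, info.offset).
    return res;
}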
6839 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6840 {
6841  VMA_ASSERT(allocation);
6842 
6843  if(allocation->CanBecomeLost() == false ||
6844  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6845  {
6846  switch(allocation->GetType())
6847  {
6848  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6849  {
6850  VmaBlockVector* pBlockVector = VMA_NULL;
6851  VmaPool hPool = allocation->GetPool();
6852  if(hPool != VK_NULL_HANDLE)
6853  {
6854  pBlockVector = &hPool->m_BlockVector;
6855  }
6856  else
6857  {
6858  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6859  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6860  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6861  }
6862  pBlockVector->Free(allocation);
6863  }
6864  break;
6865  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
6866  FreeDedicatedMemory(allocation);
6867  break;
6868  default:
6869  VMA_ASSERT(0);
6870  }
6871  }
6872 
6873  vma_delete(this, allocation);
6874 }
6875 
6876 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6877 {
6878  // Initialize.
6879  InitStatInfo(pStats->total);
6880  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6881  InitStatInfo(pStats->memoryType[i]);
6882  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6883  InitStatInfo(pStats->memoryHeap[i]);
6884 
6885  // Process default pools.
6886  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6887  {
6888  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6889  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6890  {
6891  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6892  VMA_ASSERT(pBlockVector);
6893  pBlockVector->AddStats(pStats);
6894  }
6895  }
6896 
6897  // Process custom pools.
6898  {
6899  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6900  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6901  {
6902  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6903  }
6904  }
6905 
6906  // Process dedicated allocations.
6907  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6908  {
6909  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6910  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6911  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6912  {
6913  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
6914  VMA_ASSERT(pDedicatedAllocVector);
6915  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6916  {
6917  VmaStatInfo allocationStatInfo;
6918  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
6919  VmaAddStatInfo(pStats->total, allocationStatInfo);
6920  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6921  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6922  }
6923  }
6924  }
6925 
6926  // Postprocess.
6927  VmaPostprocessCalcStatInfo(pStats->total);
6928  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6929  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6930  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6931  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6932 }
6933 
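// --- Illustrative sketch (not part of the original file) ---
// CalculateStats() aggregates default pools, custom pools, and dedicated allocations
// into per-type, per-heap, and total figures. A sketch of consuming them
// (`allocator` is an assumed, initialized handle):
static VkDeviceSize ExampleTotalUnusedBytes(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total is already postprocessed; see also vmaBuildStatsString() for JSON.
    return stats.total.unusedBytes;
}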
6934 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // PCI vendor ID 0x1002 (AMD).
6935 
6936 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6937 {
6938  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6939  {
6940  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6941  {
6942  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6943  {
6944  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6945  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6946  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6947  {
6948  // Process DedicatedAllocations.
6949  {
6950  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6951  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6952  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
6953  {
6954  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
6955  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
6956  }
6957  }
6958 
6959  // Process normal Allocations.
6960  {
6961  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6962  pBlockVector->UnmapPersistentlyMappedMemory();
6963  }
6964  }
6965  }
6966 
6967  // Process custom pools.
6968  {
6969  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6970  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6971  {
6972  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6973  }
6974  }
6975  }
6976  }
6977 }
6978 
6979 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6980 {
6981  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6982  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6983  {
6984  VkResult finalResult = VK_SUCCESS;
6985  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6986  {
6987  // Process custom pools.
6988  {
6989  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6990  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6991  {
6992  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6993  }
6994  }
6995 
6996  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6997  {
6998  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6999  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
7000  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7001  {
7002  // Process DedicatedAllocations.
7003  {
7004  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7005  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
7006  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
7007  {
7008  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
7009  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
7010  }
7011  }
7012 
7013  // Process normal Allocations.
7014  {
7015  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
7016  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
7017  if(localResult != VK_SUCCESS)
7018  {
7019  finalResult = localResult;
7020  }
7021  }
7022  }
7023  }
7024  }
7025  return finalResult;
7026  }
7027  else
7028  return VK_SUCCESS;
7029 }
7030 
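// --- Illustrative sketch (not part of the original file) ---
// Both functions above are reference-counted and only do real work on AMD hardware,
// where DEVICE_LOCAL | HOST_VISIBLE memory types exist. The intended pattern is to
// bracket work during which such memory should not stay mapped, e.g. a queue
// submission (hypothetical names; error handling kept minimal):
static VkResult ExampleSubmitUnmapped(
    VmaAllocator allocator,
    VkQueue queue,
    const VkSubmitInfo* pSubmitInfo,
    VkFence fence)
{
    vmaUnmapPersistentlyMappedMemory(allocator);
    VkResult submitRes = vkQueueSubmit(queue, 1, pSubmitInfo, fence);
    // Remapping can itself fail, because it calls vkMapMemory again.
    VkResult mapRes = vmaMapPersistentlyMappedMemory(allocator);
    return (submitRes != VK_SUCCESS) ? submitRes : mapRes;
}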
7031 VkResult VmaAllocator_T::Defragment(
7032  VmaAllocation* pAllocations,
7033  size_t allocationCount,
7034  VkBool32* pAllocationsChanged,
7035  const VmaDefragmentationInfo* pDefragmentationInfo,
7036  VmaDefragmentationStats* pDefragmentationStats)
7037 {
7038  if(pAllocationsChanged != VMA_NULL)
7039  {
7040  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged)); // Zero the whole output array, not just its first element.
7041  }
7042  if(pDefragmentationStats != VMA_NULL)
7043  {
7044  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7045  }
7046 
7047  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
7048  {
7049  VMA_DEBUG_LOG("ERROR: Cannot defragment between vmaUnmapPersistentlyMappedMemory() and vmaMapPersistentlyMappedMemory().");
7050  return VK_ERROR_MEMORY_MAP_FAILED;
7051  }
7052 
7053  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7054 
7055  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7056 
7057  const size_t poolCount = m_Pools.size();
7058 
7059  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7060  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7061  {
7062  VmaAllocation hAlloc = pAllocations[allocIndex];
7063  VMA_ASSERT(hAlloc);
7064  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7065  // DedicatedAlloc cannot be defragmented.
7066  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7067  // Only HOST_VISIBLE memory types can be defragmented.
7068  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7069  // Lost allocation cannot be defragmented.
7070  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7071  {
7072  VmaBlockVector* pAllocBlockVector = nullptr;
7073 
7074  const VmaPool hAllocPool = hAlloc->GetPool();
7075  // This allocation belongs to custom pool.
7076  if(hAllocPool != VK_NULL_HANDLE)
7077  {
7078  pAllocBlockVector = &hAllocPool->GetBlockVector();
7079  }
7080  // This allocation belongs to general pool.
7081  else
7082  {
7083  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
7084  }
7085 
7086  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7087 
7088  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7089  &pAllocationsChanged[allocIndex] : VMA_NULL;
7090  pDefragmentator->AddAllocation(hAlloc, pChanged);
7091  }
7092  }
7093 
7094  VkResult result = VK_SUCCESS;
7095 
7096  // ======== Main processing.
7097 
7098  VkDeviceSize maxBytesToMove = UINT64_MAX; // VkDeviceSize is 64-bit; SIZE_MAX would truncate the default on 32-bit targets.
7099  uint32_t maxAllocationsToMove = UINT32_MAX;
7100  if(pDefragmentationInfo != VMA_NULL)
7101  {
7102  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7103  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7104  }
7105 
7106  // Process standard memory.
7107  for(uint32_t memTypeIndex = 0;
7108  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7109  ++memTypeIndex)
7110  {
7111  // Only HOST_VISIBLE memory types can be defragmented.
7112  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7113  {
7114  for(uint32_t blockVectorType = 0;
7115  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
7116  ++blockVectorType)
7117  {
7118  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
7119  pDefragmentationStats,
7120  maxBytesToMove,
7121  maxAllocationsToMove);
7122  }
7123  }
7124  }
7125 
7126  // Process custom pools.
7127  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7128  {
7129  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7130  pDefragmentationStats,
7131  maxBytesToMove,
7132  maxAllocationsToMove);
7133  }
7134 
7135  // ======== Destroy defragmentators.
7136 
7137  // Process custom pools.
7138  for(size_t poolIndex = poolCount; poolIndex--; )
7139  {
7140  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7141  }
7142 
7143  // Process standard memory.
7144  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7145  {
7146  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7147  {
7148  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
7149  {
7150  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
7151  }
7152  }
7153  }
7154 
7155  return result;
7156 }
7157 
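// --- Illustrative sketch (not part of the original file) ---
// Defragment() only considers block-suballocated, HOST_VISIBLE, non-lost allocations,
// and it must not be called between vmaUnmapPersistentlyMappedMemory() and
// vmaMapPersistentlyMappedMemory(). For every allocation reported as changed, the
// caller has to recreate and re-bind the resource. A sketch with assumed arrays
// `allocs`/`changed` of length `count`:
static VkResult ExampleDefragment(
    VmaAllocator allocator,
    VmaAllocation* allocs,
    VkBool32* changed,
    size_t count)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // illustrative per-call budget
    defragInfo.maxAllocationsToMove = 128;
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocs, count, changed, &defragInfo, &stats);
    // For each i with changed[i] == VK_TRUE: destroy the old VkBuffer/VkImage and
    // create + bind a new one at the allocation's new deviceMemory/offset.
    return res;
}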
7158 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7159 {
7160  if(hAllocation->CanBecomeLost())
7161  {
7162  /*
7163  Warning: This is a carefully designed algorithm.
7164  Do not modify unless you really know what you're doing :)
7165  */
7166  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7167  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7168  for(;;)
7169  {
7170  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7171  {
7172  pAllocationInfo->memoryType = UINT32_MAX;
7173  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7174  pAllocationInfo->offset = 0;
7175  pAllocationInfo->size = hAllocation->GetSize();
7176  pAllocationInfo->pMappedData = VMA_NULL;
7177  pAllocationInfo->pUserData = hAllocation->GetUserData();
7178  return;
7179  }
7180  else if(localLastUseFrameIndex == localCurrFrameIndex)
7181  {
7182  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7183  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7184  pAllocationInfo->offset = hAllocation->GetOffset();
7185  pAllocationInfo->size = hAllocation->GetSize();
7186  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7187  pAllocationInfo->pUserData = hAllocation->GetUserData();
7188  return;
7189  }
7190  else // Last use time earlier than current time.
7191  {
7192  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7193  {
7194  localLastUseFrameIndex = localCurrFrameIndex;
7195  }
7196  }
7197  }
7198  }
7199  // We could use the same code here, but for performance reasons we don't need to touch the hAllocation->LastUseFrameIndex atomic when the allocation cannot become lost.
7200  else
7201  {
7202  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7203  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7204  pAllocationInfo->offset = hAllocation->GetOffset();
7205  pAllocationInfo->size = hAllocation->GetSize();
7206  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7207  pAllocationInfo->pUserData = hAllocation->GetUserData();
7208  }
7209 }
7210 
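// --- Illustrative sketch (not part of the original file) ---
// For allocations that can become lost, the compare-exchange loop above makes
// vmaGetAllocationInfo() both a query and a "touch" that marks the allocation as
// used in the current frame. A per-frame sketch (`allocator`, `alloc` assumed):
static bool ExampleTouchAllocation(VmaAllocator allocator, VmaAllocation alloc)
{
    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, alloc, &info);
    // A lost allocation reports deviceMemory == VK_NULL_HANDLE (and
    // memoryType == UINT32_MAX); the caller should free and recreate it.
    return info.deviceMemory != VK_NULL_HANDLE;
}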
7211 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7212 {
7213  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7214 
7215  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7216 
7217  if(newCreateInfo.maxBlockCount == 0)
7218  {
7219  newCreateInfo.maxBlockCount = SIZE_MAX;
7220  }
7221  if(newCreateInfo.blockSize == 0)
7222  {
7223  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7224  }
7225 
7226  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7227 
7228  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7229  if(res != VK_SUCCESS)
7230  {
7231  vma_delete(this, *pPool);
7232  *pPool = VMA_NULL;
7233  return res;
7234  }
7235 
7236  // Add to m_Pools.
7237  {
7238  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7239  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7240  }
7241 
7242  return VK_SUCCESS;
7243 }
7244 
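// --- Illustrative sketch (not part of the original file) ---
// Caller-side view of CreatePool(): leaving blockSize and maxBlockCount at zero
// selects the defaults computed above. `allocator` and `memTypeIndex` (e.g. obtained
// from vmaFindMemoryTypeIndex()) are assumed:
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize()
    poolInfo.minBlockCount = 1; // keep one block alive even when empty
    poolInfo.maxBlockCount = 0; // 0 = unlimited (mapped to SIZE_MAX above)
    return vmaCreatePool(allocator, &poolInfo, pPool);
}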
7245 void VmaAllocator_T::DestroyPool(VmaPool pool)
7246 {
7247  // Remove from m_Pools.
7248  {
7249  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7250  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7251  VMA_ASSERT(success && "Pool not found in Allocator.");
7252  }
7253 
7254  vma_delete(this, pool);
7255 }
7256 
7257 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7258 {
7259  pool->m_BlockVector.GetPoolStats(pPoolStats);
7260 }
7261 
7262 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7263 {
7264  m_CurrentFrameIndex.store(frameIndex);
7265 }
7266 
7267 void VmaAllocator_T::MakePoolAllocationsLost(
7268  VmaPool hPool,
7269  size_t* pLostAllocationCount)
7270 {
7271  hPool->m_BlockVector.MakePoolAllocationsLost(
7272  m_CurrentFrameIndex.load(),
7273  pLostAllocationCount);
7274 }
7275 
7276 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7277 {
7278  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
7279  (*pAllocation)->InitLost();
7280 }
7281 
7282 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7283 {
7284  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7285 
7286  VkResult res;
7287  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7288  {
7289  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7290  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7291  {
7292  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7293  if(res == VK_SUCCESS)
7294  {
7295  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7296  }
7297  }
7298  else
7299  {
7300  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7301  }
7302  }
7303  else
7304  {
7305  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7306  }
7307 
7308  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7309  {
7310  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7311  }
7312 
7313  return res;
7314 }
7315 
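// --- Illustrative sketch (not part of the original file) ---
// The m_HeapSizeLimit bookkeeping above is driven by
// VmaAllocatorCreateInfo::pHeapSizeLimit; VK_WHOLE_SIZE means "no limit". A sketch
// of configuring it at allocator creation (`physicalDevice`/`device` assumed; the
// array is sized VK_MAX_MEMORY_HEAPS to cover every heap):
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice,
    VkDevice device,
    VmaAllocator* pAllocator)
{
    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // unlimited
    }
    heapLimits[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB (illustrative)

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.pHeapSizeLimit = heapLimits;
    return vmaCreateAllocator(&createInfo, pAllocator);
}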
7316 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7317 {
7318  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7319  {
7320  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7321  }
7322 
7323  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7324 
7325  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7326  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7327  {
7328  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7329  m_HeapSizeLimit[heapIndex] += size;
7330  }
7331 }
7332 
7333 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7334 {
7335  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7336 
7337  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7338  {
7339  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7340  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
7341  VMA_ASSERT(pDedicatedAllocations);
7342  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7343  VMA_ASSERT(success);
7344  }
7345 
7346  VkDeviceMemory hMemory = allocation->GetMemory();
7347 
7348  if(allocation->GetMappedData() != VMA_NULL)
7349  {
7350  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7351  }
7352 
7353  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7354 
7355  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7356 }
7357 
7358 #if VMA_STATS_STRING_ENABLED
7359 
7360 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7361 {
7362  bool dedicatedAllocationsStarted = false;
7363  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7364  {
7365  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7366  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7367  {
7368  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
7369  VMA_ASSERT(pDedicatedAllocVector);
7370  if(pDedicatedAllocVector->empty() == false)
7371  {
7372  if(dedicatedAllocationsStarted == false)
7373  {
7374  dedicatedAllocationsStarted = true;
7375  json.WriteString("DedicatedAllocations");
7376  json.BeginObject();
7377  }
7378 
7379  json.BeginString("Type ");
7380  json.ContinueString(memTypeIndex);
7381  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7382  {
7383  json.ContinueString(" Mapped");
7384  }
7385  json.EndString();
7386 
7387  json.BeginArray();
7388 
7389  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7390  {
7391  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7392  json.BeginObject(true);
7393 
7394  json.WriteString("Size");
7395  json.WriteNumber(hAlloc->GetSize());
7396 
7397  json.WriteString("Type");
7398  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7399 
7400  json.EndObject();
7401  }
7402 
7403  json.EndArray();
7404  }
7405  }
7406  }
7407  if(dedicatedAllocationsStarted)
7408  {
7409  json.EndObject();
7410  }
7411 
7412  {
7413  bool allocationsStarted = false;
7414  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7415  {
7416  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7417  {
7418  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7419  {
7420  if(allocationsStarted == false)
7421  {
7422  allocationsStarted = true;
7423  json.WriteString("DefaultPools");
7424  json.BeginObject();
7425  }
7426 
7427  json.BeginString("Type ");
7428  json.ContinueString(memTypeIndex);
7429  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7430  {
7431  json.ContinueString(" Mapped");
7432  }
7433  json.EndString();
7434 
7435  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7436  }
7437  }
7438  }
7439  if(allocationsStarted)
7440  {
7441  json.EndObject();
7442  }
7443  }
7444 
7445  {
7446  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7447  const size_t poolCount = m_Pools.size();
7448  if(poolCount > 0)
7449  {
7450  json.WriteString("Pools");
7451  json.BeginArray();
7452  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7453  {
7454  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7455  }
7456  json.EndArray();
7457  }
7458  }
7459 }
7460 
7461 #endif // #if VMA_STATS_STRING_ENABLED
7462 
7463 static VkResult AllocateMemoryForImage(
7464  VmaAllocator allocator,
7465  VkImage image,
7466  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7467  VmaSuballocationType suballocType,
7468  VmaAllocation* pAllocation)
7469 {
7470  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7471 
7472  VkMemoryRequirements vkMemReq = {};
7473  bool requiresDedicatedAllocation = false;
7474  bool prefersDedicatedAllocation = false;
7475  allocator->GetImageMemoryRequirements(image, vkMemReq,
7476  requiresDedicatedAllocation, prefersDedicatedAllocation);
7477 
7478  return allocator->AllocateMemory(
7479  vkMemReq,
7480  requiresDedicatedAllocation,
7481  prefersDedicatedAllocation,
7482  VK_NULL_HANDLE, // dedicatedBuffer
7483  image, // dedicatedImage
7484  *pAllocationCreateInfo,
7485  suballocType,
7486  pAllocation);
7487 }
7488 
7489 ////////////////////////////////////////////////////////////////////////////////
7490 // Public interface
7491 
7492 VkResult vmaCreateAllocator(
7493  const VmaAllocatorCreateInfo* pCreateInfo,
7494  VmaAllocator* pAllocator)
7495 {
7496  VMA_ASSERT(pCreateInfo && pAllocator);
7497  VMA_DEBUG_LOG("vmaCreateAllocator");
7498  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7499  return VK_SUCCESS;
7500 }
7501 
7502 void vmaDestroyAllocator(
7503  VmaAllocator allocator)
7504 {
7505  if(allocator != VK_NULL_HANDLE)
7506  {
7507  VMA_DEBUG_LOG("vmaDestroyAllocator");
7508  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7509  vma_delete(&allocationCallbacks, allocator);
7510  }
7511 }
7512 
7513 void vmaGetPhysicalDeviceProperties(
7514  VmaAllocator allocator,
7515  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7516 {
7517  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7518  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7519 }
7520 
7521 void vmaGetMemoryProperties(
7522  VmaAllocator allocator,
7523  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7524 {
7525  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7526  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7527 }
7528 
7529 void vmaGetMemoryTypeProperties(
7530  VmaAllocator allocator,
7531  uint32_t memoryTypeIndex,
7532  VkMemoryPropertyFlags* pFlags)
7533 {
7534  VMA_ASSERT(allocator && pFlags);
7535  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7536  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7537 }
7538 
7539 void vmaSetCurrentFrameIndex(
7540  VmaAllocator allocator,
7541  uint32_t frameIndex)
7542 {
7543  VMA_ASSERT(allocator);
7544  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7545 
7546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7547 
7548  allocator->SetCurrentFrameIndex(frameIndex);
7549 }
7550 
7551 void vmaCalculateStats(
7552  VmaAllocator allocator,
7553  VmaStats* pStats)
7554 {
7555  VMA_ASSERT(allocator && pStats);
7556  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7557  allocator->CalculateStats(pStats);
7558 }
7559 
7560 #if VMA_STATS_STRING_ENABLED
7561 
7562 void vmaBuildStatsString(
7563  VmaAllocator allocator,
7564  char** ppStatsString,
7565  VkBool32 detailedMap)
7566 {
7567  VMA_ASSERT(allocator && ppStatsString);
7568  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7569 
7570  VmaStringBuilder sb(allocator);
7571  {
7572  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7573  json.BeginObject();
7574 
7575  VmaStats stats;
7576  allocator->CalculateStats(&stats);
7577 
7578  json.WriteString("Total");
7579  VmaPrintStatInfo(json, stats.total);
7580 
7581  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7582  {
7583  json.BeginString("Heap ");
7584  json.ContinueString(heapIndex);
7585  json.EndString();
7586  json.BeginObject();
7587 
7588  json.WriteString("Size");
7589  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7590 
7591  json.WriteString("Flags");
7592  json.BeginArray(true);
7593  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7594  {
7595  json.WriteString("DEVICE_LOCAL");
7596  }
7597  json.EndArray();
7598 
7599  if(stats.memoryHeap[heapIndex].blockCount > 0)
7600  {
7601  json.WriteString("Stats");
7602  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7603  }
7604 
7605  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7606  {
7607  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7608  {
7609  json.BeginString("Type ");
7610  json.ContinueString(typeIndex);
7611  json.EndString();
7612 
7613  json.BeginObject();
7614 
7615  json.WriteString("Flags");
7616  json.BeginArray(true);
7617  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7618  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7619  {
7620  json.WriteString("DEVICE_LOCAL");
7621  }
7622  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7623  {
7624  json.WriteString("HOST_VISIBLE");
7625  }
7626  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7627  {
7628  json.WriteString("HOST_COHERENT");
7629  }
7630  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7631  {
7632  json.WriteString("HOST_CACHED");
7633  }
7634  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7635  {
7636  json.WriteString("LAZILY_ALLOCATED");
7637  }
7638  json.EndArray();
7639 
7640  if(stats.memoryType[typeIndex].blockCount > 0)
7641  {
7642  json.WriteString("Stats");
7643  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7644  }
7645 
7646  json.EndObject();
7647  }
7648  }
7649 
7650  json.EndObject();
7651  }
7652  if(detailedMap == VK_TRUE)
7653  {
7654  allocator->PrintDetailedMap(json);
7655  }
7656 
7657  json.EndObject();
7658  }
7659 
7660  const size_t len = sb.GetLength();
7661  char* const pChars = vma_new_array(allocator, char, len + 1);
7662  if(len > 0)
7663  {
7664  memcpy(pChars, sb.GetData(), len);
7665  }
7666  pChars[len] = '\0';
7667  *ppStatsString = pChars;
7668 }
7669 
7670 void vmaFreeStatsString(
7671  VmaAllocator allocator,
7672  char* pStatsString)
7673 {
7674  if(pStatsString != VMA_NULL)
7675  {
7676  VMA_ASSERT(allocator);
7677  size_t len = strlen(pStatsString);
7678  vma_delete_array(allocator, pStatsString, len + 1);
7679  }
7680 }
7681 
7682 #endif // #if VMA_STATS_STRING_ENABLED
7683 
7686 VkResult vmaFindMemoryTypeIndex(
7687  VmaAllocator allocator,
7688  uint32_t memoryTypeBits,
7689  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7690  uint32_t* pMemoryTypeIndex)
7691 {
7692  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7693  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7694  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7695 
7696  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7697  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7698  if(preferredFlags == 0)
7699  {
7700  preferredFlags = requiredFlags;
7701  }
7702  // After the defaulting above, preferredFlags must be a superset of requiredFlags.
7703  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7704 
7705  // Convert usage to requiredFlags and preferredFlags.
7706  switch(pAllocationCreateInfo->usage)
7707  {
7708  case VMA_MEMORY_USAGE_UNKNOWN:
7709  break;
7710  case VMA_MEMORY_USAGE_GPU_ONLY:
7711  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7712  break;
7713  case VMA_MEMORY_USAGE_CPU_ONLY:
7714  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7715  break;
7716  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7717  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7718  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7719  break;
7720  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7721  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7722  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7723  break;
7724  default:
7725  break;
7726  }
7727 
7728  *pMemoryTypeIndex = UINT32_MAX;
7729  uint32_t minCost = UINT32_MAX;
7730  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7731  memTypeIndex < allocator->GetMemoryTypeCount();
7732  ++memTypeIndex, memTypeBit <<= 1)
7733  {
7734  // This memory type is acceptable according to memoryTypeBits bitmask.
7735  if((memTypeBit & memoryTypeBits) != 0)
7736  {
7737  const VkMemoryPropertyFlags currFlags =
7738  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7739  // This memory type contains requiredFlags.
7740  if((requiredFlags & ~currFlags) == 0)
7741  {
7742  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7743  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7744  // Remember memory type with lowest cost.
7745  if(currCost < minCost)
7746  {
7747  *pMemoryTypeIndex = memTypeIndex;
7748  if(currCost == 0)
7749  {
7750  return VK_SUCCESS;
7751  }
7752  minCost = currCost;
7753  }
7754  }
7755  }
7756  }
7757  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7758 }
7759 
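// --- Illustrative sketch (not part of the original file) ---
// In the search above, requiredFlags are hard constraints while preferredFlags only
// lower the cost, so the best-but-not-perfect type can still be chosen. A usage
// sketch for a staging buffer (`allocator` assumed; memoryTypeBits would normally
// come from vkGetBufferMemoryRequirements()):
static VkResult ExampleFindStagingMemoryType(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo createInfo = {};
    // CPU_ONLY translates to required HOST_VISIBLE | HOST_COHERENT, as shown above.
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, pMemTypeIndex);
}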
7760 VkResult vmaCreatePool(
7761  VmaAllocator allocator,
7762  const VmaPoolCreateInfo* pCreateInfo,
7763  VmaPool* pPool)
7764 {
7765  VMA_ASSERT(allocator && pCreateInfo && pPool);
7766 
7767  VMA_DEBUG_LOG("vmaCreatePool");
7768 
7769  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7770 
7771  return allocator->CreatePool(pCreateInfo, pPool);
7772 }
7773 
7774 void vmaDestroyPool(
7775  VmaAllocator allocator,
7776  VmaPool pool)
7777 {
7778  VMA_ASSERT(allocator && pool);
7779 
7780  VMA_DEBUG_LOG("vmaDestroyPool");
7781 
7782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7783 
7784  allocator->DestroyPool(pool);
7785 }
7786 
7787 void vmaGetPoolStats(
7788  VmaAllocator allocator,
7789  VmaPool pool,
7790  VmaPoolStats* pPoolStats)
7791 {
7792  VMA_ASSERT(allocator && pool && pPoolStats);
7793 
7794  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7795 
7796  allocator->GetPoolStats(pool, pPoolStats);
7797 }
7798 
7799 void vmaMakePoolAllocationsLost(
7800  VmaAllocator allocator,
7801  VmaPool pool,
7802  size_t* pLostAllocationCount)
7803 {
7804  VMA_ASSERT(allocator && pool);
7805 
7806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7807 
7808  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7809 }
7810 
7811 VkResult vmaAllocateMemory(
7812  VmaAllocator allocator,
7813  const VkMemoryRequirements* pVkMemoryRequirements,
7814  const VmaAllocationCreateInfo* pCreateInfo,
7815  VmaAllocation* pAllocation,
7816  VmaAllocationInfo* pAllocationInfo)
7817 {
7818  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7819 
7820  VMA_DEBUG_LOG("vmaAllocateMemory");
7821 
7822  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7823 
7824  VkResult result = allocator->AllocateMemory(
7825  *pVkMemoryRequirements,
7826  false, // requiresDedicatedAllocation
7827  false, // prefersDedicatedAllocation
7828  VK_NULL_HANDLE, // dedicatedBuffer
7829  VK_NULL_HANDLE, // dedicatedImage
7830  *pCreateInfo,
7831  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7832  pAllocation);
7833 
7834  if(pAllocationInfo && result == VK_SUCCESS)
7835  {
7836  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7837  }
7838 
7839  return result;
7840 }
7841 
7842 VkResult vmaAllocateMemoryForBuffer(
7843  VmaAllocator allocator,
7844  VkBuffer buffer,
7845  const VmaAllocationCreateInfo* pCreateInfo,
7846  VmaAllocation* pAllocation,
7847  VmaAllocationInfo* pAllocationInfo)
7848 {
7849  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7850 
7851  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7852 
7853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7854 
7855  VkMemoryRequirements vkMemReq = {};
7856  bool requiresDedicatedAllocation = false;
7857  bool prefersDedicatedAllocation = false;
7858  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
7859  requiresDedicatedAllocation,
7860  prefersDedicatedAllocation);
7861 
7862  VkResult result = allocator->AllocateMemory(
7863  vkMemReq,
7864  requiresDedicatedAllocation,
7865  prefersDedicatedAllocation,
7866  buffer, // dedicatedBuffer
7867  VK_NULL_HANDLE, // dedicatedImage
7868  *pCreateInfo,
7869  VMA_SUBALLOCATION_TYPE_BUFFER,
7870  pAllocation);
7871 
7872  if(pAllocationInfo && result == VK_SUCCESS)
7873  {
7874  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7875  }
7876 
7877  return result;
7878 }
7879 
7880 VkResult vmaAllocateMemoryForImage(
7881  VmaAllocator allocator,
7882  VkImage image,
7883  const VmaAllocationCreateInfo* pCreateInfo,
7884  VmaAllocation* pAllocation,
7885  VmaAllocationInfo* pAllocationInfo)
7886 {
7887  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7888 
7889  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7890 
7891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7892 
7893  VkResult result = AllocateMemoryForImage(
7894  allocator,
7895  image,
7896  pCreateInfo,
7897  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7898  pAllocation);
7899 
7900  if(pAllocationInfo && result == VK_SUCCESS)
7901  {
7902  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7903  }
7904 
7905  return result;
7906 }
7907 
7908 void vmaFreeMemory(
7909  VmaAllocator allocator,
7910  VmaAllocation allocation)
7911 {
7912  VMA_ASSERT(allocator && allocation);
7913 
7914  VMA_DEBUG_LOG("vmaFreeMemory");
7915 
7916  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7917 
7918  allocator->FreeMemory(allocation);
7919 }
7920 
7921 void vmaGetAllocationInfo(
7922  VmaAllocator allocator,
7923  VmaAllocation allocation,
7924  VmaAllocationInfo* pAllocationInfo)
7925 {
7926  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7927 
7928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7929 
7930  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7931 }
7932 
7933 void vmaSetAllocationUserData(
7934  VmaAllocator allocator,
7935  VmaAllocation allocation,
7936  void* pUserData)
7937 {
7938  VMA_ASSERT(allocator && allocation);
7939 
7940  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7941 
7942  allocation->SetUserData(pUserData);
7943 }
7944 
7945 void vmaCreateLostAllocation(
7946  VmaAllocator allocator,
7947  VmaAllocation* pAllocation)
7948 {
7949  VMA_ASSERT(allocator && pAllocation);
7950 
7951  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7952 
7953  allocator->CreateLostAllocation(pAllocation);
7954 }
7955 
7956 VkResult vmaMapMemory(
7957  VmaAllocator allocator,
7958  VmaAllocation allocation,
7959  void** ppData)
7960 {
7961  VMA_ASSERT(allocator && allocation && ppData);
7962 
7963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7964 
7965  return (*allocator->GetVulkanFunctions().vkMapMemory)(
7966  allocator->m_hDevice,
7967  allocation->GetMemory(),
7968  allocation->GetOffset(),
7969  allocation->GetSize(),
7970  0,
7971  ppData);
7972 }
7973 
7974 void vmaUnmapMemory(
7975  VmaAllocator allocator,
7976  VmaAllocation allocation)
7977 {
7978  VMA_ASSERT(allocator && allocation);
7979 
7980  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7981 
7982  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
7983 }
7984 
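// --- Illustrative sketch (not part of the original file) ---
// vmaMapMemory()/vmaUnmapMemory() above apply the suballocation's offset and size
// automatically. A map/write/unmap round trip, assuming a HOST_VISIBLE,
// HOST_COHERENT allocation and hypothetical `data`/`size`:
static VkResult ExampleUpload(
    VmaAllocator allocator,
    VmaAllocation alloc,
    const void* data,
    size_t size)
{
    void* pMapped = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pMapped);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pMapped, data, size); // size must not exceed the allocation's size
    vmaUnmapMemory(allocator, alloc);
    return VK_SUCCESS;
}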
7985 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7986 {
7987  VMA_ASSERT(allocator);
7988 
7989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7990 
7991  allocator->UnmapPersistentlyMappedMemory();
7992 }
7993 
7994 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7995 {
7996  VMA_ASSERT(allocator);
7997 
7998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7999 
8000  return allocator->MapPersistentlyMappedMemory();
8001 }
8002 
8003 VkResult vmaDefragment(
8004  VmaAllocator allocator,
8005  VmaAllocation* pAllocations,
8006  size_t allocationCount,
8007  VkBool32* pAllocationsChanged,
8008  const VmaDefragmentationInfo *pDefragmentationInfo,
8009  VmaDefragmentationStats* pDefragmentationStats)
8010 {
8011  VMA_ASSERT(allocator && pAllocations);
8012 
8013  VMA_DEBUG_LOG("vmaDefragment");
8014 
8015  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8016 
8017  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8018 }
8019 
8020 VkResult vmaCreateBuffer(
8021  VmaAllocator allocator,
8022  const VkBufferCreateInfo* pBufferCreateInfo,
8023  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8024  VkBuffer* pBuffer,
8025  VmaAllocation* pAllocation,
8026  VmaAllocationInfo* pAllocationInfo)
8027 {
8028  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8029 
8030  VMA_DEBUG_LOG("vmaCreateBuffer");
8031 
8032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8033 
8034  *pBuffer = VK_NULL_HANDLE;
8035  *pAllocation = VK_NULL_HANDLE;
8036 
8037  // 1. Create VkBuffer.
8038  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8039  allocator->m_hDevice,
8040  pBufferCreateInfo,
8041  allocator->GetAllocationCallbacks(),
8042  pBuffer);
8043  if(res >= 0)
8044  {
8045  // 2. vkGetBufferMemoryRequirements.
8046  VkMemoryRequirements vkMemReq = {};
8047  bool requiresDedicatedAllocation = false;
8048  bool prefersDedicatedAllocation = false;
8049  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8050  requiresDedicatedAllocation, prefersDedicatedAllocation);
8051 
8052  // 3. Allocate memory using allocator.
8053  res = allocator->AllocateMemory(
8054  vkMemReq,
8055  requiresDedicatedAllocation,
8056  prefersDedicatedAllocation,
8057  *pBuffer, // dedicatedBuffer
8058  VK_NULL_HANDLE, // dedicatedImage
8059  *pAllocationCreateInfo,
8060  VMA_SUBALLOCATION_TYPE_BUFFER,
8061  pAllocation);
8062  if(res >= 0)
8063  {
8064  // 3. Bind buffer with memory.
8065  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8066  allocator->m_hDevice,
8067  *pBuffer,
8068  (*pAllocation)->GetMemory(),
8069  (*pAllocation)->GetOffset());
8070  if(res >= 0)
8071  {
8072  // All steps succeeded.
8073  if(pAllocationInfo != VMA_NULL)
8074  {
8075  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8076  }
8077  return VK_SUCCESS;
8078  }
8079  allocator->FreeMemory(*pAllocation);
8080  *pAllocation = VK_NULL_HANDLE;
8081  return res;
8082  }
8083  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8084  *pBuffer = VK_NULL_HANDLE;
8085  return res;
8086  }
8087  return res;
8088 }
8089 
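// --- Illustrative sketch (not part of the original file) ---
// End-to-end use of the create/allocate/bind sequence implemented above. On any
// failure, vmaCreateBuffer() has already rolled back, leaving *pBuf and *pAlloc as
// VK_NULL_HANDLE. Buffer parameters are illustrative assumptions:
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator,
    VkBuffer* pBuf,
    VmaAllocation* pAlloc)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, pBuf, pAlloc, VMA_NULL);
}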
8090 void vmaDestroyBuffer(
8091  VmaAllocator allocator,
8092  VkBuffer buffer,
8093  VmaAllocation allocation)
8094 {
8095  if(buffer != VK_NULL_HANDLE)
8096  {
8097  VMA_ASSERT(allocator);
8098 
8099  VMA_DEBUG_LOG("vmaDestroyBuffer");
8100 
8101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8102 
8103  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8104 
8105  allocator->FreeMemory(allocation);
8106  }
8107 }
8108 
8109 VkResult vmaCreateImage(
8110  VmaAllocator allocator,
8111  const VkImageCreateInfo* pImageCreateInfo,
8112  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8113  VkImage* pImage,
8114  VmaAllocation* pAllocation,
8115  VmaAllocationInfo* pAllocationInfo)
8116 {
8117  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8118 
8119  VMA_DEBUG_LOG("vmaCreateImage");
8120 
8121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8122 
8123  *pImage = VK_NULL_HANDLE;
8124  *pAllocation = VK_NULL_HANDLE;
8125 
8126  // 1. Create VkImage.
8127  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8128  allocator->m_hDevice,
8129  pImageCreateInfo,
8130  allocator->GetAllocationCallbacks(),
8131  pImage);
8132  if(res >= 0)
8133  {
8134  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8135  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8136  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8137 
8138  // 2. Allocate memory using allocator.
8139  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8140  if(res >= 0)
8141  {
8142  // 3. Bind image with memory.
8143  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8144  allocator->m_hDevice,
8145  *pImage,
8146  (*pAllocation)->GetMemory(),
8147  (*pAllocation)->GetOffset());
8148  if(res >= 0)
8149  {
8150  // All steps succeeded.
8151  if(pAllocationInfo != VMA_NULL)
8152  {
8153  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8154  }
8155  return VK_SUCCESS;
8156  }
8157  allocator->FreeMemory(*pAllocation);
8158  *pAllocation = VK_NULL_HANDLE;
8159  return res;
8160  }
8161  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8162  *pImage = VK_NULL_HANDLE;
8163  return res;
8164  }
8165  return res;
8166 }
8167 
8168 void vmaDestroyImage(
8169  VmaAllocator allocator,
8170  VkImage image,
8171  VmaAllocation allocation)
8172 {
8173  if(image != VK_NULL_HANDLE)
8174  {
8175  VMA_ASSERT(allocator);
8176 
8177  VMA_DEBUG_LOG("vmaDestroyImage");
8178 
8179  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8180 
8181  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8182 
8183  allocator->FreeMemory(allocation);
8184  }
8185 }
8186 
8187 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:568
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:785
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:593
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:578
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:759
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:572
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1044
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:590
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1198
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:914
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:968
Definition: vk_mem_alloc.h:823
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:561
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:856
Definition: vk_mem_alloc.h:769
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:605
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:652
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:587
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:602
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:773
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:717
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:575
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:716
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:583
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1202
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:622
VmaStatInfo total
Definition: vk_mem_alloc.h:726
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1210
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:839
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1193
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:576
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:497
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:596
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:922
Definition: vk_mem_alloc.h:916
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1054
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:573
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:858
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:938
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:974
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:559
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:925
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:754
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1188
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1206
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:765
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:574
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:722
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:503
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:524
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:529
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1208
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:850
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:984
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:569
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:705
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:933
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:516
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
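A minimal sketch of creating a buffer together with its memory in one call, assuming 'allocator' is valid; the size and usage flags are illustrative:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                      VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
                               &buffer, &allocation, NULL);
// ... use the buffer ...
vmaDestroyBuffer(allocator, buffer, allocation); // frees the memory as well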
Definition: vk_mem_alloc.h:830
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:718
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:520
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:928
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:768
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
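The same pattern applies to images; a minimal sketch with illustrative image parameters, assuming 'allocator' is valid:

VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
                              &image, &allocation, NULL);
// ... later:
vmaDestroyImage(allocator, image, allocation);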
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:845
Definition: vk_mem_alloc.h:836
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:708
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:571
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:946
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:608
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:977
VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:834
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:863
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
Definition: vk_mem_alloc.h:640
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:724
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:717
Definition: vk_mem_alloc.h:896
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:580
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:518
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:579
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
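A minimal sketch of a temporary map, assuming 'allocation' resides in host-visible memory and that the header also provides the matching vmaUnmapMemory() (not shown in this excerpt); 'srcData' and 'srcDataSize' are illustrative:

void* mappedData = NULL;
if (vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize); // assumes <string.h> is included
    vmaUnmapMemory(allocator, allocation);    // assumed matching unmap call
}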
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:960
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:1065
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps.
Definition: vk_mem_alloc.h:599
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:717
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:714
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:965
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1049
Definition: vk_mem_alloc.h:832
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1204
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:567
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
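A minimal sketch of allocator creation, assuming 'physicalDevice' and 'device' were created by the application; all optional members are left at their zero defaults:

VmaAllocatorCreateInfo allocatorInfo = {0};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator); // at shutdown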
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:582
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:712
No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
Definition: vk_mem_alloc.h:757
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:918
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:710
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:577
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:581
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:796
Memory will be mapped on host. Could be used for transfer to/from device.
Definition: vk_mem_alloc.h:762
void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
Definition: vk_mem_alloc.h:1060
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:557
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
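A sketch of a defragmentation pass based on the structs listed here; the vmaDefragment() signature and the maxBytesToMove/maxAllocationsToMove members do not appear in this excerpt, so both are assumptions about this version of the library. 'allocations' and 'allocationCount' describe the VmaAllocation objects to consider:

VmaDefragmentationInfo defragInfo = {0};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // assumed member: no byte limit
defragInfo.maxAllocationsToMove = UINT32_MAX; // assumed member: no count limit

VmaDefragmentationStats defragStats = {0};
VkResult res = vmaDefragment(allocator, allocations, allocationCount,
                             NULL, &defragInfo, &defragStats);
// defragStats.bytesMoved, bytesFreed and allocationsMoved now describe the pass.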
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:570
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1030
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General-purpose memory allocation.
Set this flag to use a memory that will be persistently mapped and retrieve a pointer to it in VmaAllocationInfo::pMappedData.
Definition: vk_mem_alloc.h:812
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
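A short sketch of advancing the allocator's frame index once per frame, which is what makes lost-allocation bookkeeping (VmaPoolCreateInfo::frameInUseCount, vmaCreateLostAllocation) work; the loop structure is illustrative:

uint32_t frameIndex = 0;
for (;;) // application render loop (illustrative)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record commands, touch allocations that can become lost ...
    ++frameIndex;
}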
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
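A minimal sketch of allocating memory for an already-created VkBuffer and binding it manually via the returned allocation info, assuming 'device' and 'buffer' are valid handles; reading deviceMemory and offset from VmaAllocationInfo follows the (deviceMemory, offset) convention described above:

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo,
                                          &allocation, &allocInfo);
if (res == VK_SUCCESS)
{
    vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);
}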
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:887
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:718
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:725
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:971
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:718
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
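A sketch of the unmap/remap pattern around an operation that relocates memory; using defragmentation as the trigger here is an assumption based on the library's general usage pattern for persistently mapped memory:

// Unmap all persistently mapped memory before allocations are moved.
vmaUnmapPersistentlyMappedMemory(allocator);
// ... run vmaDefragment() or another operation that relocates memory ...
vmaMapPersistentlyMappedMemory(allocator);
// Previously fetched VmaAllocationInfo::pMappedData pointers are now stale;
// re-query them with vmaGetAllocationInfo().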
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1035