Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

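/*
Example: a pair of functions matching these callback signatures, e.g. to log
every device memory allocation. This is an illustrative sketch only - the
function names and the use of printf are hypothetical, not part of the API.

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freeing %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }
*/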
/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    /// Allocator and all objects created from it will not be synchronized internally,
    /// so you must guarantee they are used from only one thread at a time.
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /// Enables usage of the VK_KHR_dedicated_allocation extension.
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;

    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;

    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;

    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional, can be 0 to use default.
    VkDeviceSize preferredLargeHeapBlockSize;

    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;

    /// Informative callbacks for vkAllocateMemory/vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;

    /// Either null or a pointer to an array of limits on the maximum number of bytes
    /// that can be allocated out of each Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;

    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);
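/*
Example usage - an illustrative sketch, assuming `physicalDevice` and `device`
are valid handles created elsewhere:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/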

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

/// PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns its VkMemoryPropertyFlags.
/// This is just a convenience function; the same information can be obtained
/// using vmaGetMemoryProperties().
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from the current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
/// @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);
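/*
Example: dumping statistics as a JSON string - an illustrative sketch. Every
string returned by vmaBuildStatsString() must be released with vmaFreeStatsString().

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/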

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so it can be allocated in memory that
    /// is not HOST_VISIBLE. Offers fastest access from the device.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host. Guarantees to be HOST_VISIBLE and HOST_COHERENT.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    /// Guarantees to be HOST_VISIBLE.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and cached, preferred for resources written by the
    /// device and read back on the host. Guarantees to be HOST_VISIBLE.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own dedicated VkDeviceMemory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new ones.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another
    /// allocation made with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating an allocation using this flag, other allocations that were
    /// created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    /// Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a
    /// null-terminated string and make a local copy of it.
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Can be left VMA_MEMORY_USAGE_UNKNOWN if you specify requiredFlags.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional, can be null.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in VmaAllocation.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);
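/*
Example: querying which memory type would be chosen for an allocation - an
illustrative sketch. `memReq` is assumed to come from a prior call to
vkGetBufferMemoryRequirements() on the buffer in question.

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
*/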

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /// Use this flag if you always allocate only buffers and linear images, or only
    /// optimal images, out of this pool, so Buffer-Image Granularity can be ignored.
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional, can be 0 to use default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional, can be 0 for no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);
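/*
Example: creating a custom pool for one memory type - an illustrative sketch.
`memTypeIndex` is assumed to come from vmaFindMemoryTypeIndex() above.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block.
    poolInfo.maxBlockCount = 4;               // At most 256 MiB total.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // Allocate from it via VmaAllocationCreateInfo::pool, then:
    vmaDestroyPool(allocator, pool);
*/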

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in the given pool as lost if they are not used in the
/// current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
/// @param[out] pLostAllocationCount Number of allocations marked as lost. Optional, can be null.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation, which can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object. The same memory object can be shared by multiple allocations.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if not mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData
    /// or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/// Creates a new allocation that is in lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);
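/*
Example: uploading data through a temporary mapping - an illustrative sketch,
assuming `allocation` was created in HOST_VISIBLE memory and `srcData`/`srcDataSize`
describe the data to copy. Calls to vmaMapMemory() must be balanced by vmaUnmapMemory().

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/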

/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations
    /// to different places. VK_WHOLE_SIZE means no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place.
    /// UINT32_MAX means no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);
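/*
Example: defragmenting a set of allocations with a budget on moved bytes - an
illustrative sketch. `allocations`, `allocationCount` and `allocationsChanged`
are assumed to be prepared by the caller, and buffers/images bound to moved
allocations must be recreated and rebound afterwards.

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
    defragInfo.maxAllocationsToMove = 64;

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocations, allocationCount,
        allocationsChanged, &defragInfo, &stats);
*/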

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
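/*
Example: creating a buffer together with its memory in one call - an
illustrative sketch.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/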

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
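/*
Example: creating an image the same way - an illustrative sketch; only the
Vulkan create info differs from the buffer case.

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent.width = 1024;
    imageInfo.extent.height = 1024;
    imageInfo.extent.depth = 1;
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, NULL);
    // ... use the image ...
    vmaDestroyImage(allocator, image, allocation);
*/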

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    // Set to 1 to use the best-fit strategy when searching for a free range
    // (slower, but tends to waste less memory), or 0 to use first-fit.
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Set to 1 to make every allocation use its own, dedicated VkDeviceMemory
    // block. Intended for debugging only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes.
    // Set to a power of two greater than 1 for debugging purposes only.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Margin of free space, in bytes, enforced between allocations.
    // Set to a nonzero value for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set to 1 to protect the entire library with a single global mutex.
    // Useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value of VkPhysicalDeviceLimits::bufferImageGranularity that the
    // library assumes. Set to a larger value for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size, in bytes, of a memory heap considered "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size, in bytes, of a single VkDeviceMemory block allocated from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
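// This is the classic parallel ("SWAR") popcount: each step adds neighboring
// groups of bits (pairs, then nibbles, bytes, and half-words), so the result
// is computed in five steps instead of looping over all 32 bits.
// For example, VmaCountBitsSet(0xB) == 3, since 0xB is binary 1011.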

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
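// Worked example: with pageSize = 4096, a resource A occupying bytes [0, 4000)
// ends on page 0, and a resource B starting at offset 4096 starts on page 1,
// so the function returns false. If B instead started at offset 4000, both
// would touch page 0 and the function would return true.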

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is not
less than (i.e. greater than or equal to) key, according to comparison cmp.

Cmp should return true if its first argument is less than its second argument.

The returned iterator points to the found element, if it is present in the
collection, or to the place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
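/*
Example: lower-bound search over a sorted plain array - an illustrative sketch.

    const int arr[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 3, [](int a, int b) { return a < b; });
    // `it` now points to the first 3, i.e. arr + 1.
*/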

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    // Raw pointers via data() are used here because the vector is const.
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste time returning
    // all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

struct VmaAllocation_T
{
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};

3369 /*
3370 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3371 allocated memory block or free.
3372 */
3373 struct VmaSuballocation
3374 {
3375  VkDeviceSize offset;
3376  VkDeviceSize size;
3377  VmaAllocation hAllocation;
3378  VmaSuballocationType type;
3379 };
3380 
3381 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3382 
3383 // Cost of making one additional allocation lost, expressed as an equivalent size in bytes.
3384 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3385 
3386 /*
3387 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3388 
3389 If canMakeOtherLost was false:
3390 - item points to a FREE suballocation.
3391 - itemsToMakeLostCount is 0.
3392 
3393 If canMakeOtherLost was true:
3394 - item points to the first of a sequence of suballocations, which are either FREE,
3395  or point to VmaAllocations that can become lost.
3396 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3397  the requested allocation to succeed.
3398 */
3399 struct VmaAllocationRequest
3400 {
3401  VkDeviceSize offset;
3402  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3403  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3404  VmaSuballocationList::iterator item;
3405  size_t itemsToMakeLostCount;
3406 
3407  VkDeviceSize CalcCost() const
3408  {
3409  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3410  }
3411 };
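// Worked example (illustrative): a request that must make lost two allocations
// of 64 KiB each, with no other overlap, has sumItemSize == 131072 and
// itemsToMakeLostCount == 2, so CalcCost() == 131072 + 2 * 1048576 == 2228224.
// Candidate requests are compared by this cost and the cheapest one wins.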
3412 
3413 /*
3414 Data structure used for bookkeeping of allocations and unused ranges of memory
3415 in a single VkDeviceMemory block.
3416 */
3417 class VmaBlockMetadata
3418 {
3419 public:
3420  VmaBlockMetadata(VmaAllocator hAllocator);
3421  ~VmaBlockMetadata();
3422  void Init(VkDeviceSize size);
3423 
3424  // Validates all data structures inside this object. If not valid, returns false.
3425  bool Validate() const;
3426  VkDeviceSize GetSize() const { return m_Size; }
3427  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3428  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3429  VkDeviceSize GetUnusedRangeSizeMax() const;
3430  // Returns true if this block is empty - contains only a single free suballocation.
3431  bool IsEmpty() const;
3432 
3433  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3434  void AddPoolStats(VmaPoolStats& inoutStats) const;
3435 
3436 #if VMA_STATS_STRING_ENABLED
3437  void PrintDetailedMap(class VmaJsonWriter& json) const;
3438 #endif
3439 
3440  // Creates a trivial request for the case when the block is empty.
3441  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3442 
3443  // Tries to find a place for suballocation with given parameters inside this block.
3444  // If succeeded, fills pAllocationRequest and returns true.
3445  // If failed, returns false.
3446  bool CreateAllocationRequest(
3447  uint32_t currentFrameIndex,
3448  uint32_t frameInUseCount,
3449  VkDeviceSize bufferImageGranularity,
3450  VkDeviceSize allocSize,
3451  VkDeviceSize allocAlignment,
3452  VmaSuballocationType allocType,
3453  bool canMakeOtherLost,
3454  VmaAllocationRequest* pAllocationRequest);
3455 
3456  bool MakeRequestedAllocationsLost(
3457  uint32_t currentFrameIndex,
3458  uint32_t frameInUseCount,
3459  VmaAllocationRequest* pAllocationRequest);
3460 
3461  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3462 
3463  // Makes actual allocation based on request. Request must already be checked and valid.
3464  void Alloc(
3465  const VmaAllocationRequest& request,
3466  VmaSuballocationType type,
3467  VkDeviceSize allocSize,
3468  VmaAllocation hAllocation);
3469 
3470  // Frees suballocation assigned to given memory region.
3471  void Free(const VmaAllocation allocation);
3472 
3473 private:
3474  VkDeviceSize m_Size;
3475  uint32_t m_FreeCount;
3476  VkDeviceSize m_SumFreeSize;
3477  VmaSuballocationList m_Suballocations;
3478  // Suballocations that are free and have size greater than certain threshold.
3479  // Sorted by size, ascending.
3480  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3481 
3482  bool ValidateFreeSuballocationList() const;
3483 
3484  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
3485  // If yes, fills pOffset and returns true. If no, returns false.
3486  bool CheckAllocation(
3487  uint32_t currentFrameIndex,
3488  uint32_t frameInUseCount,
3489  VkDeviceSize bufferImageGranularity,
3490  VkDeviceSize allocSize,
3491  VkDeviceSize allocAlignment,
3492  VmaSuballocationType allocType,
3493  VmaSuballocationList::const_iterator suballocItem,
3494  bool canMakeOtherLost,
3495  VkDeviceSize* pOffset,
3496  size_t* itemsToMakeLostCount,
3497  VkDeviceSize* pSumFreeSize,
3498  VkDeviceSize* pSumItemSize) const;
3499  // Given a free suballocation, merges it with the following one, which must also be free.
3500  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3501  // Releases given suballocation, making it free.
3502  // Merges it with adjacent free suballocations if applicable.
3503  // Returns iterator to new free suballocation at this place.
3504  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3505  // Given a free suballocation, inserts it into the sorted list
3506  // m_FreeSuballocationsBySize if it is large enough to qualify.
3507  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3508  // Given a free suballocation, removes it from the sorted list
3509  // m_FreeSuballocationsBySize if it was registered there.
3510  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3511 };
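// Illustrative layout sketch (not library code): a 1 MiB block holding two
// allocations might store m_Suballocations as
//   [USED 0..256K) [FREE 256K..512K) [USED 512K..768K) [FREE 768K..1M),
// with both FREE ranges also registered in m_FreeSuballocationsBySize,
// sorted by size ascending, so m_FreeCount == 2 and m_SumFreeSize == 512K.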
3512 
3513 // Helper class that represents mapped memory. Synchronized internally.
3514 class VmaDeviceMemoryMapping
3515 {
3516 public:
3517  VmaDeviceMemoryMapping();
3518  ~VmaDeviceMemoryMapping();
3519 
3520  void* GetMappedData() const { return m_pMappedData; }
3521 
3522  // ppData can be null.
3523  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
3524  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
3525 
3526 private:
3527  VMA_MUTEX m_Mutex;
3528  uint32_t m_MapCount;
3529  void* m_pMappedData;
3530 };
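// Behavior sketch (illustrative, not part of the library): the first Map()
// calls vkMapMemory, later calls only increment the internal counter, and
// vkUnmapMemory runs only when the counter returns to zero.
#if 0
static void ExampleMappingRefCount(VmaAllocator hAllocator, VkDeviceMemory hMemory)
{
    VmaDeviceMemoryMapping mapping;
    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    mapping.Map(hAllocator, hMemory, &p1); // m_MapCount 0 -> 1: vkMapMemory called.
    mapping.Map(hAllocator, hMemory, &p2); // m_MapCount 1 -> 2: same pointer returned.
    mapping.Unmap(hAllocator, hMemory);    // m_MapCount 2 -> 1: memory stays mapped.
    mapping.Unmap(hAllocator, hMemory);    // m_MapCount 1 -> 0: vkUnmapMemory called.
}
#endif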
3531 
3532 /*
3533 Represents a single block of device memory (`VkDeviceMemory`) with all the
3534 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3535 
3536 Thread-safety: This class must be externally synchronized.
3537 */
3538 class VmaDeviceMemoryBlock
3539 {
3540 public:
3541  uint32_t m_MemoryTypeIndex;
3542  VkDeviceMemory m_hMemory;
3543  VmaDeviceMemoryMapping m_Mapping;
3544  VmaBlockMetadata m_Metadata;
3545 
3546  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3547 
3548  ~VmaDeviceMemoryBlock()
3549  {
3550  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3551  }
3552 
3553  // Always call after construction.
3554  void Init(
3555  uint32_t newMemoryTypeIndex,
3556  VkDeviceMemory newMemory,
3557  VkDeviceSize newSize);
3558  // Always call before destruction.
3559  void Destroy(VmaAllocator allocator);
3560 
3561  // Validates all data structures inside this object. If not valid, returns false.
3562  bool Validate() const;
3563 
3564  // ppData can be null.
3565  VkResult Map(VmaAllocator hAllocator, void** ppData);
3566  void Unmap(VmaAllocator hAllocator);
3567 };
3568 
3569 struct VmaPointerLess
3570 {
3571  bool operator()(const void* lhs, const void* rhs) const
3572  {
3573  return lhs < rhs;
3574  }
3575 };
3576 
3577 class VmaDefragmentator;
3578 
3579 /*
3580 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3581 Vulkan memory type.
3582 
3583 Synchronized internally with a mutex.
3584 */
3585 struct VmaBlockVector
3586 {
3587  VmaBlockVector(
3588  VmaAllocator hAllocator,
3589  uint32_t memoryTypeIndex,
3590  VkDeviceSize preferredBlockSize,
3591  size_t minBlockCount,
3592  size_t maxBlockCount,
3593  VkDeviceSize bufferImageGranularity,
3594  uint32_t frameInUseCount,
3595  bool isCustomPool);
3596  ~VmaBlockVector();
3597 
3598  VkResult CreateMinBlocks();
3599 
3600  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3601  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3602  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3603  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3604 
3605  void GetPoolStats(VmaPoolStats* pStats);
3606 
3607  bool IsEmpty() const { return m_Blocks.empty(); }
3608 
3609  VkResult Allocate(
3610  VmaPool hCurrentPool,
3611  uint32_t currentFrameIndex,
3612  const VkMemoryRequirements& vkMemReq,
3613  const VmaAllocationCreateInfo& createInfo,
3614  VmaSuballocationType suballocType,
3615  VmaAllocation* pAllocation);
3616 
3617  void Free(
3618  VmaAllocation hAllocation);
3619 
3620  // Adds statistics of this BlockVector to pStats.
3621  void AddStats(VmaStats* pStats);
3622 
3623 #if VMA_STATS_STRING_ENABLED
3624  void PrintDetailedMap(class VmaJsonWriter& json);
3625 #endif
3626 
3627  void MakePoolAllocationsLost(
3628  uint32_t currentFrameIndex,
3629  size_t* pLostAllocationCount);
3630 
3631  VmaDefragmentator* EnsureDefragmentator(
3632  VmaAllocator hAllocator,
3633  uint32_t currentFrameIndex);
3634 
3635  VkResult Defragment(
3636  VmaDefragmentationStats* pDefragmentationStats,
3637  VkDeviceSize& maxBytesToMove,
3638  uint32_t& maxAllocationsToMove);
3639 
3640  void DestroyDefragmentator();
3641 
3642 private:
3643  friend class VmaDefragmentator;
3644 
3645  const VmaAllocator m_hAllocator;
3646  const uint32_t m_MemoryTypeIndex;
3647  const VkDeviceSize m_PreferredBlockSize;
3648  const size_t m_MinBlockCount;
3649  const size_t m_MaxBlockCount;
3650  const VkDeviceSize m_BufferImageGranularity;
3651  const uint32_t m_FrameInUseCount;
3652  const bool m_IsCustomPool;
3653  VMA_MUTEX m_Mutex;
3654  // Incrementally sorted by sumFreeSize, ascending.
3655  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3656  /* There can be at most one block that is completely empty - a
3657  hysteresis to avoid the pessimistic case of alternating creation and destruction
3658  of a VkDeviceMemory. */
3659  bool m_HasEmptyBlock;
3660  VmaDefragmentator* m_pDefragmentator;
3661 
3662  size_t CalcMaxBlockSize() const;
3663 
3664  // Finds and removes given block from vector.
3665  void Remove(VmaDeviceMemoryBlock* pBlock);
3666 
3667  // Performs single step in sorting m_Blocks. They may not be fully sorted
3668  // after this call.
3669  void IncrementallySortBlocks();
3670 
3671  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3672 };
3673 
3674 struct VmaPool_T
3675 {
3676 public:
3677  VmaBlockVector m_BlockVector;
3678 
3679  // Takes ownership.
3680  VmaPool_T(
3681  VmaAllocator hAllocator,
3682  const VmaPoolCreateInfo& createInfo);
3683  ~VmaPool_T();
3684 
3685  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3686 
3687 #if VMA_STATS_STRING_ENABLED
3688  //void PrintDetailedMap(class VmaStringBuilder& sb);
3689 #endif
3690 };
3691 
3692 class VmaDefragmentator
3693 {
3694  const VmaAllocator m_hAllocator;
3695  VmaBlockVector* const m_pBlockVector;
3696  uint32_t m_CurrentFrameIndex;
3697  VkDeviceSize m_BytesMoved;
3698  uint32_t m_AllocationsMoved;
3699 
3700  struct AllocationInfo
3701  {
3702  VmaAllocation m_hAllocation;
3703  VkBool32* m_pChanged;
3704 
3705  AllocationInfo() :
3706  m_hAllocation(VK_NULL_HANDLE),
3707  m_pChanged(VMA_NULL)
3708  {
3709  }
3710  };
3711 
3712  struct AllocationInfoSizeGreater
3713  {
3714  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3715  {
3716  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3717  }
3718  };
3719 
3720  // Used between AddAllocation and Defragment.
3721  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3722 
3723  struct BlockInfo
3724  {
3725  VmaDeviceMemoryBlock* m_pBlock;
3726  bool m_HasNonMovableAllocations;
3727  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3728 
3729  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3730  m_pBlock(VMA_NULL),
3731  m_HasNonMovableAllocations(true),
3732  m_Allocations(pAllocationCallbacks),
3733  m_pMappedDataForDefragmentation(VMA_NULL)
3734  {
3735  }
3736 
3737  void CalcHasNonMovableAllocations()
3738  {
3739  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3740  const size_t defragmentAllocCount = m_Allocations.size();
3741  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3742  }
3743 
3744  void SortAllocationsBySizeDescecnding()
3745  {
3746  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3747  }
3748 
3749  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3750  void Unmap(VmaAllocator hAllocator);
3751 
3752  private:
3753  // Not null if mapped for defragmentation only, not originally mapped.
3754  void* m_pMappedDataForDefragmentation;
3755  };
3756 
3757  struct BlockPointerLess
3758  {
3759  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3760  {
3761  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3762  }
3763  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3764  {
3765  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3766  }
3767  };
3768 
3769  // 1. Blocks with some non-movable allocations go first.
3770  // 2. Blocks with smaller sumFreeSize go first.
3771  struct BlockInfoCompareMoveDestination
3772  {
3773  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3774  {
3775  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3776  {
3777  return true;
3778  }
3779  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3780  {
3781  return false;
3782  }
3783  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3784  {
3785  return true;
3786  }
3787  return false;
3788  }
3789  };
3790 
3791  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3792  BlockInfoVector m_Blocks;
3793 
3794  VkResult DefragmentRound(
3795  VkDeviceSize maxBytesToMove,
3796  uint32_t maxAllocationsToMove);
3797 
3798  static bool MoveMakesSense(
3799  size_t dstBlockIndex, VkDeviceSize dstOffset,
3800  size_t srcBlockIndex, VkDeviceSize srcOffset);
3801 
3802 public:
3803  VmaDefragmentator(
3804  VmaAllocator hAllocator,
3805  VmaBlockVector* pBlockVector,
3806  uint32_t currentFrameIndex);
3807 
3808  ~VmaDefragmentator();
3809 
3810  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3811  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3812 
3813  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3814 
3815  VkResult Defragment(
3816  VkDeviceSize maxBytesToMove,
3817  uint32_t maxAllocationsToMove);
3818 };
3819 
3820 // Main allocator object.
3821 struct VmaAllocator_T
3822 {
3823  bool m_UseMutex;
3824  bool m_UseKhrDedicatedAllocation;
3825  VkDevice m_hDevice;
3826  bool m_AllocationCallbacksSpecified;
3827  VkAllocationCallbacks m_AllocationCallbacks;
3828  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3829 
3830  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3831  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3832  VMA_MUTEX m_HeapSizeLimitMutex;
3833 
3834  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3835  VkPhysicalDeviceMemoryProperties m_MemProps;
3836 
3837  // Default pools.
3838  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3839 
3840  // Each vector is sorted by memory (handle value).
3841  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3842  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
3843  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3844 
3845  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3846  ~VmaAllocator_T();
3847 
3848  const VkAllocationCallbacks* GetAllocationCallbacks() const
3849  {
3850  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3851  }
3852  const VmaVulkanFunctions& GetVulkanFunctions() const
3853  {
3854  return m_VulkanFunctions;
3855  }
3856 
3857  VkDeviceSize GetBufferImageGranularity() const
3858  {
3859  return VMA_MAX(
3860  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3861  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3862  }
3863 
3864  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3865  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3866 
3867  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3868  {
3869  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3870  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3871  }
3872 
3873  void GetBufferMemoryRequirements(
3874  VkBuffer hBuffer,
3875  VkMemoryRequirements& memReq,
3876  bool& requiresDedicatedAllocation,
3877  bool& prefersDedicatedAllocation) const;
3878  void GetImageMemoryRequirements(
3879  VkImage hImage,
3880  VkMemoryRequirements& memReq,
3881  bool& requiresDedicatedAllocation,
3882  bool& prefersDedicatedAllocation) const;
3883 
3884  // Main allocation function.
3885  VkResult AllocateMemory(
3886  const VkMemoryRequirements& vkMemReq,
3887  bool requiresDedicatedAllocation,
3888  bool prefersDedicatedAllocation,
3889  VkBuffer dedicatedBuffer,
3890  VkImage dedicatedImage,
3891  const VmaAllocationCreateInfo& createInfo,
3892  VmaSuballocationType suballocType,
3893  VmaAllocation* pAllocation);
3894 
3895  // Main deallocation function.
3896  void FreeMemory(const VmaAllocation allocation);
3897 
3898  void CalculateStats(VmaStats* pStats);
3899 
3900 #if VMA_STATS_STRING_ENABLED
3901  void PrintDetailedMap(class VmaJsonWriter& json);
3902 #endif
3903 
3904  VkResult Defragment(
3905  VmaAllocation* pAllocations,
3906  size_t allocationCount,
3907  VkBool32* pAllocationsChanged,
3908  const VmaDefragmentationInfo* pDefragmentationInfo,
3909  VmaDefragmentationStats* pDefragmentationStats);
3910 
3911  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3912 
3913  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3914  void DestroyPool(VmaPool pool);
3915  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3916 
3917  void SetCurrentFrameIndex(uint32_t frameIndex);
3918 
3919  void MakePoolAllocationsLost(
3920  VmaPool hPool,
3921  size_t* pLostAllocationCount);
3922 
3923  void CreateLostAllocation(VmaAllocation* pAllocation);
3924 
3925  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3926  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3927 
3928  VkResult Map(VmaAllocation hAllocation, void** ppData);
3929  void Unmap(VmaAllocation hAllocation);
3930 
3931 private:
3932  VkDeviceSize m_PreferredLargeHeapBlockSize;
3933 
3934  VkPhysicalDevice m_PhysicalDevice;
3935  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3936 
3937  VMA_MUTEX m_PoolsMutex;
3938  // Protected by m_PoolsMutex. Sorted by pointer value.
3939  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3940 
3941  VmaVulkanFunctions m_VulkanFunctions;
3942 
3943  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3944 
3945  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3946 
3947  VkResult AllocateMemoryOfType(
3948  const VkMemoryRequirements& vkMemReq,
3949  bool dedicatedAllocation,
3950  VkBuffer dedicatedBuffer,
3951  VkImage dedicatedImage,
3952  const VmaAllocationCreateInfo& createInfo,
3953  uint32_t memTypeIndex,
3954  VmaSuballocationType suballocType,
3955  VmaAllocation* pAllocation);
3956 
3957  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
3958  VkResult AllocateDedicatedMemory(
3959  VkDeviceSize size,
3960  VmaSuballocationType suballocType,
3961  uint32_t memTypeIndex,
3962  bool map,
3963  bool isUserDataString,
3964  void* pUserData,
3965  VkBuffer dedicatedBuffer,
3966  VkImage dedicatedImage,
3967  VmaAllocation* pAllocation);
3968 
3969  // Frees a dedicated allocation: unregisters it and releases its VkDeviceMemory.
3970  void FreeDedicatedMemory(VmaAllocation allocation);
3971 };
3972 
3974 // Memory allocation #2 after VmaAllocator_T definition
3975 
3976 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3977 {
3978  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3979 }
3980 
3981 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3982 {
3983  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3984 }
3985 
3986 template<typename T>
3987 static T* VmaAllocate(VmaAllocator hAllocator)
3988 {
3989  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3990 }
3991 
3992 template<typename T>
3993 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3994 {
3995  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3996 }
3997 
3998 template<typename T>
3999 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4000 {
4001  if(ptr != VMA_NULL)
4002  {
4003  ptr->~T();
4004  VmaFree(hAllocator, ptr);
4005  }
4006 }
4007 
4008 template<typename T>
4009 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4010 {
4011  if(ptr != VMA_NULL)
4012  {
4013  for(size_t i = count; i--; )
4014  ptr[i].~T();
4015  VmaFree(hAllocator, ptr);
4016  }
4017 }
4018 
4020 // VmaStringBuilder
4021 
4022 #if VMA_STATS_STRING_ENABLED
4023 
4024 class VmaStringBuilder
4025 {
4026 public:
4027  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4028  size_t GetLength() const { return m_Data.size(); }
4029  const char* GetData() const { return m_Data.data(); }
4030 
4031  void Add(char ch) { m_Data.push_back(ch); }
4032  void Add(const char* pStr);
4033  void AddNewLine() { Add('\n'); }
4034  void AddNumber(uint32_t num);
4035  void AddNumber(uint64_t num);
4036  void AddPointer(const void* ptr);
4037 
4038 private:
4039  VmaVector< char, VmaStlAllocator<char> > m_Data;
4040 };
4041 
4042 void VmaStringBuilder::Add(const char* pStr)
4043 {
4044  const size_t strLen = strlen(pStr);
4045  if(strLen > 0)
4046  {
4047  const size_t oldCount = m_Data.size();
4048  m_Data.resize(oldCount + strLen);
4049  memcpy(m_Data.data() + oldCount, pStr, strLen);
4050  }
4051 }
4052 
4053 void VmaStringBuilder::AddNumber(uint32_t num)
4054 {
4055  char buf[11];
4056  VmaUint32ToStr(buf, sizeof(buf), num);
4057  Add(buf);
4058 }
4059 
4060 void VmaStringBuilder::AddNumber(uint64_t num)
4061 {
4062  char buf[21];
4063  VmaUint64ToStr(buf, sizeof(buf), num);
4064  Add(buf);
4065 }
4066 
4067 void VmaStringBuilder::AddPointer(const void* ptr)
4068 {
4069  char buf[21];
4070  VmaPtrToStr(buf, sizeof(buf), ptr);
4071  Add(buf);
4072 }
4073 
4074 #endif // #if VMA_STATS_STRING_ENABLED
4075 
4077 // VmaJsonWriter
4078 
4079 #if VMA_STATS_STRING_ENABLED
4080 
4081 class VmaJsonWriter
4082 {
4083 public:
4084  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4085  ~VmaJsonWriter();
4086 
4087  void BeginObject(bool singleLine = false);
4088  void EndObject();
4089 
4090  void BeginArray(bool singleLine = false);
4091  void EndArray();
4092 
4093  void WriteString(const char* pStr);
4094  void BeginString(const char* pStr = VMA_NULL);
4095  void ContinueString(const char* pStr);
4096  void ContinueString(uint32_t n);
4097  void ContinueString(uint64_t n);
4098  void ContinueString_Pointer(const void* ptr);
4099  void EndString(const char* pStr = VMA_NULL);
4100 
4101  void WriteNumber(uint32_t n);
4102  void WriteNumber(uint64_t n);
4103  void WriteBool(bool b);
4104  void WriteNull();
4105 
4106 private:
4107  static const char* const INDENT;
4108 
4109  enum COLLECTION_TYPE
4110  {
4111  COLLECTION_TYPE_OBJECT,
4112  COLLECTION_TYPE_ARRAY,
4113  };
4114  struct StackItem
4115  {
4116  COLLECTION_TYPE type;
4117  uint32_t valueCount;
4118  bool singleLineMode;
4119  };
4120 
4121  VmaStringBuilder& m_SB;
4122  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4123  bool m_InsideString;
4124 
4125  void BeginValue(bool isString);
4126  void WriteIndent(bool oneLess = false);
4127 };
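// Usage sketch (illustrative, not part of the library): inside an object,
// values at even positions must be strings (keys), which BeginValue() asserts.
#if 0
static void ExampleJsonWriter(const VkAllocationCallbacks* pCallbacks, VmaStringBuilder& sb)
{
    VmaJsonWriter json(pCallbacks, sb);
    json.BeginObject(true);   // Single-line mode: no newlines or indentation.
    json.WriteString("Size"); // Key - must be a string.
    json.WriteNumber(256u);   // Value.
    json.EndObject();
    // sb now contains: {"Size": 256}
}
#endif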
4128 
4129 const char* const VmaJsonWriter::INDENT = " ";
4130 
4131 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4132  m_SB(sb),
4133  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4134  m_InsideString(false)
4135 {
4136 }
4137 
4138 VmaJsonWriter::~VmaJsonWriter()
4139 {
4140  VMA_ASSERT(!m_InsideString);
4141  VMA_ASSERT(m_Stack.empty());
4142 }
4143 
4144 void VmaJsonWriter::BeginObject(bool singleLine)
4145 {
4146  VMA_ASSERT(!m_InsideString);
4147 
4148  BeginValue(false);
4149  m_SB.Add('{');
4150 
4151  StackItem item;
4152  item.type = COLLECTION_TYPE_OBJECT;
4153  item.valueCount = 0;
4154  item.singleLineMode = singleLine;
4155  m_Stack.push_back(item);
4156 }
4157 
4158 void VmaJsonWriter::EndObject()
4159 {
4160  VMA_ASSERT(!m_InsideString);
4161 
4162  WriteIndent(true);
4163  m_SB.Add('}');
4164 
4165  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4166  m_Stack.pop_back();
4167 }
4168 
4169 void VmaJsonWriter::BeginArray(bool singleLine)
4170 {
4171  VMA_ASSERT(!m_InsideString);
4172 
4173  BeginValue(false);
4174  m_SB.Add('[');
4175 
4176  StackItem item;
4177  item.type = COLLECTION_TYPE_ARRAY;
4178  item.valueCount = 0;
4179  item.singleLineMode = singleLine;
4180  m_Stack.push_back(item);
4181 }
4182 
4183 void VmaJsonWriter::EndArray()
4184 {
4185  VMA_ASSERT(!m_InsideString);
4186 
4187  WriteIndent(true);
4188  m_SB.Add(']');
4189 
4190  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4191  m_Stack.pop_back();
4192 }
4193 
4194 void VmaJsonWriter::WriteString(const char* pStr)
4195 {
4196  BeginString(pStr);
4197  EndString();
4198 }
4199 
4200 void VmaJsonWriter::BeginString(const char* pStr)
4201 {
4202  VMA_ASSERT(!m_InsideString);
4203 
4204  BeginValue(true);
4205  m_SB.Add('"');
4206  m_InsideString = true;
4207  if(pStr != VMA_NULL && pStr[0] != '\0')
4208  {
4209  ContinueString(pStr);
4210  }
4211 }
4212 
4213 void VmaJsonWriter::ContinueString(const char* pStr)
4214 {
4215  VMA_ASSERT(m_InsideString);
4216 
4217  const size_t strLen = strlen(pStr);
4218  for(size_t i = 0; i < strLen; ++i)
4219  {
4220  char ch = pStr[i];
4221  if(ch == '\\')
4222  {
4223  m_SB.Add("\\\\");
4224  }
4225  else if(ch == '"')
4226  {
4227  m_SB.Add("\\\"");
4228  }
4229  else if(ch >= 32)
4230  {
4231  m_SB.Add(ch);
4232  }
4233  else switch(ch)
4234  {
4235  case '\b':
4236  m_SB.Add("\\b");
4237  break;
4238  case '\f':
4239  m_SB.Add("\\f");
4240  break;
4241  case '\n':
4242  m_SB.Add("\\n");
4243  break;
4244  case '\r':
4245  m_SB.Add("\\r");
4246  break;
4247  case '\t':
4248  m_SB.Add("\\t");
4249  break;
4250  default:
4251  VMA_ASSERT(0 && "Character not currently supported.");
4252  break;
4253  }
4254  }
4255 }
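// For example (illustrative): ContinueString("a\"b") appends the four
// characters a\"b - the embedded quote becomes the two-character sequence \"
// so the resulting string stays valid JSON.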
4256 
4257 void VmaJsonWriter::ContinueString(uint32_t n)
4258 {
4259  VMA_ASSERT(m_InsideString);
4260  m_SB.AddNumber(n);
4261 }
4262 
4263 void VmaJsonWriter::ContinueString(uint64_t n)
4264 {
4265  VMA_ASSERT(m_InsideString);
4266  m_SB.AddNumber(n);
4267 }
4268 
4269 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4270 {
4271  VMA_ASSERT(m_InsideString);
4272  m_SB.AddPointer(ptr);
4273 }
4274 
4275 void VmaJsonWriter::EndString(const char* pStr)
4276 {
4277  VMA_ASSERT(m_InsideString);
4278  if(pStr != VMA_NULL && pStr[0] != '\0')
4279  {
4280  ContinueString(pStr);
4281  }
4282  m_SB.Add('"');
4283  m_InsideString = false;
4284 }
4285 
4286 void VmaJsonWriter::WriteNumber(uint32_t n)
4287 {
4288  VMA_ASSERT(!m_InsideString);
4289  BeginValue(false);
4290  m_SB.AddNumber(n);
4291 }
4292 
4293 void VmaJsonWriter::WriteNumber(uint64_t n)
4294 {
4295  VMA_ASSERT(!m_InsideString);
4296  BeginValue(false);
4297  m_SB.AddNumber(n);
4298 }
4299 
4300 void VmaJsonWriter::WriteBool(bool b)
4301 {
4302  VMA_ASSERT(!m_InsideString);
4303  BeginValue(false);
4304  m_SB.Add(b ? "true" : "false");
4305 }
4306 
4307 void VmaJsonWriter::WriteNull()
4308 {
4309  VMA_ASSERT(!m_InsideString);
4310  BeginValue(false);
4311  m_SB.Add("null");
4312 }
4313 
4314 void VmaJsonWriter::BeginValue(bool isString)
4315 {
4316  if(!m_Stack.empty())
4317  {
4318  StackItem& currItem = m_Stack.back();
4319  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4320  currItem.valueCount % 2 == 0)
4321  {
4322  VMA_ASSERT(isString);
4323  }
4324 
4325  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4326  currItem.valueCount % 2 != 0)
4327  {
4328  m_SB.Add(": ");
4329  }
4330  else if(currItem.valueCount > 0)
4331  {
4332  m_SB.Add(", ");
4333  WriteIndent();
4334  }
4335  else
4336  {
4337  WriteIndent();
4338  }
4339  ++currItem.valueCount;
4340  }
4341 }
4342 
4343 void VmaJsonWriter::WriteIndent(bool oneLess)
4344 {
4345  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4346  {
4347  m_SB.AddNewLine();
4348 
4349  size_t count = m_Stack.size();
4350  if(count > 0 && oneLess)
4351  {
4352  --count;
4353  }
4354  for(size_t i = 0; i < count; ++i)
4355  {
4356  m_SB.Add(INDENT);
4357  }
4358  }
4359 }
4360 
4361 #endif // #if VMA_STATS_STRING_ENABLED
4362 
4364 
4365 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4366 {
4367  if(IsUserDataString())
4368  {
4369  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4370 
4371  FreeUserDataString(hAllocator);
4372 
4373  if(pUserData != VMA_NULL)
4374  {
4375  const char* const newStrSrc = (char*)pUserData;
4376  const size_t newStrLen = strlen(newStrSrc);
4377  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4378  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4379  m_pUserData = newStrDst;
4380  }
4381  }
4382  else
4383  {
4384  m_pUserData = pUserData;
4385  }
4386 }
4387 
4388 VkDeviceSize VmaAllocation_T::GetOffset() const
4389 {
4390  switch(m_Type)
4391  {
4392  case ALLOCATION_TYPE_BLOCK:
4393  return m_BlockAllocation.m_Offset;
4394  case ALLOCATION_TYPE_DEDICATED:
4395  return 0;
4396  default:
4397  VMA_ASSERT(0);
4398  return 0;
4399  }
4400 }
4401 
4402 VkDeviceMemory VmaAllocation_T::GetMemory() const
4403 {
4404  switch(m_Type)
4405  {
4406  case ALLOCATION_TYPE_BLOCK:
4407  return m_BlockAllocation.m_Block->m_hMemory;
4408  case ALLOCATION_TYPE_DEDICATED:
4409  return m_DedicatedAllocation.m_hMemory;
4410  default:
4411  VMA_ASSERT(0);
4412  return VK_NULL_HANDLE;
4413  }
4414 }
4415 
4416 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4417 {
4418  switch(m_Type)
4419  {
4420  case ALLOCATION_TYPE_BLOCK:
4421  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4422  case ALLOCATION_TYPE_DEDICATED:
4423  return m_DedicatedAllocation.m_MemoryTypeIndex;
4424  default:
4425  VMA_ASSERT(0);
4426  return UINT32_MAX;
4427  }
4428 }
4429 
4430 void* VmaAllocation_T::GetMappedData() const
4431 {
4432  switch(m_Type)
4433  {
4434  case ALLOCATION_TYPE_BLOCK:
4435  if(m_MapCount != 0)
4436  {
4437  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4438  VMA_ASSERT(pBlockData != VMA_NULL);
4439  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4440  }
4441  else
4442  {
4443  return VMA_NULL;
4444  }
4445  break;
4446  case ALLOCATION_TYPE_DEDICATED:
4447  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4448  return m_DedicatedAllocation.m_pMappedData;
4449  default:
4450  VMA_ASSERT(0);
4451  return VMA_NULL;
4452  }
4453 }
4454 
4455 bool VmaAllocation_T::CanBecomeLost() const
4456 {
4457  switch(m_Type)
4458  {
4459  case ALLOCATION_TYPE_BLOCK:
4460  return m_BlockAllocation.m_CanBecomeLost;
4461  case ALLOCATION_TYPE_DEDICATED:
4462  return false;
4463  default:
4464  VMA_ASSERT(0);
4465  return false;
4466  }
4467 }
4468 
4469 VmaPool VmaAllocation_T::GetPool() const
4470 {
4471  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4472  return m_BlockAllocation.m_hPool;
4473 }
4474 
4475 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4476 {
4477  VMA_ASSERT(CanBecomeLost());
4478 
4479  /*
4480  Warning: This is a carefully designed algorithm.
4481  Do not modify unless you really know what you're doing :)
4482  */
4483  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4484  for(;;)
4485  {
4486  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4487  {
4488  VMA_ASSERT(0);
4489  return false;
4490  }
4491  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4492  {
4493  return false;
4494  }
4495  else // Last use time earlier than current time.
4496  {
4497  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4498  {
4499  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4500  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4501  return true;
4502  }
4503  }
4504  }
4505 }
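// Note: compare_exchange_weak may fail spuriously or because another thread
// advanced LastUseFrameIndex concurrently; in either case it reloads
// localLastUseFrameIndex with the current value and the loop above
// re-evaluates all three conditions against it.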
4506 
4507 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4508 {
4509  VMA_ASSERT(IsUserDataString());
4510  if(m_pUserData != VMA_NULL)
4511  {
4512  char* const oldStr = (char*)m_pUserData;
4513  const size_t oldStrLen = strlen(oldStr);
4514  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4515  m_pUserData = VMA_NULL;
4516  }
4517 }
4518 
4519 void VmaAllocation_T::BlockAllocMap()
4520 {
4521  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4522 
4523  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4524  {
4525  ++m_MapCount;
4526  }
4527  else
4528  {
4529  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4530  }
4531 }
4532 
4533 void VmaAllocation_T::BlockAllocUnmap()
4534 {
4535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4536 
4537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4538  {
4539  --m_MapCount;
4540  }
4541  else
4542  {
4543  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4544  }
4545 }
4546 
4547 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4548 {
4549  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4550 
4551  if(m_MapCount != 0)
4552  {
4553  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4554  {
4555  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4556  *ppData = m_DedicatedAllocation.m_pMappedData;
4557  ++m_MapCount;
4558  return VK_SUCCESS;
4559  }
4560  else
4561  {
4562  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4563  return VK_ERROR_MEMORY_MAP_FAILED;
4564  }
4565  }
4566  else
4567  {
4568  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4569  hAllocator->m_hDevice,
4570  m_DedicatedAllocation.m_hMemory,
4571  0, // offset
4572  VK_WHOLE_SIZE,
4573  0, // flags
4574  ppData);
4575  if(result == VK_SUCCESS)
4576  {
4577  m_DedicatedAllocation.m_pMappedData = *ppData;
4578  m_MapCount = 1;
4579  }
4580  return result;
4581  }
4582 }
4583 
4584 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4585 {
4586  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4587 
4588  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4589  {
4590  --m_MapCount;
4591  if(m_MapCount == 0)
4592  {
4593  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4594  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4595  hAllocator->m_hDevice,
4596  m_DedicatedAllocation.m_hMemory);
4597  }
4598  }
4599  else
4600  {
4601  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4602  }
4603 }
4604 
4605 #if VMA_STATS_STRING_ENABLED
4606 
4607 // Names corresponding to values of enum VmaSuballocationType.
4608 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4609  "FREE",
4610  "UNKNOWN",
4611  "BUFFER",
4612  "IMAGE_UNKNOWN",
4613  "IMAGE_LINEAR",
4614  "IMAGE_OPTIMAL",
4615 };
4616 
4617 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4618 {
4619  json.BeginObject();
4620 
4621  json.WriteString("Blocks");
4622  json.WriteNumber(stat.blockCount);
4623 
4624  json.WriteString("Allocations");
4625  json.WriteNumber(stat.allocationCount);
4626 
4627  json.WriteString("UnusedRanges");
4628  json.WriteNumber(stat.unusedRangeCount);
4629 
4630  json.WriteString("UsedBytes");
4631  json.WriteNumber(stat.usedBytes);
4632 
4633  json.WriteString("UnusedBytes");
4634  json.WriteNumber(stat.unusedBytes);
4635 
4636  if(stat.allocationCount > 1)
4637  {
4638  json.WriteString("AllocationSize");
4639  json.BeginObject(true);
4640  json.WriteString("Min");
4641  json.WriteNumber(stat.allocationSizeMin);
4642  json.WriteString("Avg");
4643  json.WriteNumber(stat.allocationSizeAvg);
4644  json.WriteString("Max");
4645  json.WriteNumber(stat.allocationSizeMax);
4646  json.EndObject();
4647  }
4648 
4649  if(stat.unusedRangeCount > 1)
4650  {
4651  json.WriteString("UnusedRangeSize");
4652  json.BeginObject(true);
4653  json.WriteString("Min");
4654  json.WriteNumber(stat.unusedRangeSizeMin);
4655  json.WriteString("Avg");
4656  json.WriteNumber(stat.unusedRangeSizeAvg);
4657  json.WriteString("Max");
4658  json.WriteNumber(stat.unusedRangeSizeMax);
4659  json.EndObject();
4660  }
4661 
4662  json.EndObject();
4663 }
4664 
4665 #endif // #if VMA_STATS_STRING_ENABLED
4666 
4667 struct VmaSuballocationItemSizeLess
4668 {
4669  bool operator()(
4670  const VmaSuballocationList::iterator lhs,
4671  const VmaSuballocationList::iterator rhs) const
4672  {
4673  return lhs->size < rhs->size;
4674  }
4675  bool operator()(
4676  const VmaSuballocationList::iterator lhs,
4677  VkDeviceSize rhsSize) const
4678  {
4679  return lhs->size < rhsSize;
4680  }
4681 };
4682 
4684 // class VmaBlockMetadata
4685 
4686 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4687  m_Size(0),
4688  m_FreeCount(0),
4689  m_SumFreeSize(0),
4690  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4691  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4692 {
4693 }
4694 
4695 VmaBlockMetadata::~VmaBlockMetadata()
4696 {
4697 }
4698 
4699 void VmaBlockMetadata::Init(VkDeviceSize size)
4700 {
4701  m_Size = size;
4702  m_FreeCount = 1;
4703  m_SumFreeSize = size;
4704 
4705  VmaSuballocation suballoc = {};
4706  suballoc.offset = 0;
4707  suballoc.size = size;
4708  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4709  suballoc.hAllocation = VK_NULL_HANDLE;
4710 
4711  m_Suballocations.push_back(suballoc);
4712  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4713  --suballocItem;
4714  m_FreeSuballocationsBySize.push_back(suballocItem);
4715 }
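// After Init(size), the whole block is a single FREE suballocation [0, size),
// which is also the only entry in m_FreeSuballocationsBySize; IsEmpty()
// therefore returns true until the first allocation is made.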
4716 
4717 bool VmaBlockMetadata::Validate() const
4718 {
4719  if(m_Suballocations.empty())
4720  {
4721  return false;
4722  }
4723 
4724  // Expected offset of the next suballocation, as calculated from the previous ones.
4725  VkDeviceSize calculatedOffset = 0;
4726  // Expected number of free suballocations as calculated from traversing their list.
4727  uint32_t calculatedFreeCount = 0;
4728  // Expected sum size of free suballocations as calculated from traversing their list.
4729  VkDeviceSize calculatedSumFreeSize = 0;
4730  // Expected number of free suballocations that should be registered in
4731  // m_FreeSuballocationsBySize calculated from traversing their list.
4732  size_t freeSuballocationsToRegister = 0;
4733  // True if the previously visited suballocation was free.
4734  bool prevFree = false;
4735 
4736  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4737  suballocItem != m_Suballocations.cend();
4738  ++suballocItem)
4739  {
4740  const VmaSuballocation& subAlloc = *suballocItem;
4741 
4742  // Actual offset of this suballocation doesn't match expected one.
4743  if(subAlloc.offset != calculatedOffset)
4744  {
4745  return false;
4746  }
4747 
4748  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4749  // Two adjacent free suballocations are invalid. They should be merged.
4750  if(prevFree && currFree)
4751  {
4752  return false;
4753  }
4754  prevFree = currFree;
4755 
4756  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4757  {
4758  return false;
4759  }
4760 
4761  if(currFree)
4762  {
4763  calculatedSumFreeSize += subAlloc.size;
4764  ++calculatedFreeCount;
4765  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4766  {
4767  ++freeSuballocationsToRegister;
4768  }
4769  }
4770 
4771  calculatedOffset += subAlloc.size;
4772  }
4773 
4774  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4775  // match expected one.
4776  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4777  {
4778  return false;
4779  }
4780 
4781  VkDeviceSize lastSize = 0;
4782  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4783  {
4784  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4785 
4786  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4787  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4788  {
4789  return false;
4790  }
4791  // They must be sorted by size ascending.
4792  if(suballocItem->size < lastSize)
4793  {
4794  return false;
4795  }
4796 
4797  lastSize = suballocItem->size;
4798  }
4799 
4800  // Check if totals match the calculated values.
4801  return
4802  ValidateFreeSuballocationList() &&
4803  (calculatedOffset == m_Size) &&
4804  (calculatedSumFreeSize == m_SumFreeSize) &&
4805  (calculatedFreeCount == m_FreeCount);
4806 }
4807 
4808 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4809 {
4810  if(!m_FreeSuballocationsBySize.empty())
4811  {
4812  return m_FreeSuballocationsBySize.back()->size;
4813  }
4814  else
4815  {
4816  return 0;
4817  }
4818 }
4819 
4820 bool VmaBlockMetadata::IsEmpty() const
4821 {
4822  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4823 }
4824 
4825 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4826 {
4827  outInfo.blockCount = 1;
4828 
4829  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4830  outInfo.allocationCount = rangeCount - m_FreeCount;
4831  outInfo.unusedRangeCount = m_FreeCount;
4832 
4833  outInfo.unusedBytes = m_SumFreeSize;
4834  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4835 
4836  outInfo.allocationSizeMin = UINT64_MAX;
4837  outInfo.allocationSizeMax = 0;
4838  outInfo.unusedRangeSizeMin = UINT64_MAX;
4839  outInfo.unusedRangeSizeMax = 0;
4840 
4841  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4842  suballocItem != m_Suballocations.cend();
4843  ++suballocItem)
4844  {
4845  const VmaSuballocation& suballoc = *suballocItem;
4846  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4847  {
4848  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4849  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4850  }
4851  else
4852  {
4853  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4854  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4855  }
4856  }
4857 }
4858 
4859 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4860 {
4861  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4862 
4863  inoutStats.size += m_Size;
4864  inoutStats.unusedSize += m_SumFreeSize;
4865  inoutStats.allocationCount += rangeCount - m_FreeCount;
4866  inoutStats.unusedRangeCount += m_FreeCount;
4867  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4868 }
4869 
4870 #if VMA_STATS_STRING_ENABLED
4871 
4872 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4873 {
4874  json.BeginObject();
4875 
4876  json.WriteString("TotalBytes");
4877  json.WriteNumber(m_Size);
4878 
4879  json.WriteString("UnusedBytes");
4880  json.WriteNumber(m_SumFreeSize);
4881 
4882  json.WriteString("Allocations");
4883  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4884 
4885  json.WriteString("UnusedRanges");
4886  json.WriteNumber(m_FreeCount);
4887 
4888  json.WriteString("Suballocations");
4889  json.BeginArray();
4890  size_t i = 0;
4891  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4892  suballocItem != m_Suballocations.cend();
4893  ++suballocItem, ++i)
4894  {
4895  json.BeginObject(true);
4896 
4897  json.WriteString("Type");
4898  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4899 
4900  json.WriteString("Size");
4901  json.WriteNumber(suballocItem->size);
4902 
4903  json.WriteString("Offset");
4904  json.WriteNumber(suballocItem->offset);
4905 
4906  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4907  {
4908  const void* pUserData = suballocItem->hAllocation->GetUserData();
4909  if(pUserData != VMA_NULL)
4910  {
4911  json.WriteString("UserData");
4912  if(suballocItem->hAllocation->IsUserDataString())
4913  {
4914  json.WriteString((const char*)pUserData);
4915  }
4916  else
4917  {
4918  json.BeginString();
4919  json.ContinueString_Pointer(pUserData);
4920  json.EndString();
4921  }
4922  }
4923  }
4924 
4925  json.EndObject();
4926  }
4927  json.EndArray();
4928 
4929  json.EndObject();
4930 }
4931 
4932 #endif // #if VMA_STATS_STRING_ENABLED
4933 
4934 /*
4935 How many suitable free suballocations to analyze before choosing the best one.
4936 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
4937  will be chosen.
4938 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4939  suballocations will be analyzed and the best one will be chosen.
4940 - Any other value is also acceptable.
4941 */
4942 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
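// Illustrative example (not library code): with registered free sizes
// {32 KiB, 64 KiB, 256 KiB} sorted ascending and a 48 KiB request, the
// VMA_BEST_FIT path below binary-searches to the 64 KiB range and scans
// forward; the alternative path starts from the largest range (256 KiB)
// and walks down.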
4943 
4944 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4945 {
4946  VMA_ASSERT(IsEmpty());
4947  pAllocationRequest->offset = 0;
4948  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4949  pAllocationRequest->sumItemSize = 0;
4950  pAllocationRequest->item = m_Suballocations.begin();
4951  pAllocationRequest->itemsToMakeLostCount = 0;
4952 }
4953 
4954 bool VmaBlockMetadata::CreateAllocationRequest(
4955  uint32_t currentFrameIndex,
4956  uint32_t frameInUseCount,
4957  VkDeviceSize bufferImageGranularity,
4958  VkDeviceSize allocSize,
4959  VkDeviceSize allocAlignment,
4960  VmaSuballocationType allocType,
4961  bool canMakeOtherLost,
4962  VmaAllocationRequest* pAllocationRequest)
4963 {
4964  VMA_ASSERT(allocSize > 0);
4965  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4966  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4967  VMA_HEAVY_ASSERT(Validate());
4968 
4969  // There is not enough total free space in this block to fulfill the request: Early return.
4970  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4971  {
4972  return false;
4973  }
4974 
4975  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4976  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4977  if(freeSuballocCount > 0)
4978  {
4979  if(VMA_BEST_FIT)
4980  {
4981  // Find first free suballocation with size not less than allocSize.
4982  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4983  m_FreeSuballocationsBySize.data(),
4984  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4985  allocSize,
4986  VmaSuballocationItemSizeLess());
4987  size_t index = it - m_FreeSuballocationsBySize.data();
4988  for(; index < freeSuballocCount; ++index)
4989  {
4990  if(CheckAllocation(
4991  currentFrameIndex,
4992  frameInUseCount,
4993  bufferImageGranularity,
4994  allocSize,
4995  allocAlignment,
4996  allocType,
4997  m_FreeSuballocationsBySize[index],
4998  false, // canMakeOtherLost
4999  &pAllocationRequest->offset,
5000  &pAllocationRequest->itemsToMakeLostCount,
5001  &pAllocationRequest->sumFreeSize,
5002  &pAllocationRequest->sumItemSize))
5003  {
5004  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5005  return true;
5006  }
5007  }
5008  }
5009  else
5010  {
5011  // Search starting from the biggest suballocations.
5012  for(size_t index = freeSuballocCount; index--; )
5013  {
5014  if(CheckAllocation(
5015  currentFrameIndex,
5016  frameInUseCount,
5017  bufferImageGranularity,
5018  allocSize,
5019  allocAlignment,
5020  allocType,
5021  m_FreeSuballocationsBySize[index],
5022  false, // canMakeOtherLost
5023  &pAllocationRequest->offset,
5024  &pAllocationRequest->itemsToMakeLostCount,
5025  &pAllocationRequest->sumFreeSize,
5026  &pAllocationRequest->sumItemSize))
5027  {
5028  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5029  return true;
5030  }
5031  }
5032  }
5033  }
5034 
5035  if(canMakeOtherLost)
5036  {
5037  // Brute-force algorithm. TODO: Come up with something better.
5038 
5039  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5040  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5041 
5042  VmaAllocationRequest tmpAllocRequest = {};
5043  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5044  suballocIt != m_Suballocations.end();
5045  ++suballocIt)
5046  {
5047  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5048  suballocIt->hAllocation->CanBecomeLost())
5049  {
5050  if(CheckAllocation(
5051  currentFrameIndex,
5052  frameInUseCount,
5053  bufferImageGranularity,
5054  allocSize,
5055  allocAlignment,
5056  allocType,
5057  suballocIt,
5058  canMakeOtherLost,
5059  &tmpAllocRequest.offset,
5060  &tmpAllocRequest.itemsToMakeLostCount,
5061  &tmpAllocRequest.sumFreeSize,
5062  &tmpAllocRequest.sumItemSize))
5063  {
5064  tmpAllocRequest.item = suballocIt;
5065 
5066  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5067  {
5068  *pAllocationRequest = tmpAllocRequest;
5069  }
5070  }
5071  }
5072  }
5073 
5074  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5075  {
5076  return true;
5077  }
5078  }
5079 
5080  return false;
5081 }
5082 
5083 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5084  uint32_t currentFrameIndex,
5085  uint32_t frameInUseCount,
5086  VmaAllocationRequest* pAllocationRequest)
5087 {
5088  while(pAllocationRequest->itemsToMakeLostCount > 0)
5089  {
5090  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5091  {
5092  ++pAllocationRequest->item;
5093  }
5094  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5095  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5096  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5097  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5098  {
5099  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5100  --pAllocationRequest->itemsToMakeLostCount;
5101  }
5102  else
5103  {
5104  return false;
5105  }
5106  }
5107 
5108  VMA_HEAVY_ASSERT(Validate());
5109  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5110  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5111 
5112  return true;
5113 }
5114 
5115 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5116 {
5117  uint32_t lostAllocationCount = 0;
5118  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5119  it != m_Suballocations.end();
5120  ++it)
5121  {
5122  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5123  it->hAllocation->CanBecomeLost() &&
5124  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5125  {
5126  it = FreeSuballocation(it);
5127  ++lostAllocationCount;
5128  }
5129  }
5130  return lostAllocationCount;
5131 }
5132 
5133 void VmaBlockMetadata::Alloc(
5134  const VmaAllocationRequest& request,
5135  VmaSuballocationType type,
5136  VkDeviceSize allocSize,
5137  VmaAllocation hAllocation)
5138 {
5139  VMA_ASSERT(request.item != m_Suballocations.end());
5140  VmaSuballocation& suballoc = *request.item;
5141  // Given suballocation is a free block.
5142  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5143  // Given offset is inside this suballocation.
5144  VMA_ASSERT(request.offset >= suballoc.offset);
5145  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5146  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5147  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5148 
5149  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5150  // it to become used.
5151  UnregisterFreeSuballocation(request.item);
5152 
5153  suballoc.offset = request.offset;
5154  suballoc.size = allocSize;
5155  suballoc.type = type;
5156  suballoc.hAllocation = hAllocation;
5157 
5158  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5159  if(paddingEnd)
5160  {
5161  VmaSuballocation paddingSuballoc = {};
5162  paddingSuballoc.offset = request.offset + allocSize;
5163  paddingSuballoc.size = paddingEnd;
5164  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5165  VmaSuballocationList::iterator next = request.item;
5166  ++next;
5167  const VmaSuballocationList::iterator paddingEndItem =
5168  m_Suballocations.insert(next, paddingSuballoc);
5169  RegisterFreeSuballocation(paddingEndItem);
5170  }
5171 
5172  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5173  if(paddingBegin)
5174  {
5175  VmaSuballocation paddingSuballoc = {};
5176  paddingSuballoc.offset = request.offset - paddingBegin;
5177  paddingSuballoc.size = paddingBegin;
5178  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5179  const VmaSuballocationList::iterator paddingBeginItem =
5180  m_Suballocations.insert(request.item, paddingSuballoc);
5181  RegisterFreeSuballocation(paddingBeginItem);
5182  }
5183 
5184  // Update totals.
5185  m_FreeCount = m_FreeCount - 1;
5186  if(paddingBegin > 0)
5187  {
5188  ++m_FreeCount;
5189  }
5190  if(paddingEnd > 0)
5191  {
5192  ++m_FreeCount;
5193  }
5194  m_SumFreeSize -= allocSize;
5195 }
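// Worked example (illustrative): allocating 192 bytes at request.offset == 64
// out of a FREE suballocation [0, 512) yields FREE [0, 64) (paddingBegin),
// the new allocation [64, 256), and FREE [256, 512) (paddingEnd);
// m_FreeCount goes from 1 to 2 and m_SumFreeSize decreases by 192.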
5196 
5197 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5198 {
5199  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5200  suballocItem != m_Suballocations.end();
5201  ++suballocItem)
5202  {
5203  VmaSuballocation& suballoc = *suballocItem;
5204  if(suballoc.hAllocation == allocation)
5205  {
5206  FreeSuballocation(suballocItem);
5207  VMA_HEAVY_ASSERT(Validate());
5208  return;
5209  }
5210  }
5211  VMA_ASSERT(0 && "Not found!");
5212 }
5213 
5214 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5215 {
5216  VkDeviceSize lastSize = 0;
5217  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5218  {
5219  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5220 
5221  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5222  {
5223  VMA_ASSERT(0);
5224  return false;
5225  }
5226  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5227  {
5228  VMA_ASSERT(0);
5229  return false;
5230  }
5231  if(it->size < lastSize)
5232  {
5233  VMA_ASSERT(0);
5234  return false;
5235  }
5236 
5237  lastSize = it->size;
5238  }
5239  return true;
5240 }
5241 
5242 bool VmaBlockMetadata::CheckAllocation(
5243  uint32_t currentFrameIndex,
5244  uint32_t frameInUseCount,
5245  VkDeviceSize bufferImageGranularity,
5246  VkDeviceSize allocSize,
5247  VkDeviceSize allocAlignment,
5248  VmaSuballocationType allocType,
5249  VmaSuballocationList::const_iterator suballocItem,
5250  bool canMakeOtherLost,
5251  VkDeviceSize* pOffset,
5252  size_t* itemsToMakeLostCount,
5253  VkDeviceSize* pSumFreeSize,
5254  VkDeviceSize* pSumItemSize) const
5255 {
5256  VMA_ASSERT(allocSize > 0);
5257  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5258  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5259  VMA_ASSERT(pOffset != VMA_NULL);
5260 
5261  *itemsToMakeLostCount = 0;
5262  *pSumFreeSize = 0;
5263  *pSumItemSize = 0;
5264 
5265  if(canMakeOtherLost)
5266  {
5267  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5268  {
5269  *pSumFreeSize = suballocItem->size;
5270  }
5271  else
5272  {
5273  if(suballocItem->hAllocation->CanBecomeLost() &&
5274  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5275  {
5276  ++*itemsToMakeLostCount;
5277  *pSumItemSize = suballocItem->size;
5278  }
5279  else
5280  {
5281  return false;
5282  }
5283  }
5284 
5285  // Remaining size is too small for this request: Early return.
5286  if(m_Size - suballocItem->offset < allocSize)
5287  {
5288  return false;
5289  }
5290 
5291  // Start from offset equal to beginning of this suballocation.
5292  *pOffset = suballocItem->offset;
5293 
5294  // Apply VMA_DEBUG_MARGIN at the beginning.
5295  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5296  {
5297  *pOffset += VMA_DEBUG_MARGIN;
5298  }
5299 
5300  // Apply alignment.
5301  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5302  *pOffset = VmaAlignUp(*pOffset, alignment);
5303 
5304  // Check previous suballocations for BufferImageGranularity conflicts.
5305  // Make bigger alignment if necessary.
5306  if(bufferImageGranularity > 1)
5307  {
5308  bool bufferImageGranularityConflict = false;
5309  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5310  while(prevSuballocItem != m_Suballocations.cbegin())
5311  {
5312  --prevSuballocItem;
5313  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5314  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5315  {
5316  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5317  {
5318  bufferImageGranularityConflict = true;
5319  break;
5320  }
5321  }
5322  else
5323  // Already on previous page.
5324  break;
5325  }
5326  if(bufferImageGranularityConflict)
5327  {
5328  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5329  }
5330  }
5331 
5332  // Now that we have final *pOffset, check if we are past suballocItem.
5333  // If yes, return false - this function should be called for another suballocItem as starting point.
5334  if(*pOffset >= suballocItem->offset + suballocItem->size)
5335  {
5336  return false;
5337  }
5338 
5339  // Calculate padding at the beginning based on current offset.
5340  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5341 
5342  // Calculate required margin at the end if this is not last suballocation.
5343  VmaSuballocationList::const_iterator next = suballocItem;
5344  ++next;
5345  const VkDeviceSize requiredEndMargin =
5346  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5347 
5348  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5349  // Another early return check.
5350  if(suballocItem->offset + totalSize > m_Size)
5351  {
5352  return false;
5353  }
5354 
5355  // Advance lastSuballocItem until desired size is reached.
5356  // Update itemsToMakeLostCount.
5357  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5358  if(totalSize > suballocItem->size)
5359  {
5360  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5361  while(remainingSize > 0)
5362  {
5363  ++lastSuballocItem;
5364  if(lastSuballocItem == m_Suballocations.cend())
5365  {
5366  return false;
5367  }
5368  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5369  {
5370  *pSumFreeSize += lastSuballocItem->size;
5371  }
5372  else
5373  {
5374  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5375  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5376  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5377  {
5378  ++*itemsToMakeLostCount;
5379  *pSumItemSize += lastSuballocItem->size;
5380  }
5381  else
5382  {
5383  return false;
5384  }
5385  }
5386  remainingSize = (lastSuballocItem->size < remainingSize) ?
5387  remainingSize - lastSuballocItem->size : 0;
5388  }
5389  }
5390 
5391  // Check next suballocations for BufferImageGranularity conflicts.
5392  // If conflict exists, we must mark more allocations lost or fail.
5393  if(bufferImageGranularity > 1)
5394  {
5395  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5396  ++nextSuballocItem;
5397  while(nextSuballocItem != m_Suballocations.cend())
5398  {
5399  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5400  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5401  {
5402  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5403  {
5404  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5405  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5406  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5407  {
5408  ++*itemsToMakeLostCount;
5409  }
5410  else
5411  {
5412  return false;
5413  }
5414  }
5415  }
5416  else
5417  {
5418  // Already on next page.
5419  break;
5420  }
5421  ++nextSuballocItem;
5422  }
5423  }
5424  }
5425  else
5426  {
5427  const VmaSuballocation& suballoc = *suballocItem;
5428  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5429 
5430  *pSumFreeSize = suballoc.size;
5431 
5432  // Size of this suballocation is too small for this request: Early return.
5433  if(suballoc.size < allocSize)
5434  {
5435  return false;
5436  }
5437 
5438  // Start from offset equal to beginning of this suballocation.
5439  *pOffset = suballoc.offset;
5440 
5441  // Apply VMA_DEBUG_MARGIN at the beginning.
5442  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5443  {
5444  *pOffset += VMA_DEBUG_MARGIN;
5445  }
5446 
5447  // Apply alignment.
5448  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5449  *pOffset = VmaAlignUp(*pOffset, alignment);
5450 
5451  // Check previous suballocations for BufferImageGranularity conflicts.
5452  // Make bigger alignment if necessary.
5453  if(bufferImageGranularity > 1)
5454  {
5455  bool bufferImageGranularityConflict = false;
5456  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5457  while(prevSuballocItem != m_Suballocations.cbegin())
5458  {
5459  --prevSuballocItem;
5460  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5461  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5462  {
5463  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5464  {
5465  bufferImageGranularityConflict = true;
5466  break;
5467  }
5468  }
5469  else
5470  // Already on previous page.
5471  break;
5472  }
5473  if(bufferImageGranularityConflict)
5474  {
5475  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5476  }
5477  }
5478 
5479  // Calculate padding at the beginning based on current offset.
5480  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5481 
5482  // Calculate required margin at the end if this is not last suballocation.
5483  VmaSuballocationList::const_iterator next = suballocItem;
5484  ++next;
5485  const VkDeviceSize requiredEndMargin =
5486  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5487 
5488  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5489  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5490  {
5491  return false;
5492  }
5493 
5494  // Check next suballocations for BufferImageGranularity conflicts.
5495  // If conflict exists, allocation cannot be made here.
5496  if(bufferImageGranularity > 1)
5497  {
5498  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5499  ++nextSuballocItem;
5500  while(nextSuballocItem != m_Suballocations.cend())
5501  {
5502  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5503  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5504  {
5505  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5506  {
5507  return false;
5508  }
5509  }
5510  else
5511  {
5512  // Already on next page.
5513  break;
5514  }
5515  ++nextSuballocItem;
5516  }
5517  }
5518  }
5519 
5520  // All tests passed: Success. pOffset is already filled.
5521  return true;
5522 }
5523 
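// Worked example for the offset computation above (illustrative, assuming
// VMA_DEBUG_MARGIN = 0): for a free suballocation at offset 1000 with
// allocAlignment = 256 and bufferImageGranularity = 1024, the candidate offset
// is first aligned up: VmaAlignUp(1000, 256) = 1024. If a previous
// suballocation ends on the same 1024-byte page and its type conflicts (e.g.
// linear buffer vs. optimal-tiling image), the offset is additionally aligned
// up to the granularity, and the request succeeds only if
// paddingBegin + allocSize still fits inside the suballocation.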
5524 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5525 {
5526  VMA_ASSERT(item != m_Suballocations.end());
5527  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5528 
5529  VmaSuballocationList::iterator nextItem = item;
5530  ++nextItem;
5531  VMA_ASSERT(nextItem != m_Suballocations.end());
5532  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5533 
5534  item->size += nextItem->size;
5535  --m_FreeCount;
5536  m_Suballocations.erase(nextItem);
5537 }
5538 
5539 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5540 {
5541  // Change this suballocation to be marked as free.
5542  VmaSuballocation& suballoc = *suballocItem;
5543  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5544  suballoc.hAllocation = VK_NULL_HANDLE;
5545 
5546  // Update totals.
5547  ++m_FreeCount;
5548  m_SumFreeSize += suballoc.size;
5549 
5550  // Merge with previous and/or next suballocation if it's also free.
5551  bool mergeWithNext = false;
5552  bool mergeWithPrev = false;
5553 
5554  VmaSuballocationList::iterator nextItem = suballocItem;
5555  ++nextItem;
5556  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5557  {
5558  mergeWithNext = true;
5559  }
5560 
5561  VmaSuballocationList::iterator prevItem = suballocItem;
5562  if(suballocItem != m_Suballocations.begin())
5563  {
5564  --prevItem;
5565  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5566  {
5567  mergeWithPrev = true;
5568  }
5569  }
5570 
5571  if(mergeWithNext)
5572  {
5573  UnregisterFreeSuballocation(nextItem);
5574  MergeFreeWithNext(suballocItem);
5575  }
5576 
5577  if(mergeWithPrev)
5578  {
5579  UnregisterFreeSuballocation(prevItem);
5580  MergeFreeWithNext(prevItem);
5581  RegisterFreeSuballocation(prevItem);
5582  return prevItem;
5583  }
5584  else
5585  {
5586  RegisterFreeSuballocation(suballocItem);
5587  return suballocItem;
5588  }
5589 }
5590 
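// The three coalescing cases handled above, sketched (F = free, U = used):
//   [U][x][U] -> mark x free, no merge                  (returns suballocItem)
//   [U][x][F] -> merge next into x                      (returns suballocItem)
//   [F][x][?] -> merge x (and next, if free) into prev  (returns prevItem)
// Coalescing guarantees that two free neighbors never coexist in the list.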
5591 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5592 {
5593  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5594  VMA_ASSERT(item->size > 0);
5595 
5596  // You may want to enable this validation at the beginning or at the end of
5597  // this function, depending on what you want to check.
5598  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5599 
5600  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5601  {
5602  if(m_FreeSuballocationsBySize.empty())
5603  {
5604  m_FreeSuballocationsBySize.push_back(item);
5605  }
5606  else
5607  {
5608  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5609  }
5610  }
5611 
5612  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5613 }
5614 
5615 
5616 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5617 {
5618  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5619  VMA_ASSERT(item->size > 0);
5620 
5621  // You may want to enable this validation at the beginning or at the end of
5622  // this function, depending on what you want to check.
5623  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5624 
5625  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5626  {
5627  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5628  m_FreeSuballocationsBySize.data(),
5629  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5630  item,
5631  VmaSuballocationItemSizeLess());
5632  for(size_t index = it - m_FreeSuballocationsBySize.data();
5633  index < m_FreeSuballocationsBySize.size();
5634  ++index)
5635  {
5636  if(m_FreeSuballocationsBySize[index] == item)
5637  {
5638  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5639  return;
5640  }
5641  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5642  }
5643  VMA_ASSERT(0 && "Not found.");
5644  }
5645 
5646  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5647 }
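// Note on the lookup above: VmaBinaryFindFirstNotLess returns the first entry
// whose size is not less than item->size. Because several free suballocations
// may share the same size, a short linear scan over that equal-size run is
// still needed to find the exact iterator - hence the assert that every
// skipped entry has the same size as the item being removed.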
5648 
5649 ////////////////////////////////////////////////////////////////////////////////
5650 // class VmaDeviceMemoryMapping
5651 
5652 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5653  m_MapCount(0),
5654  m_pMappedData(VMA_NULL)
5655 {
5656 }
5657 
5658 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5659 {
5660  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5661 }
5662 
5663 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
5664 {
5665  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5666  if(m_MapCount != 0)
5667  {
5668  ++m_MapCount;
5669  VMA_ASSERT(m_pMappedData != VMA_NULL);
5670  if(ppData != VMA_NULL)
5671  {
5672  *ppData = m_pMappedData;
5673  }
5674  return VK_SUCCESS;
5675  }
5676  else
5677  {
5678  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5679  hAllocator->m_hDevice,
5680  hMemory,
5681  0, // offset
5682  VK_WHOLE_SIZE,
5683  0, // flags
5684  &m_pMappedData);
5685  if(result == VK_SUCCESS)
5686  {
5687  if(ppData != VMA_NULL)
5688  {
5689  *ppData = m_pMappedData;
5690  }
5691  m_MapCount = 1;
5692  }
5693  return result;
5694  }
5695 }
5696 
5697 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
5698 {
5699  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5700  if(m_MapCount != 0)
5701  {
5702  if(--m_MapCount == 0)
5703  {
5704  m_pMappedData = VMA_NULL;
5705  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5706  }
5707  }
5708  else
5709  {
5710  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5711  }
5712 }
5713 
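// Minimal usage sketch of the reference-counted mapping above (hypothetical,
// not from the source) - only the first Map() calls vkMapMemory and only the
// last Unmap() calls vkUnmapMemory:
//
//   void* p1; mapping.Map(hAllocator, hMemory, &p1); // maps, m_MapCount == 1
//   void* p2; mapping.Map(hAllocator, hMemory, &p2); // reuses, p2 == p1
//   mapping.Unmap(hAllocator, hMemory);              // m_MapCount back to 1
//   mapping.Unmap(hAllocator, hMemory);              // actually unmaps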
5714 ////////////////////////////////////////////////////////////////////////////////
5715 // class VmaDeviceMemoryBlock
5716 
5717 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5718  m_MemoryTypeIndex(UINT32_MAX),
5719  m_hMemory(VK_NULL_HANDLE),
5720  m_Metadata(hAllocator)
5721 {
5722 }
5723 
5724 void VmaDeviceMemoryBlock::Init(
5725  uint32_t newMemoryTypeIndex,
5726  VkDeviceMemory newMemory,
5727  VkDeviceSize newSize)
5728 {
5729  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5730 
5731  m_MemoryTypeIndex = newMemoryTypeIndex;
5732  m_hMemory = newMemory;
5733 
5734  m_Metadata.Init(newSize);
5735 }
5736 
5737 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5738 {
5739  // This is the most important assert in the entire library.
5740  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5741  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5742 
5743  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5744  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5745  m_hMemory = VK_NULL_HANDLE;
5746 }
5747 
5748 bool VmaDeviceMemoryBlock::Validate() const
5749 {
5750  if((m_hMemory == VK_NULL_HANDLE) ||
5751  (m_Metadata.GetSize() == 0))
5752  {
5753  return false;
5754  }
5755 
5756  return m_Metadata.Validate();
5757 }
5758 
5759 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
5760 {
5761  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
5762 }
5763 
5764 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
5765 {
5766  m_Mapping.Unmap(hAllocator, m_hMemory);
5767 }
5768 
5769 static void InitStatInfo(VmaStatInfo& outInfo)
5770 {
5771  memset(&outInfo, 0, sizeof(outInfo));
5772  outInfo.allocationSizeMin = UINT64_MAX;
5773  outInfo.unusedRangeSizeMin = UINT64_MAX;
5774 }
5775 
5776 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5777 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5778 {
5779  inoutInfo.blockCount += srcInfo.blockCount;
5780  inoutInfo.allocationCount += srcInfo.allocationCount;
5781  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5782  inoutInfo.usedBytes += srcInfo.usedBytes;
5783  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5784  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5785  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5786  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5787  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5788 }
5789 
5790 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5791 {
5792  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5793  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5794  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5795  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5796 }
5797 
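// Example of the averaging above (illustrative numbers): with usedBytes = 1000
// and allocationCount = 6, VmaRoundDiv rounds to the nearest integer, giving
// an allocationSizeAvg of 167 where plain truncating division would give 166.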
5798 VmaPool_T::VmaPool_T(
5799  VmaAllocator hAllocator,
5800  const VmaPoolCreateInfo& createInfo) :
5801  m_BlockVector(
5802  hAllocator,
5803  createInfo.memoryTypeIndex,
5804  createInfo.blockSize,
5805  createInfo.minBlockCount,
5806  createInfo.maxBlockCount,
5807  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5808  createInfo.frameInUseCount,
5809  true) // isCustomPool
5810 {
5811 }
5812 
5813 VmaPool_T::~VmaPool_T()
5814 {
5815 }
5816 
5817 #if VMA_STATS_STRING_ENABLED
5818 
5819 #endif // #if VMA_STATS_STRING_ENABLED
5820 
5821 VmaBlockVector::VmaBlockVector(
5822  VmaAllocator hAllocator,
5823  uint32_t memoryTypeIndex,
5824  VkDeviceSize preferredBlockSize,
5825  size_t minBlockCount,
5826  size_t maxBlockCount,
5827  VkDeviceSize bufferImageGranularity,
5828  uint32_t frameInUseCount,
5829  bool isCustomPool) :
5830  m_hAllocator(hAllocator),
5831  m_MemoryTypeIndex(memoryTypeIndex),
5832  m_PreferredBlockSize(preferredBlockSize),
5833  m_MinBlockCount(minBlockCount),
5834  m_MaxBlockCount(maxBlockCount),
5835  m_BufferImageGranularity(bufferImageGranularity),
5836  m_FrameInUseCount(frameInUseCount),
5837  m_IsCustomPool(isCustomPool),
5838  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5839  m_HasEmptyBlock(false),
5840  m_pDefragmentator(VMA_NULL)
5841 {
5842 }
5843 
5844 VmaBlockVector::~VmaBlockVector()
5845 {
5846  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5847 
5848  for(size_t i = m_Blocks.size(); i--; )
5849  {
5850  m_Blocks[i]->Destroy(m_hAllocator);
5851  vma_delete(m_hAllocator, m_Blocks[i]);
5852  }
5853 }
5854 
5855 VkResult VmaBlockVector::CreateMinBlocks()
5856 {
5857  for(size_t i = 0; i < m_MinBlockCount; ++i)
5858  {
5859  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5860  if(res != VK_SUCCESS)
5861  {
5862  return res;
5863  }
5864  }
5865  return VK_SUCCESS;
5866 }
5867 
5868 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5869 {
5870  pStats->size = 0;
5871  pStats->unusedSize = 0;
5872  pStats->allocationCount = 0;
5873  pStats->unusedRangeCount = 0;
5874  pStats->unusedRangeSizeMax = 0;
5875 
5876  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5877 
5878  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5879  {
5880  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5881  VMA_ASSERT(pBlock);
5882  VMA_HEAVY_ASSERT(pBlock->Validate());
5883  pBlock->m_Metadata.AddPoolStats(*pStats);
5884  }
5885 }
5886 
5887 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5888 
5889 VkResult VmaBlockVector::Allocate(
5890  VmaPool hCurrentPool,
5891  uint32_t currentFrameIndex,
5892  const VkMemoryRequirements& vkMemReq,
5893  const VmaAllocationCreateInfo& createInfo,
5894  VmaSuballocationType suballocType,
5895  VmaAllocation* pAllocation)
5896 {
5897  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
5898  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
5899 
5900  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5901 
5902  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5903  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5904  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5905  {
5906  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5907  VMA_ASSERT(pCurrBlock);
5908  VmaAllocationRequest currRequest = {};
5909  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5910  currentFrameIndex,
5911  m_FrameInUseCount,
5912  m_BufferImageGranularity,
5913  vkMemReq.size,
5914  vkMemReq.alignment,
5915  suballocType,
5916  false, // canMakeOtherLost
5917  &currRequest))
5918  {
5919  // Allocate from pCurrBlock.
5920  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5921 
5922  if(mapped)
5923  {
5924  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
5925  if(res != VK_SUCCESS)
5926  {
5927  return res;
5928  }
5929  }
5930 
5931  // Allocating from this block makes it non-empty, so we no longer have an empty block.
5932  if(pCurrBlock->m_Metadata.IsEmpty())
5933  {
5934  m_HasEmptyBlock = false;
5935  }
5936 
5937  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5938  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5939  (*pAllocation)->InitBlockAllocation(
5940  hCurrentPool,
5941  pCurrBlock,
5942  currRequest.offset,
5943  vkMemReq.alignment,
5944  vkMemReq.size,
5945  suballocType,
5946  mapped,
5947  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5948  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5949  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5950  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5951  return VK_SUCCESS;
5952  }
5953  }
5954 
5955  const bool canCreateNewBlock =
5956  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5957  (m_Blocks.size() < m_MaxBlockCount);
5958 
5959  // 2. Try to create new block.
5960  if(canCreateNewBlock)
5961  {
5962  // Calculate optimal size for new block.
5963  VkDeviceSize newBlockSize = m_PreferredBlockSize;
5964  uint32_t newBlockSizeShift = 0;
5965  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
5966 
5967  // Allocating blocks of other sizes is allowed only in default pools.
5968  // In custom pools block size is fixed.
5969  if(m_IsCustomPool == false)
5970  {
5971  // Allocate 1/8, 1/4, 1/2 as first blocks.
5972  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
5973  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
5974  {
5975  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
5976  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
5977  {
5978  newBlockSize = smallerNewBlockSize;
5979  ++newBlockSizeShift;
5980  }
5981  else
5982  {
5983  break;
5984  }
5985  }
5986  }
5987 
5988  size_t newBlockIndex = 0;
5989  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
5990  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
5991  if(m_IsCustomPool == false)
5992  {
5993  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
5994  {
5995  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
5996  if(smallerNewBlockSize >= vkMemReq.size)
5997  {
5998  newBlockSize = smallerNewBlockSize;
5999  ++newBlockSizeShift;
6000  res = CreateBlock(newBlockSize, &newBlockIndex);
6001  }
6002  else
6003  {
6004  break;
6005  }
6006  }
6007  }
6008 
6009  if(res == VK_SUCCESS)
6010  {
6011  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6012  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6013 
6014  if(mapped)
6015  {
6016  res = pBlock->Map(m_hAllocator, nullptr);
6017  if(res != VK_SUCCESS)
6018  {
6019  return res;
6020  }
6021  }
6022 
6023  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6024  VmaAllocationRequest allocRequest;
6025  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6026  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6027  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6028  (*pAllocation)->InitBlockAllocation(
6029  hCurrentPool,
6030  pBlock,
6031  allocRequest.offset,
6032  vkMemReq.alignment,
6033  vkMemReq.size,
6034  suballocType,
6035  mapped,
6036  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6037  VMA_HEAVY_ASSERT(pBlock->Validate());
6038  VMA_DEBUG_LOG(" Created new allocation Size=%llu", newBlockSize);
6039  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6040  return VK_SUCCESS;
6041  }
6042  }
6043 
6044  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6045 
6046  // 3. Try to allocate from existing blocks with making other allocations lost.
6047  if(canMakeOtherLost)
6048  {
6049  uint32_t tryIndex = 0;
6050  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6051  {
6052  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6053  VmaAllocationRequest bestRequest = {};
6054  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6055 
6056  // 1. Search existing allocations.
6057  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6058  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6059  {
6060  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6061  VMA_ASSERT(pCurrBlock);
6062  VmaAllocationRequest currRequest = {};
6063  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6064  currentFrameIndex,
6065  m_FrameInUseCount,
6066  m_BufferImageGranularity,
6067  vkMemReq.size,
6068  vkMemReq.alignment,
6069  suballocType,
6070  canMakeOtherLost,
6071  &currRequest))
6072  {
6073  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6074  if(pBestRequestBlock == VMA_NULL ||
6075  currRequestCost < bestRequestCost)
6076  {
6077  pBestRequestBlock = pCurrBlock;
6078  bestRequest = currRequest;
6079  bestRequestCost = currRequestCost;
6080 
6081  if(bestRequestCost == 0)
6082  {
6083  break;
6084  }
6085  }
6086  }
6087  }
6088 
6089  if(pBestRequestBlock != VMA_NULL)
6090  {
6091  if(mapped)
6092  {
6093  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
6094  if(res != VK_SUCCESS)
6095  {
6096  return res;
6097  }
6098  }
6099 
6100  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6101  currentFrameIndex,
6102  m_FrameInUseCount,
6103  &bestRequest))
6104  {
6105  // Allocating from this block makes it non-empty, so we no longer have an empty block.
6106  if(pBestRequestBlock->m_Metadata.IsEmpty())
6107  {
6108  m_HasEmptyBlock = false;
6109  }
6110  // Allocate from this pBlock.
6111  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6112  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6113  (*pAllocation)->InitBlockAllocation(
6114  hCurrentPool,
6115  pBestRequestBlock,
6116  bestRequest.offset,
6117  vkMemReq.alignment,
6118  vkMemReq.size,
6119  suballocType,
6120  mapped,
6121  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6122  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6123  VMA_DEBUG_LOG(" Returned from existing allocation");
6124  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6125  return VK_SUCCESS;
6126  }
6127  // else: Some allocations must have been touched while we are here. Next try.
6128  }
6129  else
6130  {
6131  // Could not find place in any of the blocks - break outer loop.
6132  break;
6133  }
6134  }
6135  /* Maximum number of tries exceeded - a very unlikely event when many other
6136  threads are simultaneously touching allocations, making it impossible to make
6137  them lost at the same time as we try to allocate. */
6138  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6139  {
6140  return VK_ERROR_TOO_MANY_OBJECTS;
6141  }
6142  }
6143 
6144  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6145 }
6146 
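// Illustrative example of the sizing heuristic above: in a default pool with
// m_PreferredBlockSize = 256 MiB and no existing blocks, the first blocks are
// created at 1/8, 1/4 and 1/2 of that size (32, 64, 128 MiB), as long as the
// halved size is still at least twice the requested allocation size. If
// vkAllocateMemory then fails, the same halving is retried downwards as long
// as the block still fits the request.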
6147 void VmaBlockVector::Free(
6148  VmaAllocation hAllocation)
6149 {
6150  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6151 
6152  // Scope for lock.
6153  {
6154  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6155 
6156  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6157 
6158  if(hAllocation->IsPersistentMap())
6159  {
6160  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
6161  }
6162 
6163  pBlock->m_Metadata.Free(hAllocation);
6164  VMA_HEAVY_ASSERT(pBlock->Validate());
6165 
6166  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6167 
6168  // pBlock became empty after this deallocation.
6169  if(pBlock->m_Metadata.IsEmpty())
6170  {
6171  // Already have an empty block. We don't want to have two, so delete this one.
6172  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6173  {
6174  pBlockToDelete = pBlock;
6175  Remove(pBlock);
6176  }
6177  // We now have our first empty block.
6178  else
6179  {
6180  m_HasEmptyBlock = true;
6181  }
6182  }
6183  // pBlock didn't become empty, but we have another empty block - find and free that one.
6184  // (This is optional, heuristics.)
6185  else if(m_HasEmptyBlock)
6186  {
6187  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6188  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6189  {
6190  pBlockToDelete = pLastBlock;
6191  m_Blocks.pop_back();
6192  m_HasEmptyBlock = false;
6193  }
6194  }
6195 
6196  IncrementallySortBlocks();
6197  }
6198 
6199  // Destruction of an empty block. Deferred until this point, outside of the
6200  // mutex lock, for performance reasons.
6201  if(pBlockToDelete != VMA_NULL)
6202  {
6203  VMA_DEBUG_LOG(" Deleted empty block");
6204  pBlockToDelete->Destroy(m_hAllocator);
6205  vma_delete(m_hAllocator, pBlockToDelete);
6206  }
6207 }
6208 
6209 size_t VmaBlockVector::CalcMaxBlockSize() const
6210 {
6211  size_t result = 0;
6212  for(size_t i = m_Blocks.size(); i--; )
6213  {
6214  result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());
6215  if(result >= m_PreferredBlockSize)
6216  {
6217  break;
6218  }
6219  }
6220  return result;
6221 }
6222 
6223 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6224 {
6225  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6226  {
6227  if(m_Blocks[blockIndex] == pBlock)
6228  {
6229  VmaVectorRemove(m_Blocks, blockIndex);
6230  return;
6231  }
6232  }
6233  VMA_ASSERT(0);
6234 }
6235 
6236 void VmaBlockVector::IncrementallySortBlocks()
6237 {
6238  // Bubble sort only until first swap.
6239  for(size_t i = 1; i < m_Blocks.size(); ++i)
6240  {
6241  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6242  {
6243  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6244  return;
6245  }
6246  }
6247 }
6248 
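// Example (illustrative): with per-block free sizes {10, 50, 30, 40}, one call
// swaps only the first out-of-order pair, giving {10, 30, 50, 40}. Repeated
// calls on every free gradually converge toward ascending free-space order,
// which keeps step 1 of Allocate() preferring the fullest blocks first.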
6249 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6250 {
6251  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6252  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6253  allocInfo.allocationSize = blockSize;
6254  VkDeviceMemory mem = VK_NULL_HANDLE;
6255  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6256  if(res < 0)
6257  {
6258  return res;
6259  }
6260 
6261  // New VkDeviceMemory successfully created.
6262 
6263  // Create new block object for it.
6264  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6265  pBlock->Init(
6266  m_MemoryTypeIndex,
6267  mem,
6268  allocInfo.allocationSize);
6269 
6270  m_Blocks.push_back(pBlock);
6271  if(pNewBlockIndex != VMA_NULL)
6272  {
6273  *pNewBlockIndex = m_Blocks.size() - 1;
6274  }
6275 
6276  return VK_SUCCESS;
6277 }
6278 
6279 #if VMA_STATS_STRING_ENABLED
6280 
6281 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6282 {
6283  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6284 
6285  json.BeginObject();
6286 
6287  if(m_IsCustomPool)
6288  {
6289  json.WriteString("MemoryTypeIndex");
6290  json.WriteNumber(m_MemoryTypeIndex);
6291 
6292  json.WriteString("BlockSize");
6293  json.WriteNumber(m_PreferredBlockSize);
6294 
6295  json.WriteString("BlockCount");
6296  json.BeginObject(true);
6297  if(m_MinBlockCount > 0)
6298  {
6299  json.WriteString("Min");
6300  json.WriteNumber(m_MinBlockCount);
6301  }
6302  if(m_MaxBlockCount < SIZE_MAX)
6303  {
6304  json.WriteString("Max");
6305  json.WriteNumber(m_MaxBlockCount);
6306  }
6307  json.WriteString("Cur");
6308  json.WriteNumber(m_Blocks.size());
6309  json.EndObject();
6310 
6311  if(m_FrameInUseCount > 0)
6312  {
6313  json.WriteString("FrameInUseCount");
6314  json.WriteNumber(m_FrameInUseCount);
6315  }
6316  }
6317  else
6318  {
6319  json.WriteString("PreferredBlockSize");
6320  json.WriteNumber(m_PreferredBlockSize);
6321  }
6322 
6323  json.WriteString("Blocks");
6324  json.BeginArray();
6325  for(size_t i = 0; i < m_Blocks.size(); ++i)
6326  {
6327  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6328  }
6329  json.EndArray();
6330 
6331  json.EndObject();
6332 }
6333 
6334 #endif // #if VMA_STATS_STRING_ENABLED
6335 
6336 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6337  VmaAllocator hAllocator,
6338  uint32_t currentFrameIndex)
6339 {
6340  if(m_pDefragmentator == VMA_NULL)
6341  {
6342  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6343  hAllocator,
6344  this,
6345  currentFrameIndex);
6346  }
6347 
6348  return m_pDefragmentator;
6349 }
6350 
6351 VkResult VmaBlockVector::Defragment(
6352  VmaDefragmentationStats* pDefragmentationStats,
6353  VkDeviceSize& maxBytesToMove,
6354  uint32_t& maxAllocationsToMove)
6355 {
6356  if(m_pDefragmentator == VMA_NULL)
6357  {
6358  return VK_SUCCESS;
6359  }
6360 
6361  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6362 
6363  // Defragment.
6364  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6365 
6366  // Accumulate statistics.
6367  if(pDefragmentationStats != VMA_NULL)
6368  {
6369  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6370  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6371  pDefragmentationStats->bytesMoved += bytesMoved;
6372  pDefragmentationStats->allocationsMoved += allocationsMoved;
6373  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6374  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6375  maxBytesToMove -= bytesMoved;
6376  maxAllocationsToMove -= allocationsMoved;
6377  }
6378 
6379  // Free empty blocks.
6380  m_HasEmptyBlock = false;
6381  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6382  {
6383  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6384  if(pBlock->m_Metadata.IsEmpty())
6385  {
6386  if(m_Blocks.size() > m_MinBlockCount)
6387  {
6388  if(pDefragmentationStats != VMA_NULL)
6389  {
6390  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6391  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6392  }
6393 
6394  VmaVectorRemove(m_Blocks, blockIndex);
6395  pBlock->Destroy(m_hAllocator);
6396  vma_delete(m_hAllocator, pBlock);
6397  }
6398  else
6399  {
6400  m_HasEmptyBlock = true;
6401  }
6402  }
6403  }
6404 
6405  return result;
6406 }
6407 
6408 void VmaBlockVector::DestroyDefragmentator()
6409 {
6410  if(m_pDefragmentator != VMA_NULL)
6411  {
6412  vma_delete(m_hAllocator, m_pDefragmentator);
6413  m_pDefragmentator = VMA_NULL;
6414  }
6415 }
6416 
6417 void VmaBlockVector::MakePoolAllocationsLost(
6418  uint32_t currentFrameIndex,
6419  size_t* pLostAllocationCount)
6420 {
6421  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6422  size_t lostAllocationCount = 0;
6423  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6424  {
6425  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6426  VMA_ASSERT(pBlock);
6427  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6428  }
6429  if(pLostAllocationCount != VMA_NULL)
6430  {
6431  *pLostAllocationCount = lostAllocationCount;
6432  }
6433 }
6434 
6435 void VmaBlockVector::AddStats(VmaStats* pStats)
6436 {
6437  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6438  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6439 
6440  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6441 
6442  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6443  {
6444  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6445  VMA_ASSERT(pBlock);
6446  VMA_HEAVY_ASSERT(pBlock->Validate());
6447  VmaStatInfo allocationStatInfo;
6448  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6449  VmaAddStatInfo(pStats->total, allocationStatInfo);
6450  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6451  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6452  }
6453 }
6454 
6455 ////////////////////////////////////////////////////////////////////////////////
6456 // VmaDefragmentator members definition
6457 
6458 VmaDefragmentator::VmaDefragmentator(
6459  VmaAllocator hAllocator,
6460  VmaBlockVector* pBlockVector,
6461  uint32_t currentFrameIndex) :
6462  m_hAllocator(hAllocator),
6463  m_pBlockVector(pBlockVector),
6464  m_CurrentFrameIndex(currentFrameIndex),
6465  m_BytesMoved(0),
6466  m_AllocationsMoved(0),
6467  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6468  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6469 {
6470 }
6471 
6472 VmaDefragmentator::~VmaDefragmentator()
6473 {
6474  for(size_t i = m_Blocks.size(); i--; )
6475  {
6476  vma_delete(m_hAllocator, m_Blocks[i]);
6477  }
6478 }
6479 
6480 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6481 {
6482  AllocationInfo allocInfo;
6483  allocInfo.m_hAllocation = hAlloc;
6484  allocInfo.m_pChanged = pChanged;
6485  m_Allocations.push_back(allocInfo);
6486 }
6487 
6488 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6489 {
6490  // It has already been mapped for defragmentation.
6491  if(m_pMappedDataForDefragmentation)
6492  {
6493  *ppMappedData = m_pMappedDataForDefragmentation;
6494  return VK_SUCCESS;
6495  }
6496 
6497  // The block was already mapped outside of defragmentation (persistent mapping).
6498  if(m_pBlock->m_Mapping.GetMappedData())
6499  {
6500  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6501  return VK_SUCCESS;
6502  }
6503 
6504  // Map on first usage.
6505  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
6506  *ppMappedData = m_pMappedDataForDefragmentation;
6507  return res;
6508 }
6509 
6510 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6511 {
6512  if(m_pMappedDataForDefragmentation != VMA_NULL)
6513  {
6514  m_pBlock->Unmap(hAllocator);
6515  }
6516 }
6517 
6518 VkResult VmaDefragmentator::DefragmentRound(
6519  VkDeviceSize maxBytesToMove,
6520  uint32_t maxAllocationsToMove)
6521 {
6522  if(m_Blocks.empty())
6523  {
6524  return VK_SUCCESS;
6525  }
6526 
6527  size_t srcBlockIndex = m_Blocks.size() - 1;
6528  size_t srcAllocIndex = SIZE_MAX;
6529  for(;;)
6530  {
6531  // 1. Find next allocation to move.
6532  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6533  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6534  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6535  {
6536  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6537  {
6538  // Finished: no more allocations to process.
6539  if(srcBlockIndex == 0)
6540  {
6541  return VK_SUCCESS;
6542  }
6543  else
6544  {
6545  --srcBlockIndex;
6546  srcAllocIndex = SIZE_MAX;
6547  }
6548  }
6549  else
6550  {
6551  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6552  }
6553  }
6554 
6555  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6556  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6557 
6558  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6559  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6560  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6561  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6562 
6563  // 2. Try to find new place for this allocation in preceding or current block.
6564  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6565  {
6566  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6567  VmaAllocationRequest dstAllocRequest;
6568  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6569  m_CurrentFrameIndex,
6570  m_pBlockVector->GetFrameInUseCount(),
6571  m_pBlockVector->GetBufferImageGranularity(),
6572  size,
6573  alignment,
6574  suballocType,
6575  false, // canMakeOtherLost
6576  &dstAllocRequest) &&
6577  MoveMakesSense(
6578  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6579  {
6580  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6581 
6582  // Reached limit on number of allocations or bytes to move.
6583  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6584  (m_BytesMoved + size > maxBytesToMove))
6585  {
6586  return VK_INCOMPLETE;
6587  }
6588 
6589  void* pDstMappedData = VMA_NULL;
6590  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6591  if(res != VK_SUCCESS)
6592  {
6593  return res;
6594  }
6595 
6596  void* pSrcMappedData = VMA_NULL;
6597  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6598  if(res != VK_SUCCESS)
6599  {
6600  return res;
6601  }
6602 
6603  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6604  memcpy(
6605  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6606  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6607  static_cast<size_t>(size));
6608 
6609  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6610  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6611 
6612  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6613 
6614  if(allocInfo.m_pChanged != VMA_NULL)
6615  {
6616  *allocInfo.m_pChanged = VK_TRUE;
6617  }
6618 
6619  ++m_AllocationsMoved;
6620  m_BytesMoved += size;
6621 
6622  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6623 
6624  break;
6625  }
6626  }
6627 
6628  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6629 
6630  if(srcAllocIndex > 0)
6631  {
6632  --srcAllocIndex;
6633  }
6634  else
6635  {
6636  if(srcBlockIndex > 0)
6637  {
6638  --srcBlockIndex;
6639  srcAllocIndex = SIZE_MAX;
6640  }
6641  else
6642  {
6643  return VK_SUCCESS;
6644  }
6645  }
6646  }
6647 }
6648 
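// Traversal order of the round above, sketched: source candidates are visited
// from the last block toward the first, and within each block from the back of
// its m_Allocations vector (smallest first, since the vector is sorted
// descending by size). Each candidate is offered to destination blocks from
// index 0 up to its own block and is moved to the first spot where both
// CreateAllocationRequest() succeeds and MoveMakesSense() agrees.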
6649 VkResult VmaDefragmentator::Defragment(
6650  VkDeviceSize maxBytesToMove,
6651  uint32_t maxAllocationsToMove)
6652 {
6653  if(m_Allocations.empty())
6654  {
6655  return VK_SUCCESS;
6656  }
6657 
6658  // Create block info for each block.
6659  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6660  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6661  {
6662  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6663  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6664  m_Blocks.push_back(pBlockInfo);
6665  }
6666 
6667  // Sort them by m_pBlock pointer value.
6668  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6669 
6670  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
6671  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6672  {
6673  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6674  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
6675  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6676  {
6677  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6678  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6679  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6680  {
6681  (*it)->m_Allocations.push_back(allocInfo);
6682  }
6683  else
6684  {
6685  VMA_ASSERT(0);
6686  }
6687  }
6688  }
6689  m_Allocations.clear();
6690 
6691  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6692  {
6693  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6694  pBlockInfo->CalcHasNonMovableAllocations();
6695  pBlockInfo->SortAllocationsBySizeDescecnding();
6696  }
6697 
6698  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6699  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6700 
6701  // Execute defragmentation rounds (the main part).
6702  VkResult result = VK_SUCCESS;
6703  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6704  {
6705  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6706  }
6707 
6708  // Unmap blocks that were mapped for defragmentation.
6709  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6710  {
6711  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6712  }
6713 
6714  return result;
6715 }
6716 
6717 bool VmaDefragmentator::MoveMakesSense(
6718  size_t dstBlockIndex, VkDeviceSize dstOffset,
6719  size_t srcBlockIndex, VkDeviceSize srcOffset)
6720 {
6721  if(dstBlockIndex < srcBlockIndex)
6722  {
6723  return true;
6724  }
6725  if(dstBlockIndex > srcBlockIndex)
6726  {
6727  return false;
6728  }
6729  if(dstOffset < srcOffset)
6730  {
6731  return true;
6732  }
6733  return false;
6734 }
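// In effect MoveMakesSense() is a lexicographic "<" on (blockIndex, offset):
// a move is worthwhile only into an earlier block, or to a lower offset within
// the same block, so defragmentation strictly compacts data toward the front
// of the sorted block list.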
6735 
6736 ////////////////////////////////////////////////////////////////////////////////
6737 // VmaAllocator_T
6738 
6739 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6740  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6741  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6742  m_hDevice(pCreateInfo->device),
6743  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6744  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6745  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6746  m_PreferredLargeHeapBlockSize(0),
6747  m_PhysicalDevice(pCreateInfo->physicalDevice),
6748  m_CurrentFrameIndex(0),
6749  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6750 {
6751  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6752 
6753  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6754  memset(&m_MemProps, 0, sizeof(m_MemProps));
6755  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6756 
6757  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6758  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6759 
6760  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6761  {
6762  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6763  }
6764 
6765  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6766  {
6767  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6768  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6769  }
6770 
6771  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6772 
6773  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6774  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6775 
6776  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6777  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6778 
6779  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6780  {
6781  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6782  {
6783  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6784  if(limit != VK_WHOLE_SIZE)
6785  {
6786  m_HeapSizeLimit[heapIndex] = limit;
6787  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6788  {
6789  m_MemProps.memoryHeaps[heapIndex].size = limit;
6790  }
6791  }
6792  }
6793  }
6794 
6795  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6796  {
6797  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6798 
6799  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
6800  this,
6801  memTypeIndex,
6802  preferredBlockSize,
6803  0,
6804  SIZE_MAX,
6805  GetBufferImageGranularity(),
6806  pCreateInfo->frameInUseCount,
6807  false); // isCustomPool
6808  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
6809  // because minBlockCount is 0.
6810  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6811  }
6812 }
6813 
6814 VmaAllocator_T::~VmaAllocator_T()
6815 {
6816  VMA_ASSERT(m_Pools.empty());
6817 
6818  for(size_t i = GetMemoryTypeCount(); i--; )
6819  {
6820  vma_delete(this, m_pDedicatedAllocations[i]);
6821  vma_delete(this, m_pBlockVectors[i]);
6822  }
6823 }
6824 
6825 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6826 {
6827 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6828  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6829  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6830  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6831  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6832  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6833  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6834  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6835  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6836  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6837  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6838  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6839  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6840  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6841  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6842  if(m_UseKhrDedicatedAllocation)
6843  {
6844  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
6845  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
6846  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
6847  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
6848  }
6849 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6850 
6851 #define VMA_COPY_IF_NOT_NULL(funcName) \
6852  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6853 
6854  if(pVulkanFunctions != VMA_NULL)
6855  {
6856  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6857  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6858  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6859  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6860  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6861  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6862  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6863  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6864  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6865  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6866  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6867  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6868  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6869  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6870  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6871  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6872  }
6873 
6874 #undef VMA_COPY_IF_NOT_NULL
6875 
6876  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6877  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6878  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6879  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6880  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6881  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6882  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6883  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6884  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6885  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6886  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6887  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6888  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6889  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6890  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6891  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6892  if(m_UseKhrDedicatedAllocation)
6893  {
6894  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6895  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6896  }
6897 }
6898 
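// Minimal usage sketch (hypothetical, assuming VMA_STATIC_VULKAN_FUNCTIONS is
// defined to 0): the application fills VmaVulkanFunctions itself, e.g. with
// pointers obtained from its own loader, before creating the allocator:
//
//   VmaVulkanFunctions funcs = {};
//   funcs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   funcs.vkAllocateMemory = vkAllocateMemory;
//   // ... all remaining members filled the same way ...
//   VmaAllocatorCreateInfo createInfo = {};
//   createInfo.physicalDevice = physicalDevice; // valid VkPhysicalDevice
//   createInfo.device = device;                 // valid VkDevice
//   createInfo.pVulkanFunctions = &funcs;
//   VmaAllocator allocator;
//   vmaCreateAllocator(&createInfo, &allocator);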
6899 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6900 {
6901  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6902  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6903  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
6904  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
6905 }
6906 
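// Example (illustrative, assuming a VMA_SMALL_HEAP_MAX_SIZE of 512 MiB): a
// 256 MiB heap counts as small and gets 256 / 8 = 32 MiB blocks, while an
// 8 GiB heap uses m_PreferredLargeHeapBlockSize, which defaults to
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE unless overridden at allocator creation.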
6907 VkResult VmaAllocator_T::AllocateMemoryOfType(
6908  const VkMemoryRequirements& vkMemReq,
6909  bool dedicatedAllocation,
6910  VkBuffer dedicatedBuffer,
6911  VkImage dedicatedImage,
6912  const VmaAllocationCreateInfo& createInfo,
6913  uint32_t memTypeIndex,
6914  VmaSuballocationType suballocType,
6915  VmaAllocation* pAllocation)
6916 {
6917  VMA_ASSERT(pAllocation != VMA_NULL);
6918  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6919 
6920  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6921 
6922  // If memory type is not HOST_VISIBLE, disable MAPPED.
6923  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6924  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6925  {
6926  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
6927  }
6928 
6929  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
6930  VMA_ASSERT(blockVector);
6931 
6932  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6933  bool preferDedicatedMemory =
6934  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6935  dedicatedAllocation ||
6936  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
6937  vkMemReq.size > preferredBlockSize / 2;
6938 
6939  if(preferDedicatedMemory &&
6940  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6941  finalCreateInfo.pool == VK_NULL_HANDLE)
6942  {
6943  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6944  }
6945 
6946  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6947  {
6948  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6949  {
6950  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6951  }
6952  else
6953  {
6954  return AllocateDedicatedMemory(
6955  vkMemReq.size,
6956  suballocType,
6957  memTypeIndex,
6958  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6959  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6960  finalCreateInfo.pUserData,
6961  dedicatedBuffer,
6962  dedicatedImage,
6963  pAllocation);
6964  }
6965  }
6966  else
6967  {
6968  VkResult res = blockVector->Allocate(
6969  VK_NULL_HANDLE, // hCurrentPool
6970  m_CurrentFrameIndex.load(),
6971  vkMemReq,
6972  finalCreateInfo,
6973  suballocType,
6974  pAllocation);
6975  if(res == VK_SUCCESS)
6976  {
6977  return res;
6978  }
6979 
6980  // Block allocation failed: try dedicated memory.
6981  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6982  {
6983  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6984  }
6985  else
6986  {
6987  res = AllocateDedicatedMemory(
6988  vkMemReq.size,
6989  suballocType,
6990  memTypeIndex,
6991  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6992  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6993  finalCreateInfo.pUserData,
6994  dedicatedBuffer,
6995  dedicatedImage,
6996  pAllocation);
6997  if(res == VK_SUCCESS)
6998  {
6999  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7000  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7001  return VK_SUCCESS;
7002  }
7003  else
7004  {
7005  // Everything failed: Return error code.
7006  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7007  return res;
7008  }
7009  }
7010  }
7011 }
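// In summary, the strategy above is: use dedicated VkDeviceMemory when the
// caller requests it, when VMA_DEBUG_ALWAYS_DEDICATED_MEMORY is set, or when
// the request exceeds half of the preferred block size; otherwise try
// suballocation from the block vector first and fall back to dedicated
// memory, unless VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids it.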
7012 
7013 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7014  VkDeviceSize size,
7015  VmaSuballocationType suballocType,
7016  uint32_t memTypeIndex,
7017  bool map,
7018  bool isUserDataString,
7019  void* pUserData,
7020  VkBuffer dedicatedBuffer,
7021  VkImage dedicatedImage,
7022  VmaAllocation* pAllocation)
7023 {
7024  VMA_ASSERT(pAllocation);
7025 
7026  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7027  allocInfo.memoryTypeIndex = memTypeIndex;
7028  allocInfo.allocationSize = size;
7029 
7030  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7031  if(m_UseKhrDedicatedAllocation)
7032  {
7033  if(dedicatedBuffer != VK_NULL_HANDLE)
7034  {
7035  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7036  dedicatedAllocInfo.buffer = dedicatedBuffer;
7037  allocInfo.pNext = &dedicatedAllocInfo;
7038  }
7039  else if(dedicatedImage != VK_NULL_HANDLE)
7040  {
7041  dedicatedAllocInfo.image = dedicatedImage;
7042  allocInfo.pNext = &dedicatedAllocInfo;
7043  }
7044  }
7045 
7046  // Allocate VkDeviceMemory.
7047  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7048  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7049  if(res < 0)
7050  {
7051  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7052  return res;
7053  }
7054 
7055  void* pMappedData = nullptr;
7056  if(map)
7057  {
7058  res = (*m_VulkanFunctions.vkMapMemory)(
7059  m_hDevice,
7060  hMemory,
7061  0,
7062  VK_WHOLE_SIZE,
7063  0,
7064  &pMappedData);
7065  if(res < 0)
7066  {
7067  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7068  FreeVulkanMemory(memTypeIndex, size, hMemory);
7069  return res;
7070  }
7071  }
7072 
7073  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7074  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7075  (*pAllocation)->SetUserData(this, pUserData);
7076 
7077  // Register it in m_pDedicatedAllocations.
7078  {
7079  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7080  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7081  VMA_ASSERT(pDedicatedAllocations);
7082  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7083  }
7084 
7085  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7086 
7087  return VK_SUCCESS;
7088 }
7089 
7090 void VmaAllocator_T::GetBufferMemoryRequirements(
7091  VkBuffer hBuffer,
7092  VkMemoryRequirements& memReq,
7093  bool& requiresDedicatedAllocation,
7094  bool& prefersDedicatedAllocation) const
7095 {
7096  if(m_UseKhrDedicatedAllocation)
7097  {
7098  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7099  memReqInfo.buffer = hBuffer;
7100 
7101  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7102 
7103  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7104  memReq2.pNext = &memDedicatedReq;
7105 
7106  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7107 
7108  memReq = memReq2.memoryRequirements;
7109  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7110  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7111  }
7112  else
7113  {
7114  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7115  requiresDedicatedAllocation = false;
7116  prefersDedicatedAllocation = false;
7117  }
7118 }
7119 
7120 void VmaAllocator_T::GetImageMemoryRequirements(
7121  VkImage hImage,
7122  VkMemoryRequirements& memReq,
7123  bool& requiresDedicatedAllocation,
7124  bool& prefersDedicatedAllocation) const
7125 {
7126  if(m_UseKhrDedicatedAllocation)
7127  {
7128  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7129  memReqInfo.image = hImage;
7130 
7131  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7132 
7133  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7134  memReq2.pNext = &memDedicatedReq;
7135 
7136  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7137 
7138  memReq = memReq2.memoryRequirements;
7139  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7140  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7141  }
7142  else
7143  {
7144  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7145  requiresDedicatedAllocation = false;
7146  prefersDedicatedAllocation = false;
7147  }
7148 }
7149 
7150 VkResult VmaAllocator_T::AllocateMemory(
7151  const VkMemoryRequirements& vkMemReq,
7152  bool requiresDedicatedAllocation,
7153  bool prefersDedicatedAllocation,
7154  VkBuffer dedicatedBuffer,
7155  VkImage dedicatedImage,
7156  const VmaAllocationCreateInfo& createInfo,
7157  VmaSuballocationType suballocType,
7158  VmaAllocation* pAllocation)
7159 {
7160  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7161  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7162  {
7163  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7164  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7165  }
7166  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7167  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7168  {
7169  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7170  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7171  }
7172  if(requiresDedicatedAllocation)
7173  {
7174  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7175  {
7176  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7177  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7178  }
7179  if(createInfo.pool != VK_NULL_HANDLE)
7180  {
7181  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7182  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7183  }
7184  }
7185  if((createInfo.pool != VK_NULL_HANDLE) &&
7186  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7187  {
7188  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7189  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7190  }
7191 
7192  if(createInfo.pool != VK_NULL_HANDLE)
7193  {
7194  return createInfo.pool->m_BlockVector.Allocate(
7195  createInfo.pool,
7196  m_CurrentFrameIndex.load(),
7197  vkMemReq,
7198  createInfo,
7199  suballocType,
7200  pAllocation);
7201  }
7202  else
7203  {
7204  // Bit mask of Vulkan memory types acceptable for this allocation.
7205  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7206  uint32_t memTypeIndex = UINT32_MAX;
7207  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7208  if(res == VK_SUCCESS)
7209  {
7210  res = AllocateMemoryOfType(
7211  vkMemReq,
7212  requiresDedicatedAllocation || prefersDedicatedAllocation,
7213  dedicatedBuffer,
7214  dedicatedImage,
7215  createInfo,
7216  memTypeIndex,
7217  suballocType,
7218  pAllocation);
7219  // Succeeded on first try.
7220  if(res == VK_SUCCESS)
7221  {
7222  return res;
7223  }
7224  // Allocation from this memory type failed. Try other compatible memory types.
7225  else
7226  {
7227  for(;;)
7228  {
7229  // Remove old memTypeIndex from list of possibilities.
7230  memoryTypeBits &= ~(1u << memTypeIndex);
7231  // Find alternative memTypeIndex.
7232  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7233  if(res == VK_SUCCESS)
7234  {
7235  res = AllocateMemoryOfType(
7236  vkMemReq,
7237  requiresDedicatedAllocation || prefersDedicatedAllocation,
7238  dedicatedBuffer,
7239  dedicatedImage,
7240  createInfo,
7241  memTypeIndex,
7242  suballocType,
7243  pAllocation);
7244  // Allocation from this alternative memory type succeeded.
7245  if(res == VK_SUCCESS)
7246  {
7247  return res;
7248  }
7249  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7250  }
7251  // No other matching memory type index could be found.
7252  else
7253  {
7254  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7255  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7256  }
7257  }
7258  }
7259  }
7260  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7261  else
7262  return res;
7263  }
7264 }
7265 
7266 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7267 {
7268  VMA_ASSERT(allocation);
7269 
7270  if(allocation->CanBecomeLost() == false ||
7271  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7272  {
7273  switch(allocation->GetType())
7274  {
7275  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7276  {
7277  VmaBlockVector* pBlockVector = VMA_NULL;
7278  VmaPool hPool = allocation->GetPool();
7279  if(hPool != VK_NULL_HANDLE)
7280  {
7281  pBlockVector = &hPool->m_BlockVector;
7282  }
7283  else
7284  {
7285  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7286  pBlockVector = m_pBlockVectors[memTypeIndex];
7287  }
7288  pBlockVector->Free(allocation);
7289  }
7290  break;
7291  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7292  FreeDedicatedMemory(allocation);
7293  break;
7294  default:
7295  VMA_ASSERT(0);
7296  }
7297  }
7298 
7299  allocation->SetUserData(this, VMA_NULL);
7300  vma_delete(this, allocation);
7301 }
7302 
7303 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7304 {
7305  // Initialize.
7306  InitStatInfo(pStats->total);
7307  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7308  InitStatInfo(pStats->memoryType[i]);
7309  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7310  InitStatInfo(pStats->memoryHeap[i]);
7311 
7312  // Process default pools.
7313  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7314  {
7315  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7316  VMA_ASSERT(pBlockVector);
7317  pBlockVector->AddStats(pStats);
7318  }
7319 
7320  // Process custom pools.
7321  {
7322  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7323  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7324  {
7325  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7326  }
7327  }
7328 
7329  // Process dedicated allocations.
7330  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7331  {
7332  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7333  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7334  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7335  VMA_ASSERT(pDedicatedAllocVector);
7336  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7337  {
7338  VmaStatInfo allocationStatInfo;
7339  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7340  VmaAddStatInfo(pStats->total, allocationStatInfo);
7341  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7342  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7343  }
7344  }
7345 
7346  // Postprocess.
7347  VmaPostprocessCalcStatInfo(pStats->total);
7348  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7349  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7350  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7351  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7352 }
7353 
7354 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7355 
7356 VkResult VmaAllocator_T::Defragment(
7357  VmaAllocation* pAllocations,
7358  size_t allocationCount,
7359  VkBool32* pAllocationsChanged,
7360  const VmaDefragmentationInfo* pDefragmentationInfo,
7361  VmaDefragmentationStats* pDefragmentationStats)
7362 {
7363  if(pAllocationsChanged != VMA_NULL)
7364  {
7365  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7366  }
7367  if(pDefragmentationStats != VMA_NULL)
7368  {
7369  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7370  }
7371 
7372  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7373 
7374  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7375 
7376  const size_t poolCount = m_Pools.size();
7377 
7378  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7379  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7380  {
7381  VmaAllocation hAlloc = pAllocations[allocIndex];
7382  VMA_ASSERT(hAlloc);
7383  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7384  // DedicatedAlloc cannot be defragmented.
7385  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7386  // Only HOST_VISIBLE memory types can be defragmented.
7387  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7388  // Lost allocation cannot be defragmented.
7389  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7390  {
7391  VmaBlockVector* pAllocBlockVector = nullptr;
7392 
7393  const VmaPool hAllocPool = hAlloc->GetPool();
7394  // This allocation belongs to a custom pool.
7395  if(hAllocPool != VK_NULL_HANDLE)
7396  {
7397  pAllocBlockVector = &hAllocPool->GetBlockVector();
7398  }
7399  // This allocation belongs to the general pool.
7400  else
7401  {
7402  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7403  }
7404 
7405  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7406 
7407  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7408  &pAllocationsChanged[allocIndex] : VMA_NULL;
7409  pDefragmentator->AddAllocation(hAlloc, pChanged);
7410  }
7411  }
7412 
7413  VkResult result = VK_SUCCESS;
7414 
7415  // ======== Main processing.
7416 
7417  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
7418  uint32_t maxAllocationsToMove = UINT32_MAX;
7419  if(pDefragmentationInfo != VMA_NULL)
7420  {
7421  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7422  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7423  }
7424 
7425  // Process standard memory.
7426  for(uint32_t memTypeIndex = 0;
7427  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7428  ++memTypeIndex)
7429  {
7430  // Only HOST_VISIBLE memory types can be defragmented.
7431  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7432  {
7433  result = m_pBlockVectors[memTypeIndex]->Defragment(
7434  pDefragmentationStats,
7435  maxBytesToMove,
7436  maxAllocationsToMove);
7437  }
7438  }
7439 
7440  // Process custom pools.
7441  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7442  {
7443  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7444  pDefragmentationStats,
7445  maxBytesToMove,
7446  maxAllocationsToMove);
7447  }
7448 
7449  // ======== Destroy defragmentators.
7450 
7451  // Process custom pools.
7452  for(size_t poolIndex = poolCount; poolIndex--; )
7453  {
7454  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7455  }
7456 
7457  // Process standard memory.
7458  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7459  {
7460  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7461  {
7462  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7463  }
7464  }
7465 
7466  return result;
7467 }
7468 
7469 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7470 {
7471  if(hAllocation->CanBecomeLost())
7472  {
7473  /*
7474  Warning: This is a carefully designed algorithm.
7475  Do not modify unless you really know what you're doing :)
7476  */
7477  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7478  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7479  for(;;)
7480  {
7481  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7482  {
7483  pAllocationInfo->memoryType = UINT32_MAX;
7484  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7485  pAllocationInfo->offset = 0;
7486  pAllocationInfo->size = hAllocation->GetSize();
7487  pAllocationInfo->pMappedData = VMA_NULL;
7488  pAllocationInfo->pUserData = hAllocation->GetUserData();
7489  return;
7490  }
7491  else if(localLastUseFrameIndex == localCurrFrameIndex)
7492  {
7493  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7494  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7495  pAllocationInfo->offset = hAllocation->GetOffset();
7496  pAllocationInfo->size = hAllocation->GetSize();
7497  pAllocationInfo->pMappedData = VMA_NULL;
7498  pAllocationInfo->pUserData = hAllocation->GetUserData();
7499  return;
7500  }
7501  else // Last use time earlier than current time.
7502  {
7503  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7504  {
7505  localLastUseFrameIndex = localCurrFrameIndex;
7506  }
7507  }
7508  }
7509  }
7510  else
7511  {
7512  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7513  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7514  pAllocationInfo->offset = hAllocation->GetOffset();
7515  pAllocationInfo->size = hAllocation->GetSize();
7516  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7517  pAllocationInfo->pUserData = hAllocation->GetUserData();
7518  }
7519 }
7520 
7521 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7522 {
7523  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7524 
7525  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7526 
7527  if(newCreateInfo.maxBlockCount == 0)
7528  {
7529  newCreateInfo.maxBlockCount = SIZE_MAX;
7530  }
7531  if(newCreateInfo.blockSize == 0)
7532  {
7533  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7534  }
7535 
7536  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7537 
7538  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7539  if(res != VK_SUCCESS)
7540  {
7541  vma_delete(this, *pPool);
7542  *pPool = VMA_NULL;
7543  return res;
7544  }
7545 
7546  // Add to m_Pools.
7547  {
7548  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7549  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7550  }
7551 
7552  return VK_SUCCESS;
7553 }
7554 
7555 void VmaAllocator_T::DestroyPool(VmaPool pool)
7556 {
7557  // Remove from m_Pools.
7558  {
7559  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7560  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7561  VMA_ASSERT(success && "Pool not found in Allocator.");
7562  }
7563 
7564  vma_delete(this, pool);
7565 }
7566 
7567 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7568 {
7569  pool->m_BlockVector.GetPoolStats(pPoolStats);
7570 }
7571 
7572 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7573 {
7574  m_CurrentFrameIndex.store(frameIndex);
7575 }
7576 
7577 void VmaAllocator_T::MakePoolAllocationsLost(
7578  VmaPool hPool,
7579  size_t* pLostAllocationCount)
7580 {
7581  hPool->m_BlockVector.MakePoolAllocationsLost(
7582  m_CurrentFrameIndex.load(),
7583  pLostAllocationCount);
7584 }
7585 
7586 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7587 {
7588  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7589  (*pAllocation)->InitLost();
7590 }
7591 
7592 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7593 {
7594  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7595 
7596  VkResult res;
7597  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7598  {
7599  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7600  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7601  {
7602  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7603  if(res == VK_SUCCESS)
7604  {
7605  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7606  }
7607  }
7608  else
7609  {
7610  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7611  }
7612  }
7613  else
7614  {
7615  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7616  }
7617 
7618  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7619  {
7620  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7621  }
7622 
7623  return res;
7624 }
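/*
A minimal sketch of how the m_HeapSizeLimit accounting above is configured,
assuming the limits array is filled before allocator creation (heap index 0
and the 256 MiB cap are arbitrary example values):

    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = heap not limited
    heapSizeLimit[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB

    VmaAllocatorCreateInfo allocatorInfo = {}; // physicalDevice/device omitted
    allocatorInfo.pHeapSizeLimit = heapSizeLimit;

An allocation that would push a limited heap over its budget then fails early
with VK_ERROR_OUT_OF_DEVICE_MEMORY instead of reaching vkAllocateMemory.
*/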
7625 
7626 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7627 {
7628  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7629  {
7630  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7631  }
7632 
7633  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7634 
7635  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7636  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7637  {
7638  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7639  m_HeapSizeLimit[heapIndex] += size;
7640  }
7641 }
7642 
7643 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7644 {
7645  if(hAllocation->CanBecomeLost())
7646  {
7647  return VK_ERROR_MEMORY_MAP_FAILED;
7648  }
7649 
7650  switch(hAllocation->GetType())
7651  {
7652  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7653  {
7654  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7655  char *pBytes = nullptr;
7656  VkResult res = pBlock->Map(this, (void**)&pBytes);
7657  if(res == VK_SUCCESS)
7658  {
7659  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7660  hAllocation->BlockAllocMap();
7661  }
7662  return res;
7663  }
7664  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7665  return hAllocation->DedicatedAllocMap(this, ppData);
7666  default:
7667  VMA_ASSERT(0);
7668  return VK_ERROR_MEMORY_MAP_FAILED;
7669  }
7670 }
7671 
7672 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7673 {
7674  switch(hAllocation->GetType())
7675  {
7676  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7677  {
7678  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7679  hAllocation->BlockAllocUnmap();
7680  pBlock->Unmap(this);
7681  }
7682  break;
7683  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7684  hAllocation->DedicatedAllocUnmap(this);
7685  break;
7686  default:
7687  VMA_ASSERT(0);
7688  }
7689 }
7690 
7691 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7692 {
7693  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7694 
7695  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7696  {
7697  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7698  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7699  VMA_ASSERT(pDedicatedAllocations);
7700  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7701  VMA_ASSERT(success);
7702  }
7703 
7704  VkDeviceMemory hMemory = allocation->GetMemory();
7705 
7706  if(allocation->GetMappedData() != VMA_NULL)
7707  {
7708  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7709  }
7710 
7711  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7712 
7713  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7714 }
7715 
7716 #if VMA_STATS_STRING_ENABLED
7717 
7718 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7719 {
7720  bool dedicatedAllocationsStarted = false;
7721  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7722  {
7723  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7724  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7725  VMA_ASSERT(pDedicatedAllocVector);
7726  if(pDedicatedAllocVector->empty() == false)
7727  {
7728  if(dedicatedAllocationsStarted == false)
7729  {
7730  dedicatedAllocationsStarted = true;
7731  json.WriteString("DedicatedAllocations");
7732  json.BeginObject();
7733  }
7734 
7735  json.BeginString("Type ");
7736  json.ContinueString(memTypeIndex);
7737  json.EndString();
7738 
7739  json.BeginArray();
7740 
7741  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7742  {
7743  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7744  json.BeginObject(true);
7745 
7746  json.WriteString("Type");
7747  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7748 
7749  json.WriteString("Size");
7750  json.WriteNumber(hAlloc->GetSize());
7751 
7752  const void* pUserData = hAlloc->GetUserData();
7753  if(pUserData != VMA_NULL)
7754  {
7755  json.WriteString("UserData");
7756  if(hAlloc->IsUserDataString())
7757  {
7758  json.WriteString((const char*)pUserData);
7759  }
7760  else
7761  {
7762  json.BeginString();
7763  json.ContinueString_Pointer(pUserData);
7764  json.EndString();
7765  }
7766  }
7767 
7768  json.EndObject();
7769  }
7770 
7771  json.EndArray();
7772  }
7773  }
7774  if(dedicatedAllocationsStarted)
7775  {
7776  json.EndObject();
7777  }
7778 
7779  {
7780  bool allocationsStarted = false;
7781  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7782  {
7783  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
7784  {
7785  if(allocationsStarted == false)
7786  {
7787  allocationsStarted = true;
7788  json.WriteString("DefaultPools");
7789  json.BeginObject();
7790  }
7791 
7792  json.BeginString("Type ");
7793  json.ContinueString(memTypeIndex);
7794  json.EndString();
7795 
7796  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
7797  }
7798  }
7799  if(allocationsStarted)
7800  {
7801  json.EndObject();
7802  }
7803  }
7804 
7805  {
7806  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7807  const size_t poolCount = m_Pools.size();
7808  if(poolCount > 0)
7809  {
7810  json.WriteString("Pools");
7811  json.BeginArray();
7812  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7813  {
7814  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7815  }
7816  json.EndArray();
7817  }
7818  }
7819 }
7820 
7821 #endif // #if VMA_STATS_STRING_ENABLED
7822 
7823 static VkResult AllocateMemoryForImage(
7824  VmaAllocator allocator,
7825  VkImage image,
7826  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7827  VmaSuballocationType suballocType,
7828  VmaAllocation* pAllocation)
7829 {
7830  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7831 
7832  VkMemoryRequirements vkMemReq = {};
7833  bool requiresDedicatedAllocation = false;
7834  bool prefersDedicatedAllocation = false;
7835  allocator->GetImageMemoryRequirements(image, vkMemReq,
7836  requiresDedicatedAllocation, prefersDedicatedAllocation);
7837 
7838  return allocator->AllocateMemory(
7839  vkMemReq,
7840  requiresDedicatedAllocation,
7841  prefersDedicatedAllocation,
7842  VK_NULL_HANDLE, // dedicatedBuffer
7843  image, // dedicatedImage
7844  *pAllocationCreateInfo,
7845  suballocType,
7846  pAllocation);
7847 }
7848 
7849 ////////////////////////////////////////////////////////////////////////////////
7850 // Public interface
7851 
7852 VkResult vmaCreateAllocator(
7853  const VmaAllocatorCreateInfo* pCreateInfo,
7854  VmaAllocator* pAllocator)
7855 {
7856  VMA_ASSERT(pCreateInfo && pAllocator);
7857  VMA_DEBUG_LOG("vmaCreateAllocator");
7858  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7859  return VK_SUCCESS;
7860 }
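/*
A minimal usage sketch, assuming physicalDevice and device are valid handles
created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/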
7861 
7862 void vmaDestroyAllocator(
7863  VmaAllocator allocator)
7864 {
7865  if(allocator != VK_NULL_HANDLE)
7866  {
7867  VMA_DEBUG_LOG("vmaDestroyAllocator");
7868  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7869  vma_delete(&allocationCallbacks, allocator);
7870  }
7871 }
7872 
7873 void vmaGetPhysicalDeviceProperties(
7874  VmaAllocator allocator,
7875  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7876 {
7877  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7878  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7879 }
7880 
7881 void vmaGetMemoryProperties(
7882  VmaAllocator allocator,
7883  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7884 {
7885  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7886  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7887 }
7888 
7889 void vmaGetMemoryTypeProperties(
7890  VmaAllocator allocator,
7891  uint32_t memoryTypeIndex,
7892  VkMemoryPropertyFlags* pFlags)
7893 {
7894  VMA_ASSERT(allocator && pFlags);
7895  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7896  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7897 }
7898 
7899 void vmaSetCurrentFrameIndex(
7900  VmaAllocator allocator,
7901  uint32_t frameIndex)
7902 {
7903  VMA_ASSERT(allocator);
7904  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7905 
7906  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7907 
7908  allocator->SetCurrentFrameIndex(frameIndex);
7909 }
7910 
7911 void vmaCalculateStats(
7912  VmaAllocator allocator,
7913  VmaStats* pStats)
7914 {
7915  VMA_ASSERT(allocator && pStats);
7916  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7917  allocator->CalculateStats(pStats);
7918 }
7919 
7920 #if VMA_STATS_STRING_ENABLED
7921 
7922 void vmaBuildStatsString(
7923  VmaAllocator allocator,
7924  char** ppStatsString,
7925  VkBool32 detailedMap)
7926 {
7927  VMA_ASSERT(allocator && ppStatsString);
7928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7929 
7930  VmaStringBuilder sb(allocator);
7931  {
7932  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7933  json.BeginObject();
7934 
7935  VmaStats stats;
7936  allocator->CalculateStats(&stats);
7937 
7938  json.WriteString("Total");
7939  VmaPrintStatInfo(json, stats.total);
7940 
7941  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7942  {
7943  json.BeginString("Heap ");
7944  json.ContinueString(heapIndex);
7945  json.EndString();
7946  json.BeginObject();
7947 
7948  json.WriteString("Size");
7949  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7950 
7951  json.WriteString("Flags");
7952  json.BeginArray(true);
7953  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7954  {
7955  json.WriteString("DEVICE_LOCAL");
7956  }
7957  json.EndArray();
7958 
7959  if(stats.memoryHeap[heapIndex].blockCount > 0)
7960  {
7961  json.WriteString("Stats");
7962  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7963  }
7964 
7965  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7966  {
7967  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7968  {
7969  json.BeginString("Type ");
7970  json.ContinueString(typeIndex);
7971  json.EndString();
7972 
7973  json.BeginObject();
7974 
7975  json.WriteString("Flags");
7976  json.BeginArray(true);
7977  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7978  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7979  {
7980  json.WriteString("DEVICE_LOCAL");
7981  }
7982  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7983  {
7984  json.WriteString("HOST_VISIBLE");
7985  }
7986  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7987  {
7988  json.WriteString("HOST_COHERENT");
7989  }
7990  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7991  {
7992  json.WriteString("HOST_CACHED");
7993  }
7994  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7995  {
7996  json.WriteString("LAZILY_ALLOCATED");
7997  }
7998  json.EndArray();
7999 
8000  if(stats.memoryType[typeIndex].blockCount > 0)
8001  {
8002  json.WriteString("Stats");
8003  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8004  }
8005 
8006  json.EndObject();
8007  }
8008  }
8009 
8010  json.EndObject();
8011  }
8012  if(detailedMap == VK_TRUE)
8013  {
8014  allocator->PrintDetailedMap(json);
8015  }
8016 
8017  json.EndObject();
8018  }
8019 
8020  const size_t len = sb.GetLength();
8021  char* const pChars = vma_new_array(allocator, char, len + 1);
8022  if(len > 0)
8023  {
8024  memcpy(pChars, sb.GetData(), len);
8025  }
8026  pChars[len] = '\0';
8027  *ppStatsString = pChars;
8028 }
8029 
8030 void vmaFreeStatsString(
8031  VmaAllocator allocator,
8032  char* pStatsString)
8033 {
8034  if(pStatsString != VMA_NULL)
8035  {
8036  VMA_ASSERT(allocator);
8037  size_t len = strlen(pStatsString);
8038  vma_delete_array(allocator, pStatsString, len + 1);
8039  }
8040 }
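/*
A usage sketch for the two statistics functions above; the string is allocated
through the allocator's callbacks and must be released with vmaFreeStatsString():

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/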
8041 
8042 #endif // #if VMA_STATS_STRING_ENABLED
8043 
8044 /*
8045 This function is not protected by any mutex because it just reads immutable data.
8046 */
8047 VkResult vmaFindMemoryTypeIndex(
8048  VmaAllocator allocator,
8049  uint32_t memoryTypeBits,
8050  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8051  uint32_t* pMemoryTypeIndex)
8052 {
8053  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8054  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8055  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8056 
8057  if(pAllocationCreateInfo->memoryTypeBits != 0)
8058  {
8059  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8060  }
8061 
8062  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8063  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8064 
8065  // Convert usage to requiredFlags and preferredFlags.
8066  switch(pAllocationCreateInfo->usage)
8067  {
8068  case VMA_MEMORY_USAGE_UNKNOWN:
8069  break;
8070  case VMA_MEMORY_USAGE_GPU_ONLY:
8071  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8072  break;
8073  case VMA_MEMORY_USAGE_CPU_ONLY:
8074  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8075  break;
8076  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8077  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8078  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8079  break;
8080  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8081  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8082  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8083  break;
8084  default:
8085  break;
8086  }
8087 
8088  *pMemoryTypeIndex = UINT32_MAX;
8089  uint32_t minCost = UINT32_MAX;
8090  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8091  memTypeIndex < allocator->GetMemoryTypeCount();
8092  ++memTypeIndex, memTypeBit <<= 1)
8093  {
8094  // This memory type is acceptable according to memoryTypeBits bitmask.
8095  if((memTypeBit & memoryTypeBits) != 0)
8096  {
8097  const VkMemoryPropertyFlags currFlags =
8098  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8099  // This memory type contains requiredFlags.
8100  if((requiredFlags & ~currFlags) == 0)
8101  {
8102  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8103  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8104  // Remember memory type with lowest cost.
8105  if(currCost < minCost)
8106  {
8107  *pMemoryTypeIndex = memTypeIndex;
8108  if(currCost == 0)
8109  {
8110  return VK_SUCCESS;
8111  }
8112  minCost = currCost;
8113  }
8114  }
8115  }
8116  }
8117  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8118 }
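/*
A usage sketch: picking a memory type for a CPU-to-GPU upload. UINT32_MAX as
memoryTypeBits means "any type is acceptable"; in practice the bits usually
come from VkMemoryRequirements::memoryTypeBits.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // VK_SUCCESS: memTypeIndex is the lowest-cost matching type.
    // VK_ERROR_FEATURE_NOT_PRESENT: no type satisfies requiredFlags.
*/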
8119 
8120 VkResult vmaCreatePool(
8121  VmaAllocator allocator,
8122  const VmaPoolCreateInfo* pCreateInfo,
8123  VmaPool* pPool)
8124 {
8125  VMA_ASSERT(allocator && pCreateInfo && pPool);
8126 
8127  VMA_DEBUG_LOG("vmaCreatePool");
8128 
8129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8130 
8131  return allocator->CreatePool(pCreateInfo, pPool);
8132 }
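/*
A usage sketch, assuming memTypeIndex was obtained from vmaFindMemoryTypeIndex.
Zero-valued fields fall back to the defaults established in
VmaAllocator_T::CreatePool above: blockSize 0 becomes the preferred block size,
maxBlockCount 0 becomes unbounded.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/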
8133 
8134 void vmaDestroyPool(
8135  VmaAllocator allocator,
8136  VmaPool pool)
8137 {
8138  VMA_ASSERT(allocator);
8139 
8140  if(pool == VK_NULL_HANDLE)
8141  {
8142  return;
8143  }
8144 
8145  VMA_DEBUG_LOG("vmaDestroyPool");
8146 
8147  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8148 
8149  allocator->DestroyPool(pool);
8150 }
8151 
8152 void vmaGetPoolStats(
8153  VmaAllocator allocator,
8154  VmaPool pool,
8155  VmaPoolStats* pPoolStats)
8156 {
8157  VMA_ASSERT(allocator && pool && pPoolStats);
8158 
8159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8160 
8161  allocator->GetPoolStats(pool, pPoolStats);
8162 }
8163 
8164 void vmaMakePoolAllocationsLost(
8165  VmaAllocator allocator,
8166  VmaPool pool,
8167  size_t* pLostAllocationCount)
8168 {
8169  VMA_ASSERT(allocator && pool);
8170 
8171  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8172 
8173  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8174 }
8175 
8176 VkResult vmaAllocateMemory(
8177  VmaAllocator allocator,
8178  const VkMemoryRequirements* pVkMemoryRequirements,
8179  const VmaAllocationCreateInfo* pCreateInfo,
8180  VmaAllocation* pAllocation,
8181  VmaAllocationInfo* pAllocationInfo)
8182 {
8183  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8184 
8185  VMA_DEBUG_LOG("vmaAllocateMemory");
8186 
8187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8188 
8189  VkResult result = allocator->AllocateMemory(
8190  *pVkMemoryRequirements,
8191  false, // requiresDedicatedAllocation
8192  false, // prefersDedicatedAllocation
8193  VK_NULL_HANDLE, // dedicatedBuffer
8194  VK_NULL_HANDLE, // dedicatedImage
8195  *pCreateInfo,
8196  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8197  pAllocation);
8198 
8199  if(pAllocationInfo && result == VK_SUCCESS)
8200  {
8201  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8202  }
8203 
8204  return result;
8205 }
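/*
A usage sketch, assuming buffer is an already created VkBuffer whose
requirements drive the allocation (vmaAllocateMemoryForBuffer below wraps
these steps):

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
    // allocInfo.deviceMemory + allocInfo.offset can then be bound to the buffer.
*/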
8206 
8207 VkResult vmaAllocateMemoryForBuffer(
8208  VmaAllocator allocator,
8209  VkBuffer buffer,
8210  const VmaAllocationCreateInfo* pCreateInfo,
8211  VmaAllocation* pAllocation,
8212  VmaAllocationInfo* pAllocationInfo)
8213 {
8214  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8215 
8216  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8217 
8218  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8219 
8220  VkMemoryRequirements vkMemReq = {};
8221  bool requiresDedicatedAllocation = false;
8222  bool prefersDedicatedAllocation = false;
8223  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8224  requiresDedicatedAllocation,
8225  prefersDedicatedAllocation);
8226 
8227  VkResult result = allocator->AllocateMemory(
8228  vkMemReq,
8229  requiresDedicatedAllocation,
8230  prefersDedicatedAllocation,
8231  buffer, // dedicatedBuffer
8232  VK_NULL_HANDLE, // dedicatedImage
8233  *pCreateInfo,
8234  VMA_SUBALLOCATION_TYPE_BUFFER,
8235  pAllocation);
8236 
8237  if(pAllocationInfo && result == VK_SUCCESS)
8238  {
8239  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8240  }
8241 
8242  return result;
8243 }
8244 
8245 VkResult vmaAllocateMemoryForImage(
8246  VmaAllocator allocator,
8247  VkImage image,
8248  const VmaAllocationCreateInfo* pCreateInfo,
8249  VmaAllocation* pAllocation,
8250  VmaAllocationInfo* pAllocationInfo)
8251 {
8252  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8253 
8254  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8255 
8256  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8257 
8258  VkResult result = AllocateMemoryForImage(
8259  allocator,
8260  image,
8261  pCreateInfo,
8262  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8263  pAllocation);
8264 
8265  if(pAllocationInfo && result == VK_SUCCESS)
8266  {
8267  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8268  }
8269 
8270  return result;
8271 }
8272 
8273 void vmaFreeMemory(
8274  VmaAllocator allocator,
8275  VmaAllocation allocation)
8276 {
8277  VMA_ASSERT(allocator && allocation);
8278 
8279  VMA_DEBUG_LOG("vmaFreeMemory");
8280 
8281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8282 
8283  allocator->FreeMemory(allocation);
8284 }
8285 
8286 void vmaGetAllocationInfo(
8287  VmaAllocator allocator,
8288  VmaAllocation allocation,
8289  VmaAllocationInfo* pAllocationInfo)
8290 {
8291  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8292 
8293  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8294 
8295  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8296 }
8297 
8298 void vmaSetAllocationUserData(
8299  VmaAllocator allocator,
8300  VmaAllocation allocation,
8301  void* pUserData)
8302 {
8303  VMA_ASSERT(allocator && allocation);
8304 
8305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8306 
8307  allocation->SetUserData(allocator, pUserData);
8308 }
8309 
8310 void vmaCreateLostAllocation(
8311  VmaAllocator allocator,
8312  VmaAllocation* pAllocation)
8313 {
8314  VMA_ASSERT(allocator && pAllocation);
8315 
8316  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8317 
8318  allocator->CreateLostAllocation(pAllocation);
8319 }
8320 
8321 VkResult vmaMapMemory(
8322  VmaAllocator allocator,
8323  VmaAllocation allocation,
8324  void** ppData)
8325 {
8326  VMA_ASSERT(allocator && allocation && ppData);
8327 
8328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8329 
8330  return allocator->Map(allocation, ppData);
8331 }
8332 
8333 void vmaUnmapMemory(
8334  VmaAllocator allocator,
8335  VmaAllocation allocation)
8336 {
8337  VMA_ASSERT(allocator && allocation);
8338 
8339  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8340 
8341  allocator->Unmap(allocation);
8342 }
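/*
A usage sketch; mapping requires a HOST_VISIBLE memory type (e.g. an
allocation created with VMA_MEMORY_USAGE_CPU_ONLY), and srcData/srcSize are
assumed caller-provided. Block allocations share one mapping per
VmaDeviceMemoryBlock, so paired Map/Unmap calls are cheap:

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/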
8343 
8344 VkResult vmaDefragment(
8345  VmaAllocator allocator,
8346  VmaAllocation* pAllocations,
8347  size_t allocationCount,
8348  VkBool32* pAllocationsChanged,
8349  const VmaDefragmentationInfo *pDefragmentationInfo,
8350  VmaDefragmentationStats* pDefragmentationStats)
8351 {
8352  VMA_ASSERT(allocator && pAllocations);
8353 
8354  VMA_DEBUG_LOG("vmaDefragment");
8355 
8356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8357 
8358  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8359 }
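/*
A usage sketch, assuming allocations is a std::vector<VmaAllocation> of
candidates. Only HOST_VISIBLE block allocations are moved, and any buffer or
image bound to a moved allocation must be destroyed, recreated, and rebound by
the caller:

    std::vector<VkBool32> changed(allocations.size());
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations.data(), allocations.size(),
        changed.data(),
        VMA_NULL, // no custom limits
        &stats);
    // For each i with changed[i] == VK_TRUE, recreate and rebind the resource
    // that used allocations[i].
*/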
8360 
8361 VkResult vmaCreateBuffer(
8362  VmaAllocator allocator,
8363  const VkBufferCreateInfo* pBufferCreateInfo,
8364  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8365  VkBuffer* pBuffer,
8366  VmaAllocation* pAllocation,
8367  VmaAllocationInfo* pAllocationInfo)
8368 {
8369  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8370 
8371  VMA_DEBUG_LOG("vmaCreateBuffer");
8372 
8373  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8374 
8375  *pBuffer = VK_NULL_HANDLE;
8376  *pAllocation = VK_NULL_HANDLE;
8377 
8378  // 1. Create VkBuffer.
8379  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8380  allocator->m_hDevice,
8381  pBufferCreateInfo,
8382  allocator->GetAllocationCallbacks(),
8383  pBuffer);
8384  if(res >= 0)
8385  {
8386  // 2. vkGetBufferMemoryRequirements.
8387  VkMemoryRequirements vkMemReq = {};
8388  bool requiresDedicatedAllocation = false;
8389  bool prefersDedicatedAllocation = false;
8390  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8391  requiresDedicatedAllocation, prefersDedicatedAllocation);
8392 
8393  // Make sure alignment requirements for specific buffer usages reported
8394  // in Physical Device Properties are included in alignment reported by memory requirements.
8395  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8396  {
8397  VMA_ASSERT(vkMemReq.alignment %
8398  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8399  }
8400  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8401  {
8402  VMA_ASSERT(vkMemReq.alignment %
8403  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8404  }
8405  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8406  {
8407  VMA_ASSERT(vkMemReq.alignment %
8408  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8409  }
8410 
8411  // 3. Allocate memory using allocator.
8412  res = allocator->AllocateMemory(
8413  vkMemReq,
8414  requiresDedicatedAllocation,
8415  prefersDedicatedAllocation,
8416  *pBuffer, // dedicatedBuffer
8417  VK_NULL_HANDLE, // dedicatedImage
8418  *pAllocationCreateInfo,
8419  VMA_SUBALLOCATION_TYPE_BUFFER,
8420  pAllocation);
8421  if(res >= 0)
8422  {
8423  // 4. Bind buffer with memory.
8424  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8425  allocator->m_hDevice,
8426  *pBuffer,
8427  (*pAllocation)->GetMemory(),
8428  (*pAllocation)->GetOffset());
8429  if(res >= 0)
8430  {
8431  // All steps succeeded.
8432  if(pAllocationInfo != VMA_NULL)
8433  {
8434  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8435  }
8436  return VK_SUCCESS;
8437  }
8438  allocator->FreeMemory(*pAllocation);
8439  *pAllocation = VK_NULL_HANDLE;
8440  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8441  *pBuffer = VK_NULL_HANDLE;
8442  return res;
8443  }
8444  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8445  *pBuffer = VK_NULL_HANDLE;
8446  return res;
8447  }
8448  return res;
8449 }
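/*
A usage sketch: vmaCreateBuffer bundles the create/allocate/bind sequence
implemented above into one call.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/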
8450 
8451 void vmaDestroyBuffer(
8452  VmaAllocator allocator,
8453  VkBuffer buffer,
8454  VmaAllocation allocation)
8455 {
8456  if(buffer != VK_NULL_HANDLE)
8457  {
8458  VMA_ASSERT(allocator);
8459 
8460  VMA_DEBUG_LOG("vmaDestroyBuffer");
8461 
8462  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8463 
8464  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8465 
8466  allocator->FreeMemory(allocation);
8467  }
8468 }
8469 
8470 VkResult vmaCreateImage(
8471  VmaAllocator allocator,
8472  const VkImageCreateInfo* pImageCreateInfo,
8473  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8474  VkImage* pImage,
8475  VmaAllocation* pAllocation,
8476  VmaAllocationInfo* pAllocationInfo)
8477 {
8478  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8479 
8480  VMA_DEBUG_LOG("vmaCreateImage");
8481 
8482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8483 
8484  *pImage = VK_NULL_HANDLE;
8485  *pAllocation = VK_NULL_HANDLE;
8486 
8487  // 1. Create VkImage.
8488  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8489  allocator->m_hDevice,
8490  pImageCreateInfo,
8491  allocator->GetAllocationCallbacks(),
8492  pImage);
8493  if(res >= 0)
8494  {
8495  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8496  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8497  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8498 
8499  // 2. Allocate memory using allocator.
8500  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8501  if(res >= 0)
8502  {
8503  // 3. Bind image with memory.
8504  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8505  allocator->m_hDevice,
8506  *pImage,
8507  (*pAllocation)->GetMemory(),
8508  (*pAllocation)->GetOffset());
8509  if(res >= 0)
8510  {
8511  // All steps succeeded.
8512  if(pAllocationInfo != VMA_NULL)
8513  {
8514  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8515  }
8516  return VK_SUCCESS;
8517  }
8518  allocator->FreeMemory(*pAllocation);
8519  *pAllocation = VK_NULL_HANDLE;
8520  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8521  *pImage = VK_NULL_HANDLE;
8522  return res;
8523  }
8524  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8525  *pImage = VK_NULL_HANDLE;
8526  return res;
8527  }
8528  return res;
8529 }
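/*
A usage sketch, analogous to vmaCreateBuffer; note how tiling selects the
suballocation type above (OPTIMAL vs LINEAR):

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/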
8530 
8531 void vmaDestroyImage(
8532  VmaAllocator allocator,
8533  VkImage image,
8534  VmaAllocation allocation)
8535 {
8536  if(image != VK_NULL_HANDLE)
8537  {
8538  VMA_ASSERT(allocator);
8539 
8540  VMA_DEBUG_LOG("vmaDestroyImage");
8541 
8542  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8543 
8544  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8545 
8546  allocator->FreeMemory(allocation);
8547  }
8548 }
8549 
8550 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:758
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1009
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:783
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:768
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:968
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:762
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1277
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:780
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1443
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1147
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1201
Definition: vk_mem_alloc.h:1046
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:751
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1084
Definition: vk_mem_alloc.h:993
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:792
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:845
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:777
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:997
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:910
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:765
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:909
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:773
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1447
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:809
VmaStatInfo total
Definition: vk_mem_alloc.h:919
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1455
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1068
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1438
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:766
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:693
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:786
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1155
Definition: vk_mem_alloc.h:1149
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1287
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:763
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1105
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1171
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1207
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:749
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1158
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:947
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1433
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1451
Definition: vk_mem_alloc.h:984
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1092
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:764
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:915
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:699
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:720
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:725
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1453
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1079
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
Definition: vk_mem_alloc.h:1217
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a string in JSON format.
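A sketch of dumping allocator state as JSON; the returned string must be released with vmaFreeStatsString():

char* statsString = NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
printf("%s\n", statsString); // or write to a log file
vmaFreeStatsString(allocator, statsString);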
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:759
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:898
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1166
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:712
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
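A minimal sketch of the combined create-buffer-and-allocate path; sizes and usage flags are arbitrary example values:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, NULL); // pAllocationInfo is optional

// ... use the buffer ...
vmaDestroyBuffer(allocator, buffer, allocation);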
Definition: vk_mem_alloc.h:1053
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:911
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:716
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1161
Definition: vk_mem_alloc.h:992
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
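The image path mirrors vmaCreateBuffer(); a sketch with an arbitrary 2D texture description:

VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, NULL);
// ...
vmaDestroyImage(allocator, image, allocation);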
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1074
Definition: vk_mem_alloc.h:1065
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:901
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:761
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1179
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:795
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1210
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1063
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1098
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:833
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:917
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1033
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:910
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:770
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:714
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:769
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by the given allocation and returns a pointer to it.
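A sketch of a map-copy-unmap round trip; it assumes the allocation was created in host-visible memory (e.g. VMA_MEMORY_USAGE_CPU_TO_GPU) and requires <cstring> for memcpy:

const float vertices[] = { 0.0f, 1.0f, 2.0f }; // hypothetical payload

void* mappedData = NULL;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, vertices, sizeof(vertices));
    vmaUnmapMemory(allocator, allocation); // every map must be paired with an unmap
}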
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:1193
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1301
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:789
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:910
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:907
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1198
VkDeviceSize offset
Offset into the deviceMemory object to the beginning of this allocation, in bytes. The (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1282
Definition: vk_mem_alloc.h:1061
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1449
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:757
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
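A minimal sketch of allocator setup and teardown; physicalDevice and device are assumed to be valid Vulkan handles created beforehand:

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
// All other members are optional and may stay zero-initialized.

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);

// ... create buffers, images, pools ...

vmaDestroyAllocator(allocator); // after all allocations have been freed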
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:772
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:905
Definition: vk_mem_alloc.h:952
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1151
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:903
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:767
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:771
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1020
Definition: vk_mem_alloc.h:977
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1296
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:747
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
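A hedged sketch of a defragmentation pass over a hypothetical array of movable allocations; note that buffers or images bound to moved allocations must be re-created and re-bound afterwards:

VmaAllocation allocations[16]; // hypothetical handles collected by the application
VkBool32 allocationsChanged[16] = {};

VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no limit on bytes moved
defragInfo.maxAllocationsToMove = UINT32_MAX; // no limit on allocation count

VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(allocator, allocations, 16,
    allocationsChanged, &defragInfo, &defragStats);
// defragStats.bytesMoved, bytesFreed and allocationsMoved report the outcome.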
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:760
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1263
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
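A sketch of the manual allocate-and-bind path, as opposed to the combined vmaCreateBuffer(); device and buffer are assumed to exist:

VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo createInfo = {};
createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, &allocInfo);

// Bind using the (deviceMemory, offset) pair returned in VmaAllocationInfo:
vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);
// ...
vmaFreeMemory(allocator, allocation);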
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
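A sketch of where the call typically sits in a frame loop so that lost-allocation tracking (see frameInUseCount) can work; the loop structure is the application's own:

uint32_t frameIndex = 0;
while(appIsRunning) // hypothetical application flag
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit this frame's work ...
    ++frameIndex;
}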
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1129
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:911
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:918
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1204
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:911
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1268