Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* pHeapSizeLimit;

    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

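/*
Example (editor's sketch, not part of the original header): creating and later
destroying an allocator. `physicalDevice` and `device` stand for handles the
application obtained during normal Vulkan initialization.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create resources, allocate, render ...
    vmaDestroyAllocator(allocator);
*/
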
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

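/*
Example (editor's sketch): gathering global statistics and, when
VMA_STATS_STRING_ENABLED is 1, dumping them as a JSON string.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total aggregates all memory types and heaps.

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // ... write statsString to a log file ...
    vmaFreeStatsString(allocator, statsString);
*/
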
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;

    VmaMemoryUsage usage;

    VkMemoryPropertyFlags requiredFlags;

    VkMemoryPropertyFlags preferredFlags;

    uint32_t memoryTypeBits;

    VmaPool pool;

    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

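/*
Example (editor's sketch): querying a memory type index suitable for a
CPU-side staging allocation. Passing UINT32_MAX as memoryTypeBits means no
restriction coming from a particular resource.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/
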
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;

    VmaPoolCreateFlags flags;

    VkDeviceSize blockSize;

    size_t minBlockCount;

    size_t maxBlockCount;

    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;

    VkDeviceSize unusedSize;

    size_t allocationCount;

    size_t unusedRangeCount;

    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

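/*
Example (editor's sketch): creating a custom pool. `memTypeIndex` could come
from vmaFindMemoryTypeIndex() above; the chosen block size is illustrative.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Route allocations into it via VmaAllocationCreateInfo::pool, then:
    vmaDestroyPool(allocator, pool);
*/
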
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;

    VkDeviceMemory deviceMemory;

    VkDeviceSize offset;

    VkDeviceSize size;

    void* pMappedData;

    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

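/*
Example (editor's sketch): filling an allocation from the CPU. `srcData` and
`srcSize` are hypothetical; the memory type must be host-visible for
vmaMapMemory() to succeed.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
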
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;

    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;

    VkDeviceSize bytesFreed;

    uint32_t allocationsMoved;

    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

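/*
Example (editor's sketch): defragmenting an array of allocations. `allocs` and
ALLOC_COUNT are hypothetical; the GPU must not be using the backing memory.
Passing null for pDefragmentationInfo requests default limits.

    VkBool32 changed[ALLOC_COUNT] = {};
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(
        allocator, allocs, ALLOC_COUNT, changed, VMA_NULL, &defragStats);
    // For every i where changed[i] == VK_TRUE, the allocation was moved and
    // any buffer/image bound to it must be recreated and rebound.
*/
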
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

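/*
Example (editor's sketch): the typical one-call path that creates a buffer,
allocates device memory for it and binds the two together.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
    // ... use the buffer; eventually:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
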
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

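/*
Example (editor's sketch): the same pattern for images.

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    // ... fill imageType, format, extent, mipLevels, usage, etc. ...

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(
        allocator, &imageInfo, &allocInfo, &image, &allocation, VMA_NULL);
    // ... later:
    vmaDestroyImage(allocator, image, allocation);
*/
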
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED is 1
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32) && !defined(__APPLE__)
    #include <malloc.h> // for aligned_alloc()
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can noticeably slow down the program.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
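// Worked example (editor's note): for v = 0b1101, the first line packs per-pair
// bit counts into c = 0b1001 ([2][1]); the next line folds pairs into nibbles,
// giving 0b0011; the remaining lines sum nibbles, bytes and half-words, so
// VmaCountBitsSet(0b1101) == 3.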

// Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
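// Worked example (editor's note): pageSize (bufferImageGranularity) is a power
// of two per the Vulkan spec. With pageSize = 4096, a resource A at offset 0 and
// size 4000 ends at byte 3999, which lies on page 0; a resource B starting at
// offset 4000 also lies on page 0, so the function returns true, while a
// resource B starting at offset 4096 lies on page 1 and it returns false.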

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

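// Typical usage (editor's note): lock shared state for the duration of a scope;
// useMutex == false turns the lock into a no-op, e.g. when the user promises
// external synchronization:
//
//     {
//         VmaMutexLock lock(m_Mutex, m_UseMutex); // hypothetical members
//         // ... read or modify state guarded by m_Mutex ...
//     } // unlocked here
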
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns the iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned iterator points to the found element, if it is present in the
collection, or to the place where a new element with value (key) should be
inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

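// Example (editor's note): for a sorted array {1, 3, 3, 5} and key 3 with
// cmp = operator<, the function returns an iterator to index 1 (the first 3),
// like std::lower_bound; for key 4 it returns index 3, the position where 4
// would be inserted.
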
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste time
    // returning all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

3379 
3380 class VmaDeviceMemoryBlock;
3381 
3382 struct VmaAllocation_T
3383 {
3384 private:
3385  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3386 
3387  enum FLAGS
3388  {
3389  FLAG_USER_DATA_STRING = 0x01,
3390  };
3391 
3392 public:
3393  enum ALLOCATION_TYPE
3394  {
3395  ALLOCATION_TYPE_NONE,
3396  ALLOCATION_TYPE_BLOCK,
3397  ALLOCATION_TYPE_DEDICATED,
3398  };
3399 
3400  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3401  m_Alignment(1),
3402  m_Size(0),
3403  m_pUserData(VMA_NULL),
3404  m_LastUseFrameIndex(currentFrameIndex),
3405  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3406  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3407  m_MapCount(0),
3408  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3409  {
3410  }
3411 
3412  ~VmaAllocation_T()
3413  {
3414  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3415 
3416  // Check if owned string was freed.
3417  VMA_ASSERT(m_pUserData == VMA_NULL);
3418  }
3419 
3420  void InitBlockAllocation(
3421  VmaPool hPool,
3422  VmaDeviceMemoryBlock* block,
3423  VkDeviceSize offset,
3424  VkDeviceSize alignment,
3425  VkDeviceSize size,
3426  VmaSuballocationType suballocationType,
3427  bool mapped,
3428  bool canBecomeLost)
3429  {
3430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3431  VMA_ASSERT(block != VMA_NULL);
3432  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3433  m_Alignment = alignment;
3434  m_Size = size;
3435  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3436  m_SuballocationType = (uint8_t)suballocationType;
3437  m_BlockAllocation.m_hPool = hPool;
3438  m_BlockAllocation.m_Block = block;
3439  m_BlockAllocation.m_Offset = offset;
3440  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3441  }
3442 
3443  void InitLost()
3444  {
3445  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3446  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3447  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3448  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3449  m_BlockAllocation.m_Block = VMA_NULL;
3450  m_BlockAllocation.m_Offset = 0;
3451  m_BlockAllocation.m_CanBecomeLost = true;
3452  }
3453 
3454  void ChangeBlockAllocation(
3455  VmaAllocator hAllocator,
3456  VmaDeviceMemoryBlock* block,
3457  VkDeviceSize offset);
3458 
3459  // pMappedData not null means allocation is created with MAPPED flag.
3460  void InitDedicatedAllocation(
3461  uint32_t memoryTypeIndex,
3462  VkDeviceMemory hMemory,
3463  VmaSuballocationType suballocationType,
3464  void* pMappedData,
3465  VkDeviceSize size)
3466  {
3467  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3468  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3469  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3470  m_Alignment = 0;
3471  m_Size = size;
3472  m_SuballocationType = (uint8_t)suballocationType;
3473  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3474  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3475  m_DedicatedAllocation.m_hMemory = hMemory;
3476  m_DedicatedAllocation.m_pMappedData = pMappedData;
3477  }
3478 
3479  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3480  VkDeviceSize GetAlignment() const { return m_Alignment; }
3481  VkDeviceSize GetSize() const { return m_Size; }
3482  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3483  void* GetUserData() const { return m_pUserData; }
3484  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3485  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3486 
3487  VmaDeviceMemoryBlock* GetBlock() const
3488  {
3489  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3490  return m_BlockAllocation.m_Block;
3491  }
3492  VkDeviceSize GetOffset() const;
3493  VkDeviceMemory GetMemory() const;
3494  uint32_t GetMemoryTypeIndex() const;
3495  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3496  void* GetMappedData() const;
3497  bool CanBecomeLost() const;
3498  VmaPool GetPool() const;
3499 
3500  uint32_t GetLastUseFrameIndex() const
3501  {
3502  return m_LastUseFrameIndex.load();
3503  }
3504  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3505  {
3506  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3507  }
3508  /*
3509  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3510  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3511  - Else, returns false.
3512 
3513  If hAllocation is already lost, assert - you should not call it then.
3514  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3515  */
3516  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3517 
3518  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3519  {
3520  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3521  outInfo.blockCount = 1;
3522  outInfo.allocationCount = 1;
3523  outInfo.unusedRangeCount = 0;
3524  outInfo.usedBytes = m_Size;
3525  outInfo.unusedBytes = 0;
3526  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3527  outInfo.unusedRangeSizeMin = UINT64_MAX;
3528  outInfo.unusedRangeSizeMax = 0;
3529  }
3530 
3531  void BlockAllocMap();
3532  void BlockAllocUnmap();
3533  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3534  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3535 
3536 private:
3537  VkDeviceSize m_Alignment;
3538  VkDeviceSize m_Size;
3539  void* m_pUserData;
3540  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3541  uint8_t m_Type; // ALLOCATION_TYPE
3542  uint8_t m_SuballocationType; // VmaSuballocationType
3543  // Bit 0x80 is set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3544  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory(). See the sketch after this class.
3545  uint8_t m_MapCount;
3546  uint8_t m_Flags; // enum FLAGS
3547 
3548  // Allocation out of VmaDeviceMemoryBlock.
3549  struct BlockAllocation
3550  {
3551  VmaPool m_hPool; // Null if belongs to general memory.
3552  VmaDeviceMemoryBlock* m_Block;
3553  VkDeviceSize m_Offset;
3554  bool m_CanBecomeLost;
3555  };
3556 
3557  // Allocation for an object that has its own private VkDeviceMemory.
3558  struct DedicatedAllocation
3559  {
3560  uint32_t m_MemoryTypeIndex;
3561  VkDeviceMemory m_hMemory;
3562  void* m_pMappedData; // Not null means memory is mapped.
3563  };
3564 
3565  union
3566  {
3567  // Allocation out of VmaDeviceMemoryBlock.
3568  BlockAllocation m_BlockAllocation;
3569  // Allocation for an object that has its own private VkDeviceMemory.
3570  DedicatedAllocation m_DedicatedAllocation;
3571  };
3572 
3573  void FreeUserDataString(VmaAllocator hAllocator);
3574 };
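// A minimal sketch of the m_MapCount encoding noted above: bit 0x80 marks a
// persistently mapped allocation and the low 7 bits count vmaMapMemory() calls.
// The local names below are hypothetical stand-ins, not part of the library.
#if 0 // Illustrative only - not compiled as part of this header.
#include <cassert>
#include <cstdint>

const uint8_t MAP_FLAG_PERSISTENT = 0x80; // stands in for MAP_COUNT_FLAG_PERSISTENT_MAP
const uint8_t MAP_REF_MASK        = 0x7F;

inline void ExampleMapCountEncoding()
{
    uint8_t mapCount = MAP_FLAG_PERSISTENT; // created with MAPPED flag, no user mappings yet
    assert((mapCount & MAP_REF_MASK) == 0);

    ++mapCount; // one vmaMapMemory() call
    assert((mapCount & MAP_REF_MASK) == 1);
    assert((mapCount & MAP_FLAG_PERSISTENT) != 0); // persistence flag survives the increment

    --mapCount; // matching vmaUnmapMemory() call
    assert((mapCount & MAP_REF_MASK) == 0);
}
#endif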
3575 
3576 /*
3577 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
3578 returned as an allocated memory block, or is free.
3579 */
3580 struct VmaSuballocation
3581 {
3582  VkDeviceSize offset;
3583  VkDeviceSize size;
3584  VmaAllocation hAllocation;
3585  VmaSuballocationType type;
3586 };
3587 
3588 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3589 
3590 // Cost of making one additional allocation lost, expressed as an equivalent size in bytes.
3591 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3592 
3593 /*
3594 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3595 
3596 If canMakeOtherLost was false:
3597 - item points to a FREE suballocation.
3598 - itemsToMakeLostCount is 0.
3599 
3600 If canMakeOtherLost was true:
3601 - item points to first of sequence of suballocations, which are either FREE,
3602  or point to VmaAllocations that can become lost.
3603 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3604  the requested allocation to succeed.
3605 */
3606 struct VmaAllocationRequest
3607 {
3608  VkDeviceSize offset;
3609  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3610  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3611  VmaSuballocationList::iterator item;
3612  size_t itemsToMakeLostCount;
3613 
3614  VkDeviceSize CalcCost() const
3615  {
3616  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3617  }
3618 };
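// A worked example of CalcCost(): every allocation that must be sacrificed adds a
// flat VMA_LOST_ALLOCATION_COST (1 MiB) on top of its actual size, so between two
// otherwise comparable candidates the one losing fewer allocations wins. Illustrative only.
#if 0 // Not compiled as part of this header.
#include <cassert>
#include <cstdint>

inline void ExampleCalcCost()
{
    const uint64_t lostAllocationCost = 1048576; // mirrors VMA_LOST_ALLOCATION_COST above
    const uint64_t sumItemSize = 300000;         // bytes of allocations that would be lost
    const uint64_t itemsToMakeLostCount = 2;     // number of allocations sacrificed

    const uint64_t cost = sumItemSize + itemsToMakeLostCount * lostAllocationCost;
    assert(cost == 2397152); // 300000 + 2 * 1048576
}
#endif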
3619 
3620 /*
3621 Data structure used for bookkeeping of allocations and unused ranges of memory
3622 in a single VkDeviceMemory block.
3623 */
3624 class VmaBlockMetadata
3625 {
3626 public:
3627  VmaBlockMetadata(VmaAllocator hAllocator);
3628  ~VmaBlockMetadata();
3629  void Init(VkDeviceSize size);
3630 
3631  // Validates all data structures inside this object. If not valid, returns false.
3632  bool Validate() const;
3633  VkDeviceSize GetSize() const { return m_Size; }
3634  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3635  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3636  VkDeviceSize GetUnusedRangeSizeMax() const;
3637  // Returns true if this block is empty - contains only single free suballocation.
3638  bool IsEmpty() const;
3639 
3640  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3641  void AddPoolStats(VmaPoolStats& inoutStats) const;
3642 
3643 #if VMA_STATS_STRING_ENABLED
3644  void PrintDetailedMap(class VmaJsonWriter& json) const;
3645 #endif
3646 
3647  // Creates a trivial request for the case when the block is empty.
3648  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3649 
3650  // Tries to find a place for suballocation with given parameters inside this block.
3651  // If succeeded, fills pAllocationRequest and returns true.
3652  // If failed, returns false.
3653  bool CreateAllocationRequest(
3654  uint32_t currentFrameIndex,
3655  uint32_t frameInUseCount,
3656  VkDeviceSize bufferImageGranularity,
3657  VkDeviceSize allocSize,
3658  VkDeviceSize allocAlignment,
3659  VmaSuballocationType allocType,
3660  bool canMakeOtherLost,
3661  VmaAllocationRequest* pAllocationRequest);
3662 
3663  bool MakeRequestedAllocationsLost(
3664  uint32_t currentFrameIndex,
3665  uint32_t frameInUseCount,
3666  VmaAllocationRequest* pAllocationRequest);
3667 
3668  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3669 
3670  // Makes actual allocation based on request. Request must already be checked and valid.
3671  void Alloc(
3672  const VmaAllocationRequest& request,
3673  VmaSuballocationType type,
3674  VkDeviceSize allocSize,
3675  VmaAllocation hAllocation);
3676 
3677  // Frees suballocation assigned to given memory region.
3678  void Free(const VmaAllocation allocation);
3679  void FreeAtOffset(VkDeviceSize offset);
3680 
3681 private:
3682  VkDeviceSize m_Size;
3683  uint32_t m_FreeCount;
3684  VkDeviceSize m_SumFreeSize;
3685  VmaSuballocationList m_Suballocations;
3686  // Suballocations that are free and have size greater than a certain threshold.
3687  // Sorted by size, ascending.
3688  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3689 
3690  bool ValidateFreeSuballocationList() const;
3691 
3692  // Checks if the requested suballocation with the given parameters can be placed in the given suballocItem.
3693  // If yes, fills pOffset and returns true. If no, returns false.
3694  bool CheckAllocation(
3695  uint32_t currentFrameIndex,
3696  uint32_t frameInUseCount,
3697  VkDeviceSize bufferImageGranularity,
3698  VkDeviceSize allocSize,
3699  VkDeviceSize allocAlignment,
3700  VmaSuballocationType allocType,
3701  VmaSuballocationList::const_iterator suballocItem,
3702  bool canMakeOtherLost,
3703  VkDeviceSize* pOffset,
3704  size_t* itemsToMakeLostCount,
3705  VkDeviceSize* pSumFreeSize,
3706  VkDeviceSize* pSumItemSize) const;
3707  // Merges the given free suballocation with the following one, which must also be free (see the sketch after this class).
3708  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3709  // Releases given suballocation, making it free.
3710  // Merges it with adjacent free suballocations if applicable.
3711  // Returns iterator to new free suballocation at this place.
3712  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3713  // Inserts the given free suballocation into the sorted list
3714  // m_FreeSuballocationsBySize, if it is large enough to qualify.
3715  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3716  // Removes the given free suballocation from the sorted list
3717  // m_FreeSuballocationsBySize, if it is registered there.
3718  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3719 };
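// A minimal sketch of the free-range coalescing performed by MergeFreeWithNext()
// and FreeSuballocation(), using std::list and a simplified Range type in place of
// VmaSuballocationList; it preserves the invariant checked by Validate() that no
// two adjacent suballocations are both free.
#if 0 // Illustrative only - not compiled as part of this header.
#include <cassert>
#include <cstdint>
#include <list>

struct Range { uint64_t offset; uint64_t size; bool free; };

// Merges *it with the following range, which must also be free.
inline void MergeFreeWithNext(std::list<Range>& ranges, std::list<Range>::iterator it)
{
    std::list<Range>::iterator next = it;
    ++next;
    assert(it->free && next != ranges.end() && next->free);
    it->size += next->size;
    ranges.erase(next);
}

inline void ExampleMerge()
{
    std::list<Range> ranges = { {0, 64, true}, {64, 192, true}, {256, 256, false} };
    MergeFreeWithNext(ranges, ranges.begin());
    assert(ranges.size() == 2 && ranges.begin()->size == 256); // 64 + 192 coalesced
}
#endif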
3720 
3721 // Helper class that represents mapped memory. Synchronized internally.
3722 class VmaDeviceMemoryMapping
3723 {
3724 public:
3725  VmaDeviceMemoryMapping();
3726  ~VmaDeviceMemoryMapping();
3727 
3728  void* GetMappedData() const { return m_pMappedData; }
3729 
3730  // ppData can be null.
3731  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3732  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3733 
3734 private:
3735  VMA_MUTEX m_Mutex;
3736  uint32_t m_MapCount;
3737  void* m_pMappedData;
3738 };
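// A sketch of the reference-counting pattern behind Map()/Unmap() above: only the
// first Map calls vkMapMemory and only the last Unmap calls vkUnmapMemory, so nested
// mappings are cheap. std::mutex stands in for VMA_MUTEX and plain function pointers
// stand in for the Vulkan calls; the real code also honors a `count` parameter.
#if 0 // Illustrative only - not compiled as part of this header.
#include <cstdint>
#include <mutex>

struct RefCountedMapping
{
    std::mutex m_Mutex;
    uint32_t   m_MapCount = 0;
    void*      m_pMappedData = nullptr;

    void* Map(void* (*doMap)()) // doMap stands in for vkMapMemory
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(m_MapCount++ == 0)
            m_pMappedData = doMap();
        return m_pMappedData; // later calls reuse the cached pointer
    }
    void Unmap(void (*doUnmap)()) // doUnmap stands in for vkUnmapMemory
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(--m_MapCount == 0) // the real code asserts on an unbalanced Unmap
        {
            doUnmap();
            m_pMappedData = nullptr;
        }
    }
};
#endif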
3739 
3740 /*
3741 Represents a single block of device memory (`VkDeviceMemory`) with all the
3742 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3743 
3744 Thread-safety: This class must be externally synchronized.
3745 */
3746 class VmaDeviceMemoryBlock
3747 {
3748 public:
3749  uint32_t m_MemoryTypeIndex;
3750  VkDeviceMemory m_hMemory;
3751  VmaDeviceMemoryMapping m_Mapping;
3752  VmaBlockMetadata m_Metadata;
3753 
3754  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3755 
3756  ~VmaDeviceMemoryBlock()
3757  {
3758  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3759  }
3760 
3761  // Always call after construction.
3762  void Init(
3763  uint32_t newMemoryTypeIndex,
3764  VkDeviceMemory newMemory,
3765  VkDeviceSize newSize);
3766  // Always call before destruction.
3767  void Destroy(VmaAllocator allocator);
3768 
3769  // Validates all data structures inside this object. If not valid, returns false.
3770  bool Validate() const;
3771 
3772  // ppData can be null.
3773  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3774  void Unmap(VmaAllocator hAllocator, uint32_t count);
3775 };
3776 
3777 struct VmaPointerLess
3778 {
3779  bool operator()(const void* lhs, const void* rhs) const
3780  {
3781  return lhs < rhs;
3782  }
3783 };
3784 
3785 class VmaDefragmentator;
3786 
3787 /*
3788 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3789 Vulkan memory type.
3790 
3791 Synchronized internally with a mutex.
3792 */
3793 struct VmaBlockVector
3794 {
3795  VmaBlockVector(
3796  VmaAllocator hAllocator,
3797  uint32_t memoryTypeIndex,
3798  VkDeviceSize preferredBlockSize,
3799  size_t minBlockCount,
3800  size_t maxBlockCount,
3801  VkDeviceSize bufferImageGranularity,
3802  uint32_t frameInUseCount,
3803  bool isCustomPool);
3804  ~VmaBlockVector();
3805 
3806  VkResult CreateMinBlocks();
3807 
3808  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3809  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3810  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3811  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3812 
3813  void GetPoolStats(VmaPoolStats* pStats);
3814 
3815  bool IsEmpty() const { return m_Blocks.empty(); }
3816 
3817  VkResult Allocate(
3818  VmaPool hCurrentPool,
3819  uint32_t currentFrameIndex,
3820  const VkMemoryRequirements& vkMemReq,
3821  const VmaAllocationCreateInfo& createInfo,
3822  VmaSuballocationType suballocType,
3823  VmaAllocation* pAllocation);
3824 
3825  void Free(
3826  VmaAllocation hAllocation);
3827 
3828  // Adds statistics of this BlockVector to pStats.
3829  void AddStats(VmaStats* pStats);
3830 
3831 #if VMA_STATS_STRING_ENABLED
3832  void PrintDetailedMap(class VmaJsonWriter& json);
3833 #endif
3834 
3835  void MakePoolAllocationsLost(
3836  uint32_t currentFrameIndex,
3837  size_t* pLostAllocationCount);
3838 
3839  VmaDefragmentator* EnsureDefragmentator(
3840  VmaAllocator hAllocator,
3841  uint32_t currentFrameIndex);
3842 
3843  VkResult Defragment(
3844  VmaDefragmentationStats* pDefragmentationStats,
3845  VkDeviceSize& maxBytesToMove,
3846  uint32_t& maxAllocationsToMove);
3847 
3848  void DestroyDefragmentator();
3849 
3850 private:
3851  friend class VmaDefragmentator;
3852 
3853  const VmaAllocator m_hAllocator;
3854  const uint32_t m_MemoryTypeIndex;
3855  const VkDeviceSize m_PreferredBlockSize;
3856  const size_t m_MinBlockCount;
3857  const size_t m_MaxBlockCount;
3858  const VkDeviceSize m_BufferImageGranularity;
3859  const uint32_t m_FrameInUseCount;
3860  const bool m_IsCustomPool;
3861  VMA_MUTEX m_Mutex;
3862  // Incrementally sorted by sumFreeSize, ascending.
3863  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3864  /* There can be at most one block that is completely empty - a
3865  hysteresis to avoid the pessimistic case of alternating creation and
3866  destruction of a VkDeviceMemory. */
3867  bool m_HasEmptyBlock;
3868  VmaDefragmentator* m_pDefragmentator;
3869 
3870  size_t CalcMaxBlockSize() const;
3871 
3872  // Finds and removes given block from vector.
3873  void Remove(VmaDeviceMemoryBlock* pBlock);
3874 
3875  // Performs a single step in sorting m_Blocks; they may not be fully sorted
3876  // after this call. See the sketch after this class.
3877  void IncrementallySortBlocks();
3878 
3879  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3880 };
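// A sketch of the single-step sorting mentioned above: each call fixes at most one
// adjacent inversion (bubble sort until the first swap), amortizing the cost of
// keeping m_Blocks roughly ordered by sumFreeSize across many allocations.
#if 0 // Illustrative only - not compiled as part of this header.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

inline void IncrementallySortStep(std::vector<uint64_t>& keys)
{
    for(size_t i = 1; i < keys.size(); ++i)
    {
        if(keys[i - 1] > keys[i])
        {
            std::swap(keys[i - 1], keys[i]);
            return; // at most one swap per call
        }
    }
}
#endif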
3881 
3882 struct VmaPool_T
3883 {
3884 public:
3885  VmaBlockVector m_BlockVector;
3886 
3887  // Takes ownership.
3888  VmaPool_T(
3889  VmaAllocator hAllocator,
3890  const VmaPoolCreateInfo& createInfo);
3891  ~VmaPool_T();
3892 
3893  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3894 
3895 #if VMA_STATS_STRING_ENABLED
3896  //void PrintDetailedMap(class VmaStringBuilder& sb);
3897 #endif
3898 };
3899 
3900 class VmaDefragmentator
3901 {
3902  const VmaAllocator m_hAllocator;
3903  VmaBlockVector* const m_pBlockVector;
3904  uint32_t m_CurrentFrameIndex;
3905  VkDeviceSize m_BytesMoved;
3906  uint32_t m_AllocationsMoved;
3907 
3908  struct AllocationInfo
3909  {
3910  VmaAllocation m_hAllocation;
3911  VkBool32* m_pChanged;
3912 
3913  AllocationInfo() :
3914  m_hAllocation(VK_NULL_HANDLE),
3915  m_pChanged(VMA_NULL)
3916  {
3917  }
3918  };
3919 
3920  struct AllocationInfoSizeGreater
3921  {
3922  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3923  {
3924  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3925  }
3926  };
3927 
3928  // Used between AddAllocation and Defragment.
3929  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3930 
3931  struct BlockInfo
3932  {
3933  VmaDeviceMemoryBlock* m_pBlock;
3934  bool m_HasNonMovableAllocations;
3935  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3936 
3937  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3938  m_pBlock(VMA_NULL),
3939  m_HasNonMovableAllocations(true),
3940  m_Allocations(pAllocationCallbacks),
3941  m_pMappedDataForDefragmentation(VMA_NULL)
3942  {
3943  }
3944 
3945  void CalcHasNonMovableAllocations()
3946  {
3947  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3948  const size_t defragmentAllocCount = m_Allocations.size();
3949  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3950  }
3951 
3952  void SortAllocationsBySizeDescecnding()
3953  {
3954  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3955  }
3956 
3957  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3958  void Unmap(VmaAllocator hAllocator);
3959 
3960  private:
3961  // Not null if mapped for defragmentation only, not originally mapped.
3962  void* m_pMappedDataForDefragmentation;
3963  };
3964 
3965  struct BlockPointerLess
3966  {
3967  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3968  {
3969  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3970  }
3971  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3972  {
3973  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3974  }
3975  };
3976 
3977  // 1. Blocks with some non-movable allocations go first.
3978  // 2. Blocks with smaller sumFreeSize go first. See the sketch after this class.
3979  struct BlockInfoCompareMoveDestination
3980  {
3981  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3982  {
3983  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3984  {
3985  return true;
3986  }
3987  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3988  {
3989  return false;
3990  }
3991  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3992  {
3993  return true;
3994  }
3995  return false;
3996  }
3997  };
3998 
3999  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4000  BlockInfoVector m_Blocks;
4001 
4002  VkResult DefragmentRound(
4003  VkDeviceSize maxBytesToMove,
4004  uint32_t maxAllocationsToMove);
4005 
4006  static bool MoveMakesSense(
4007  size_t dstBlockIndex, VkDeviceSize dstOffset,
4008  size_t srcBlockIndex, VkDeviceSize srcOffset);
4009 
4010 public:
4011  VmaDefragmentator(
4012  VmaAllocator hAllocator,
4013  VmaBlockVector* pBlockVector,
4014  uint32_t currentFrameIndex);
4015 
4016  ~VmaDefragmentator();
4017 
4018  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4019  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4020 
4021  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4022 
4023  VkResult Defragment(
4024  VkDeviceSize maxBytesToMove,
4025  uint32_t maxAllocationsToMove);
4026 };
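// A sketch of the two-key ordering used by BlockInfoCompareMoveDestination above:
// blocks that cannot be emptied come first, then fuller blocks (smaller sumFreeSize),
// so movable data is packed into existing blocks before emptier ones are touched.
// ToyBlock is a hypothetical stand-in for BlockInfo.
#if 0 // Illustrative only - not compiled as part of this header.
#include <algorithm>
#include <cstdint>
#include <vector>

struct ToyBlock { bool hasNonMovable; uint64_t sumFreeSize; };

inline bool MoveDstLess(const ToyBlock& lhs, const ToyBlock& rhs)
{
    if(lhs.hasNonMovable != rhs.hasNonMovable)
        return lhs.hasNonMovable; // non-movable blocks sort first
    return lhs.sumFreeSize < rhs.sumFreeSize; // then ascending free size
}

inline void ExampleSortMoveDestinations(std::vector<ToyBlock>& blocks)
{
    std::sort(blocks.begin(), blocks.end(), MoveDstLess);
}
#endif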
4027 
4028 // Main allocator object.
4029 struct VmaAllocator_T
4030 {
4031  bool m_UseMutex;
4032  bool m_UseKhrDedicatedAllocation;
4033  VkDevice m_hDevice;
4034  bool m_AllocationCallbacksSpecified;
4035  VkAllocationCallbacks m_AllocationCallbacks;
4036  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4037 
4038  // Number of bytes still available under the heap size limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4039  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4040  VMA_MUTEX m_HeapSizeLimitMutex;
4041 
4042  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4043  VkPhysicalDeviceMemoryProperties m_MemProps;
4044 
4045  // Default pools.
4046  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4047 
4048  // Each vector is sorted by memory (handle value).
4049  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4050  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4051  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4052 
4053  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4054  ~VmaAllocator_T();
4055 
4056  const VkAllocationCallbacks* GetAllocationCallbacks() const
4057  {
4058  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4059  }
4060  const VmaVulkanFunctions& GetVulkanFunctions() const
4061  {
4062  return m_VulkanFunctions;
4063  }
4064 
4065  VkDeviceSize GetBufferImageGranularity() const
4066  {
4067  return VMA_MAX(
4068  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4069  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4070  }
4071 
4072  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4073  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4074 
4075  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4076  {
4077  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4078  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4079  }
4080 
4081  void GetBufferMemoryRequirements(
4082  VkBuffer hBuffer,
4083  VkMemoryRequirements& memReq,
4084  bool& requiresDedicatedAllocation,
4085  bool& prefersDedicatedAllocation) const;
4086  void GetImageMemoryRequirements(
4087  VkImage hImage,
4088  VkMemoryRequirements& memReq,
4089  bool& requiresDedicatedAllocation,
4090  bool& prefersDedicatedAllocation) const;
4091 
4092  // Main allocation function.
4093  VkResult AllocateMemory(
4094  const VkMemoryRequirements& vkMemReq,
4095  bool requiresDedicatedAllocation,
4096  bool prefersDedicatedAllocation,
4097  VkBuffer dedicatedBuffer,
4098  VkImage dedicatedImage,
4099  const VmaAllocationCreateInfo& createInfo,
4100  VmaSuballocationType suballocType,
4101  VmaAllocation* pAllocation);
4102 
4103  // Main deallocation function.
4104  void FreeMemory(const VmaAllocation allocation);
4105 
4106  void CalculateStats(VmaStats* pStats);
4107 
4108 #if VMA_STATS_STRING_ENABLED
4109  void PrintDetailedMap(class VmaJsonWriter& json);
4110 #endif
4111 
4112  VkResult Defragment(
4113  VmaAllocation* pAllocations,
4114  size_t allocationCount,
4115  VkBool32* pAllocationsChanged,
4116  const VmaDefragmentationInfo* pDefragmentationInfo,
4117  VmaDefragmentationStats* pDefragmentationStats);
4118 
4119  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4120  bool TouchAllocation(VmaAllocation hAllocation);
4121 
4122  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4123  void DestroyPool(VmaPool pool);
4124  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4125 
4126  void SetCurrentFrameIndex(uint32_t frameIndex);
4127 
4128  void MakePoolAllocationsLost(
4129  VmaPool hPool,
4130  size_t* pLostAllocationCount);
4131 
4132  void CreateLostAllocation(VmaAllocation* pAllocation);
4133 
4134  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4135  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4136 
4137  VkResult Map(VmaAllocation hAllocation, void** ppData);
4138  void Unmap(VmaAllocation hAllocation);
4139 
4140 private:
4141  VkDeviceSize m_PreferredLargeHeapBlockSize;
4142 
4143  VkPhysicalDevice m_PhysicalDevice;
4144  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4145 
4146  VMA_MUTEX m_PoolsMutex;
4147  // Protected by m_PoolsMutex. Sorted by pointer value.
4148  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4149 
4150  VmaVulkanFunctions m_VulkanFunctions;
4151 
4152  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4153 
4154  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4155 
4156  VkResult AllocateMemoryOfType(
4157  const VkMemoryRequirements& vkMemReq,
4158  bool dedicatedAllocation,
4159  VkBuffer dedicatedBuffer,
4160  VkImage dedicatedImage,
4161  const VmaAllocationCreateInfo& createInfo,
4162  uint32_t memTypeIndex,
4163  VmaSuballocationType suballocType,
4164  VmaAllocation* pAllocation);
4165 
4166  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
4167  VkResult AllocateDedicatedMemory(
4168  VkDeviceSize size,
4169  VmaSuballocationType suballocType,
4170  uint32_t memTypeIndex,
4171  bool map,
4172  bool isUserDataString,
4173  void* pUserData,
4174  VkBuffer dedicatedBuffer,
4175  VkImage dedicatedImage,
4176  VmaAllocation* pAllocation);
4177 
4178  // Frees the given allocation as dedicated memory, releasing its own VkDeviceMemory.
4179  void FreeDedicatedMemory(VmaAllocation allocation);
4180 };
4181 
4183 // Memory allocation #2 after VmaAllocator_T definition
4184 
4185 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4186 {
4187  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4188 }
4189 
4190 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4191 {
4192  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4193 }
4194 
4195 template<typename T>
4196 static T* VmaAllocate(VmaAllocator hAllocator)
4197 {
4198  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4199 }
4200 
4201 template<typename T>
4202 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4203 {
4204  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4205 }
4206 
4207 template<typename T>
4208 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4209 {
4210  if(ptr != VMA_NULL)
4211  {
4212  ptr->~T();
4213  VmaFree(hAllocator, ptr);
4214  }
4215 }
4216 
4217 template<typename T>
4218 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4219 {
4220  if(ptr != VMA_NULL)
4221  {
4222  for(size_t i = count; i--; )
4223  ptr[i].~T();
4224  VmaFree(hAllocator, ptr);
4225  }
4226 }
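// A hypothetical usage sketch of the helpers above: VmaAllocate() returns raw,
// suitably aligned storage obtained through the user-supplied VkAllocationCallbacks,
// the object is constructed with placement new, and vma_delete() destroys and frees it.
#if 0 // Illustrative only - not compiled as part of this header.
#include <new>

inline void ExampleVmaAllocateUsage(VmaAllocator hAllocator)
{
    VmaStlAllocator<char>* p = VmaAllocate< VmaStlAllocator<char> >(hAllocator);
    new(p) VmaStlAllocator<char>(hAllocator->GetAllocationCallbacks());

    // ... use *p ...

    vma_delete(hAllocator, p); // runs the destructor, then VmaFree()
}
#endif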
4227 
4229 // VmaStringBuilder
4230 
4231 #if VMA_STATS_STRING_ENABLED
4232 
4233 class VmaStringBuilder
4234 {
4235 public:
4236  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4237  size_t GetLength() const { return m_Data.size(); }
4238  const char* GetData() const { return m_Data.data(); }
4239 
4240  void Add(char ch) { m_Data.push_back(ch); }
4241  void Add(const char* pStr);
4242  void AddNewLine() { Add('\n'); }
4243  void AddNumber(uint32_t num);
4244  void AddNumber(uint64_t num);
4245  void AddPointer(const void* ptr);
4246 
4247 private:
4248  VmaVector< char, VmaStlAllocator<char> > m_Data;
4249 };
4250 
4251 void VmaStringBuilder::Add(const char* pStr)
4252 {
4253  const size_t strLen = strlen(pStr);
4254  if(strLen > 0)
4255  {
4256  const size_t oldCount = m_Data.size();
4257  m_Data.resize(oldCount + strLen);
4258  memcpy(m_Data.data() + oldCount, pStr, strLen);
4259  }
4260 }
4261 
4262 void VmaStringBuilder::AddNumber(uint32_t num)
4263 {
4264  char buf[11];
4265  VmaUint32ToStr(buf, sizeof(buf), num);
4266  Add(buf);
4267 }
4268 
4269 void VmaStringBuilder::AddNumber(uint64_t num)
4270 {
4271  char buf[21];
4272  VmaUint64ToStr(buf, sizeof(buf), num);
4273  Add(buf);
4274 }
4275 
4276 void VmaStringBuilder::AddPointer(const void* ptr)
4277 {
4278  char buf[21];
4279  VmaPtrToStr(buf, sizeof(buf), ptr);
4280  Add(buf);
4281 }
4282 
4283 #endif // #if VMA_STATS_STRING_ENABLED
4284 
4286 // VmaJsonWriter
4287 
4288 #if VMA_STATS_STRING_ENABLED
4289 
4290 class VmaJsonWriter
4291 {
4292 public:
4293  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4294  ~VmaJsonWriter();
4295 
4296  void BeginObject(bool singleLine = false);
4297  void EndObject();
4298 
4299  void BeginArray(bool singleLine = false);
4300  void EndArray();
4301 
4302  void WriteString(const char* pStr);
4303  void BeginString(const char* pStr = VMA_NULL);
4304  void ContinueString(const char* pStr);
4305  void ContinueString(uint32_t n);
4306  void ContinueString(uint64_t n);
4307  void ContinueString_Pointer(const void* ptr);
4308  void EndString(const char* pStr = VMA_NULL);
4309 
4310  void WriteNumber(uint32_t n);
4311  void WriteNumber(uint64_t n);
4312  void WriteBool(bool b);
4313  void WriteNull();
4314 
4315 private:
4316  static const char* const INDENT;
4317 
4318  enum COLLECTION_TYPE
4319  {
4320  COLLECTION_TYPE_OBJECT,
4321  COLLECTION_TYPE_ARRAY,
4322  };
4323  struct StackItem
4324  {
4325  COLLECTION_TYPE type;
4326  uint32_t valueCount;
4327  bool singleLineMode;
4328  };
4329 
4330  VmaStringBuilder& m_SB;
4331  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4332  bool m_InsideString;
4333 
4334  void BeginValue(bool isString);
4335  void WriteIndent(bool oneLess = false);
4336 };
4337 
4338 const char* const VmaJsonWriter::INDENT = " ";
4339 
4340 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4341  m_SB(sb),
4342  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4343  m_InsideString(false)
4344 {
4345 }
4346 
4347 VmaJsonWriter::~VmaJsonWriter()
4348 {
4349  VMA_ASSERT(!m_InsideString);
4350  VMA_ASSERT(m_Stack.empty());
4351 }
4352 
4353 void VmaJsonWriter::BeginObject(bool singleLine)
4354 {
4355  VMA_ASSERT(!m_InsideString);
4356 
4357  BeginValue(false);
4358  m_SB.Add('{');
4359 
4360  StackItem item;
4361  item.type = COLLECTION_TYPE_OBJECT;
4362  item.valueCount = 0;
4363  item.singleLineMode = singleLine;
4364  m_Stack.push_back(item);
4365 }
4366 
4367 void VmaJsonWriter::EndObject()
4368 {
4369  VMA_ASSERT(!m_InsideString);
4370 
4371  WriteIndent(true);
4372  m_SB.Add('}');
4373 
4374  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4375  m_Stack.pop_back();
4376 }
4377 
4378 void VmaJsonWriter::BeginArray(bool singleLine)
4379 {
4380  VMA_ASSERT(!m_InsideString);
4381 
4382  BeginValue(false);
4383  m_SB.Add('[');
4384 
4385  StackItem item;
4386  item.type = COLLECTION_TYPE_ARRAY;
4387  item.valueCount = 0;
4388  item.singleLineMode = singleLine;
4389  m_Stack.push_back(item);
4390 }
4391 
4392 void VmaJsonWriter::EndArray()
4393 {
4394  VMA_ASSERT(!m_InsideString);
4395 
4396  WriteIndent(true);
4397  m_SB.Add(']');
4398 
4399  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4400  m_Stack.pop_back();
4401 }
4402 
4403 void VmaJsonWriter::WriteString(const char* pStr)
4404 {
4405  BeginString(pStr);
4406  EndString();
4407 }
4408 
4409 void VmaJsonWriter::BeginString(const char* pStr)
4410 {
4411  VMA_ASSERT(!m_InsideString);
4412 
4413  BeginValue(true);
4414  m_SB.Add('"');
4415  m_InsideString = true;
4416  if(pStr != VMA_NULL && pStr[0] != '\0')
4417  {
4418  ContinueString(pStr);
4419  }
4420 }
4421 
4422 void VmaJsonWriter::ContinueString(const char* pStr)
4423 {
4424  VMA_ASSERT(m_InsideString);
4425 
4426  const size_t strLen = strlen(pStr);
4427  for(size_t i = 0; i < strLen; ++i)
4428  {
4429  char ch = pStr[i];
4430  if(ch == '\\')
4431  {
4432  m_SB.Add("\\\\");
4433  }
4434  else if(ch == '"')
4435  {
4436  m_SB.Add("\\\"");
4437  }
4438  else if(ch >= 32)
4439  {
4440  m_SB.Add(ch);
4441  }
4442  else switch(ch)
4443  {
4444  case '\b':
4445  m_SB.Add("\\b");
4446  break;
4447  case '\f':
4448  m_SB.Add("\\f");
4449  break;
4450  case '\n':
4451  m_SB.Add("\\n");
4452  break;
4453  case '\r':
4454  m_SB.Add("\\r");
4455  break;
4456  case '\t':
4457  m_SB.Add("\\t");
4458  break;
4459  default:
4460  VMA_ASSERT(0 && "Character not currently supported.");
4461  break;
4462  }
4463  }
4464 }
4465 
4466 void VmaJsonWriter::ContinueString(uint32_t n)
4467 {
4468  VMA_ASSERT(m_InsideString);
4469  m_SB.AddNumber(n);
4470 }
4471 
4472 void VmaJsonWriter::ContinueString(uint64_t n)
4473 {
4474  VMA_ASSERT(m_InsideString);
4475  m_SB.AddNumber(n);
4476 }
4477 
4478 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4479 {
4480  VMA_ASSERT(m_InsideString);
4481  m_SB.AddPointer(ptr);
4482 }
4483 
4484 void VmaJsonWriter::EndString(const char* pStr)
4485 {
4486  VMA_ASSERT(m_InsideString);
4487  if(pStr != VMA_NULL && pStr[0] != '\0')
4488  {
4489  ContinueString(pStr);
4490  }
4491  m_SB.Add('"');
4492  m_InsideString = false;
4493 }
4494 
4495 void VmaJsonWriter::WriteNumber(uint32_t n)
4496 {
4497  VMA_ASSERT(!m_InsideString);
4498  BeginValue(false);
4499  m_SB.AddNumber(n);
4500 }
4501 
4502 void VmaJsonWriter::WriteNumber(uint64_t n)
4503 {
4504  VMA_ASSERT(!m_InsideString);
4505  BeginValue(false);
4506  m_SB.AddNumber(n);
4507 }
4508 
4509 void VmaJsonWriter::WriteBool(bool b)
4510 {
4511  VMA_ASSERT(!m_InsideString);
4512  BeginValue(false);
4513  m_SB.Add(b ? "true" : "false");
4514 }
4515 
4516 void VmaJsonWriter::WriteNull()
4517 {
4518  VMA_ASSERT(!m_InsideString);
4519  BeginValue(false);
4520  m_SB.Add("null");
4521 }
4522 
4523 void VmaJsonWriter::BeginValue(bool isString)
4524 {
4525  if(!m_Stack.empty())
4526  {
4527  StackItem& currItem = m_Stack.back();
4528  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4529  currItem.valueCount % 2 == 0)
4530  {
4531  VMA_ASSERT(isString);
4532  }
4533 
4534  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4535  currItem.valueCount % 2 != 0)
4536  {
4537  m_SB.Add(": ");
4538  }
4539  else if(currItem.valueCount > 0)
4540  {
4541  m_SB.Add(", ");
4542  WriteIndent();
4543  }
4544  else
4545  {
4546  WriteIndent();
4547  }
4548  ++currItem.valueCount;
4549  }
4550 }
4551 
4552 void VmaJsonWriter::WriteIndent(bool oneLess)
4553 {
4554  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4555  {
4556  m_SB.AddNewLine();
4557 
4558  size_t count = m_Stack.size();
4559  if(count > 0 && oneLess)
4560  {
4561  --count;
4562  }
4563  for(size_t i = 0; i < count; ++i)
4564  {
4565  m_SB.Add(INDENT);
4566  }
4567  }
4568 }
4569 
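// A usage sketch for the writer above: inside an object, string keys and values
// alternate (BeginValue() asserts that every even-indexed value is a string key),
// and the destructor asserts that all collections were closed. Illustrative only.
#if 0 // Not compiled as part of this header.
inline void ExampleJsonWriter(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Blocks"); // key
        json.WriteNumber(1u);       // value
        json.WriteString("Empty");  // key
        json.WriteBool(false);      // value
        json.EndObject();
    }
    // sb now contains:
    // {
    //  "Blocks": 1,
    //  "Empty": false
    // }
}
#endif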
4570 #endif // #if VMA_STATS_STRING_ENABLED
4571 
4573 
4574 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4575 {
4576  if(IsUserDataString())
4577  {
4578  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4579 
4580  FreeUserDataString(hAllocator);
4581 
4582  if(pUserData != VMA_NULL)
4583  {
4584  const char* const newStrSrc = (char*)pUserData;
4585  const size_t newStrLen = strlen(newStrSrc);
4586  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4587  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4588  m_pUserData = newStrDst;
4589  }
4590  }
4591  else
4592  {
4593  m_pUserData = pUserData;
4594  }
4595 }
4596 
4597 void VmaAllocation_T::ChangeBlockAllocation(
4598  VmaAllocator hAllocator,
4599  VmaDeviceMemoryBlock* block,
4600  VkDeviceSize offset)
4601 {
4602  VMA_ASSERT(block != VMA_NULL);
4603  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4604 
4605  // Move mapping reference counter from old block to new block.
4606  if(block != m_BlockAllocation.m_Block)
4607  {
4608  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4609  if(IsPersistentMap())
4610  ++mapRefCount;
4611  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4612  block->Map(hAllocator, mapRefCount, VMA_NULL);
4613  }
4614 
4615  m_BlockAllocation.m_Block = block;
4616  m_BlockAllocation.m_Offset = offset;
4617 }
4618 
4619 VkDeviceSize VmaAllocation_T::GetOffset() const
4620 {
4621  switch(m_Type)
4622  {
4623  case ALLOCATION_TYPE_BLOCK:
4624  return m_BlockAllocation.m_Offset;
4625  case ALLOCATION_TYPE_DEDICATED:
4626  return 0;
4627  default:
4628  VMA_ASSERT(0);
4629  return 0;
4630  }
4631 }
4632 
4633 VkDeviceMemory VmaAllocation_T::GetMemory() const
4634 {
4635  switch(m_Type)
4636  {
4637  case ALLOCATION_TYPE_BLOCK:
4638  return m_BlockAllocation.m_Block->m_hMemory;
4639  case ALLOCATION_TYPE_DEDICATED:
4640  return m_DedicatedAllocation.m_hMemory;
4641  default:
4642  VMA_ASSERT(0);
4643  return VK_NULL_HANDLE;
4644  }
4645 }
4646 
4647 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4648 {
4649  switch(m_Type)
4650  {
4651  case ALLOCATION_TYPE_BLOCK:
4652  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4653  case ALLOCATION_TYPE_DEDICATED:
4654  return m_DedicatedAllocation.m_MemoryTypeIndex;
4655  default:
4656  VMA_ASSERT(0);
4657  return UINT32_MAX;
4658  }
4659 }
4660 
4661 void* VmaAllocation_T::GetMappedData() const
4662 {
4663  switch(m_Type)
4664  {
4665  case ALLOCATION_TYPE_BLOCK:
4666  if(m_MapCount != 0)
4667  {
4668  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4669  VMA_ASSERT(pBlockData != VMA_NULL);
4670  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4671  }
4672  else
4673  {
4674  return VMA_NULL;
4675  }
4676  break;
4677  case ALLOCATION_TYPE_DEDICATED:
4678  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4679  return m_DedicatedAllocation.m_pMappedData;
4680  default:
4681  VMA_ASSERT(0);
4682  return VMA_NULL;
4683  }
4684 }
4685 
4686 bool VmaAllocation_T::CanBecomeLost() const
4687 {
4688  switch(m_Type)
4689  {
4690  case ALLOCATION_TYPE_BLOCK:
4691  return m_BlockAllocation.m_CanBecomeLost;
4692  case ALLOCATION_TYPE_DEDICATED:
4693  return false;
4694  default:
4695  VMA_ASSERT(0);
4696  return false;
4697  }
4698 }
4699 
4700 VmaPool VmaAllocation_T::GetPool() const
4701 {
4702  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4703  return m_BlockAllocation.m_hPool;
4704 }
4705 
4706 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4707 {
4708  VMA_ASSERT(CanBecomeLost());
4709 
4710  /*
4711  Warning: This is a carefully designed algorithm.
4712  Do not modify unless you really know what you're doing :)
4713  */
4714  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4715  for(;;)
4716  {
4717  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4718  {
4719  VMA_ASSERT(0);
4720  return false;
4721  }
4722  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4723  {
4724  return false;
4725  }
4726  else // Last use time earlier than current time.
4727  {
4728  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4729  {
4730  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4731  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4732  return true;
4733  }
4734  }
4735  }
4736 }
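// The loop above is the standard compare-exchange retry pattern: the "old enough to
// become lost" decision is recomputed from the freshly observed frame index whenever
// the exchange fails. A standalone sketch using std::atomic directly, with a
// hypothetical FRAME_INDEX_LOST sentinel standing in for VMA_FRAME_INDEX_LOST:
#if 0 // Illustrative only - not compiled as part of this header.
#include <atomic>
#include <cstdint>

const uint32_t FRAME_INDEX_LOST = UINT32_MAX;

inline bool TryMakeLost(std::atomic<uint32_t>& lastUseFrame,
                        uint32_t currentFrame, uint32_t framesInUse)
{
    uint32_t observed = lastUseFrame.load();
    for(;;)
    {
        if(observed == FRAME_INDEX_LOST)
            return false; // already lost
        if(observed + framesInUse >= currentFrame)
            return false; // possibly still in use by in-flight frames
        // On failure, compare_exchange_weak reloads `observed` and the
        // conditions above are re-evaluated against the new value.
        if(lastUseFrame.compare_exchange_weak(observed, FRAME_INDEX_LOST))
            return true;
    }
}
#endif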
4737 
4738 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4739 {
4740  VMA_ASSERT(IsUserDataString());
4741  if(m_pUserData != VMA_NULL)
4742  {
4743  char* const oldStr = (char*)m_pUserData;
4744  const size_t oldStrLen = strlen(oldStr);
4745  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4746  m_pUserData = VMA_NULL;
4747  }
4748 }
4749 
4750 void VmaAllocation_T::BlockAllocMap()
4751 {
4752  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4753 
4754  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4755  {
4756  ++m_MapCount;
4757  }
4758  else
4759  {
4760  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4761  }
4762 }
4763 
4764 void VmaAllocation_T::BlockAllocUnmap()
4765 {
4766  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4767 
4768  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4769  {
4770  --m_MapCount;
4771  }
4772  else
4773  {
4774  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4775  }
4776 }
4777 
4778 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4779 {
4780  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4781 
4782  if(m_MapCount != 0)
4783  {
4784  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4785  {
4786  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4787  *ppData = m_DedicatedAllocation.m_pMappedData;
4788  ++m_MapCount;
4789  return VK_SUCCESS;
4790  }
4791  else
4792  {
4793  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4794  return VK_ERROR_MEMORY_MAP_FAILED;
4795  }
4796  }
4797  else
4798  {
4799  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4800  hAllocator->m_hDevice,
4801  m_DedicatedAllocation.m_hMemory,
4802  0, // offset
4803  VK_WHOLE_SIZE,
4804  0, // flags
4805  ppData);
4806  if(result == VK_SUCCESS)
4807  {
4808  m_DedicatedAllocation.m_pMappedData = *ppData;
4809  m_MapCount = 1;
4810  }
4811  return result;
4812  }
4813 }
4814 
4815 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4816 {
4817  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4818 
4819  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4820  {
4821  --m_MapCount;
4822  if(m_MapCount == 0)
4823  {
4824  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4825  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4826  hAllocator->m_hDevice,
4827  m_DedicatedAllocation.m_hMemory);
4828  }
4829  }
4830  else
4831  {
4832  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4833  }
4834 }
4835 
4836 #if VMA_STATS_STRING_ENABLED
4837 
4838 // Entries correspond to values of enum VmaSuballocationType.
4839 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4840  "FREE",
4841  "UNKNOWN",
4842  "BUFFER",
4843  "IMAGE_UNKNOWN",
4844  "IMAGE_LINEAR",
4845  "IMAGE_OPTIMAL",
4846 };
4847 
4848 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4849 {
4850  json.BeginObject();
4851 
4852  json.WriteString("Blocks");
4853  json.WriteNumber(stat.blockCount);
4854 
4855  json.WriteString("Allocations");
4856  json.WriteNumber(stat.allocationCount);
4857 
4858  json.WriteString("UnusedRanges");
4859  json.WriteNumber(stat.unusedRangeCount);
4860 
4861  json.WriteString("UsedBytes");
4862  json.WriteNumber(stat.usedBytes);
4863 
4864  json.WriteString("UnusedBytes");
4865  json.WriteNumber(stat.unusedBytes);
4866 
4867  if(stat.allocationCount > 1)
4868  {
4869  json.WriteString("AllocationSize");
4870  json.BeginObject(true);
4871  json.WriteString("Min");
4872  json.WriteNumber(stat.allocationSizeMin);
4873  json.WriteString("Avg");
4874  json.WriteNumber(stat.allocationSizeAvg);
4875  json.WriteString("Max");
4876  json.WriteNumber(stat.allocationSizeMax);
4877  json.EndObject();
4878  }
4879 
4880  if(stat.unusedRangeCount > 1)
4881  {
4882  json.WriteString("UnusedRangeSize");
4883  json.BeginObject(true);
4884  json.WriteString("Min");
4885  json.WriteNumber(stat.unusedRangeSizeMin);
4886  json.WriteString("Avg");
4887  json.WriteNumber(stat.unusedRangeSizeAvg);
4888  json.WriteString("Max");
4889  json.WriteNumber(stat.unusedRangeSizeMax);
4890  json.EndObject();
4891  }
4892 
4893  json.EndObject();
4894 }
4895 
4896 #endif // #if VMA_STATS_STRING_ENABLED
4897 
4898 struct VmaSuballocationItemSizeLess
4899 {
4900  bool operator()(
4901  const VmaSuballocationList::iterator lhs,
4902  const VmaSuballocationList::iterator rhs) const
4903  {
4904  return lhs->size < rhs->size;
4905  }
4906  bool operator()(
4907  const VmaSuballocationList::iterator lhs,
4908  VkDeviceSize rhsSize) const
4909  {
4910  return lhs->size < rhsSize;
4911  }
4912 };
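// The second operator() above is a heterogeneous comparison - an iterator against a
// raw VkDeviceSize - which lets a binary search locate the first free range of
// sufficient size without constructing a dummy list entry. The same idea with
// std::lower_bound and simplified types:
#if 0 // Illustrative only - not compiled as part of this header.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Range { uint64_t size; };

struct RangeSizeLess
{
    bool operator()(const Range& lhs, uint64_t rhsSize) const { return lhs.size < rhsSize; }
};

inline void ExampleFindFirstNotLess()
{
    std::vector<Range> freeBySize = { {64}, {256}, {1024}, {4096} }; // sorted ascending
    // First free range whose size is not less than the requested 300 bytes:
    std::vector<Range>::iterator it =
        std::lower_bound(freeBySize.begin(), freeBySize.end(), uint64_t(300), RangeSizeLess());
    assert(it != freeBySize.end() && it->size == 1024);
}
#endif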
4913 
4915 // class VmaBlockMetadata
4916 
4917 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4918  m_Size(0),
4919  m_FreeCount(0),
4920  m_SumFreeSize(0),
4921  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4922  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4923 {
4924 }
4925 
4926 VmaBlockMetadata::~VmaBlockMetadata()
4927 {
4928 }
4929 
4930 void VmaBlockMetadata::Init(VkDeviceSize size)
4931 {
4932  m_Size = size;
4933  m_FreeCount = 1;
4934  m_SumFreeSize = size;
4935 
4936  VmaSuballocation suballoc = {};
4937  suballoc.offset = 0;
4938  suballoc.size = size;
4939  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4940  suballoc.hAllocation = VK_NULL_HANDLE;
4941 
4942  m_Suballocations.push_back(suballoc);
4943  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4944  --suballocItem;
4945  m_FreeSuballocationsBySize.push_back(suballocItem);
4946 }
4947 
4948 bool VmaBlockMetadata::Validate() const
4949 {
4950  if(m_Suballocations.empty())
4951  {
4952  return false;
4953  }
4954 
4955  // Expected offset of a new suballocation, as calculated from the previous ones.
4956  VkDeviceSize calculatedOffset = 0;
4957  // Expected number of free suballocations as calculated from traversing their list.
4958  uint32_t calculatedFreeCount = 0;
4959  // Expected sum size of free suballocations as calculated from traversing their list.
4960  VkDeviceSize calculatedSumFreeSize = 0;
4961  // Expected number of free suballocations that should be registered in
4962  // m_FreeSuballocationsBySize calculated from traversing their list.
4963  size_t freeSuballocationsToRegister = 0;
4964  // True if the previously visited suballocation was free.
4965  bool prevFree = false;
4966 
4967  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4968  suballocItem != m_Suballocations.cend();
4969  ++suballocItem)
4970  {
4971  const VmaSuballocation& subAlloc = *suballocItem;
4972 
4973  // Actual offset of this suballocation doesn't match expected one.
4974  if(subAlloc.offset != calculatedOffset)
4975  {
4976  return false;
4977  }
4978 
4979  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4980  // Two adjacent free suballocations are invalid. They should be merged.
4981  if(prevFree && currFree)
4982  {
4983  return false;
4984  }
4985 
4986  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4987  {
4988  return false;
4989  }
4990 
4991  if(currFree)
4992  {
4993  calculatedSumFreeSize += subAlloc.size;
4994  ++calculatedFreeCount;
4995  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4996  {
4997  ++freeSuballocationsToRegister;
4998  }
4999  }
5000  else
5001  {
5002  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5003  {
5004  return false;
5005  }
5006  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5007  {
5008  return false;
5009  }
5010  }
5011 
5012  calculatedOffset += subAlloc.size;
5013  prevFree = currFree;
5014  }
5015 
5016  // The number of free suballocations registered in m_FreeSuballocationsBySize
5017  // doesn't match the expected one.
5018  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5019  {
5020  return false;
5021  }
5022 
5023  VkDeviceSize lastSize = 0;
5024  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5025  {
5026  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5027 
5028  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5029  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5030  {
5031  return false;
5032  }
5033  // They must be sorted by size ascending.
5034  if(suballocItem->size < lastSize)
5035  {
5036  return false;
5037  }
5038 
5039  lastSize = suballocItem->size;
5040  }
5041 
5042  // Check if totals match the calculated values.
5043  if(!ValidateFreeSuballocationList() ||
5044  (calculatedOffset != m_Size) ||
5045  (calculatedSumFreeSize != m_SumFreeSize) ||
5046  (calculatedFreeCount != m_FreeCount))
5047  {
5048  return false;
5049  }
5050 
5051  return true;
5052 }
5053 
5054 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5055 {
5056  if(!m_FreeSuballocationsBySize.empty())
5057  {
5058  return m_FreeSuballocationsBySize.back()->size;
5059  }
5060  else
5061  {
5062  return 0;
5063  }
5064 }
5065 
5066 bool VmaBlockMetadata::IsEmpty() const
5067 {
5068  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5069 }
5070 
5071 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5072 {
5073  outInfo.blockCount = 1;
5074 
5075  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5076  outInfo.allocationCount = rangeCount - m_FreeCount;
5077  outInfo.unusedRangeCount = m_FreeCount;
5078 
5079  outInfo.unusedBytes = m_SumFreeSize;
5080  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5081 
5082  outInfo.allocationSizeMin = UINT64_MAX;
5083  outInfo.allocationSizeMax = 0;
5084  outInfo.unusedRangeSizeMin = UINT64_MAX;
5085  outInfo.unusedRangeSizeMax = 0;
5086 
5087  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5088  suballocItem != m_Suballocations.cend();
5089  ++suballocItem)
5090  {
5091  const VmaSuballocation& suballoc = *suballocItem;
5092  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5093  {
5094  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5095  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5096  }
5097  else
5098  {
5099  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5100  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5101  }
5102  }
5103 }
5104 
5105 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5106 {
5107  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5108 
5109  inoutStats.size += m_Size;
5110  inoutStats.unusedSize += m_SumFreeSize;
5111  inoutStats.allocationCount += rangeCount - m_FreeCount;
5112  inoutStats.unusedRangeCount += m_FreeCount;
5113  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5114 }
5115 
5116 #if VMA_STATS_STRING_ENABLED
5117 
5118 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5119 {
5120  json.BeginObject();
5121 
5122  json.WriteString("TotalBytes");
5123  json.WriteNumber(m_Size);
5124 
5125  json.WriteString("UnusedBytes");
5126  json.WriteNumber(m_SumFreeSize);
5127 
5128  json.WriteString("Allocations");
5129  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5130 
5131  json.WriteString("UnusedRanges");
5132  json.WriteNumber(m_FreeCount);
5133 
5134  json.WriteString("Suballocations");
5135  json.BeginArray();
5136  size_t i = 0;
5137  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5138  suballocItem != m_Suballocations.cend();
5139  ++suballocItem, ++i)
5140  {
5141  json.BeginObject(true);
5142 
5143  json.WriteString("Type");
5144  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5145 
5146  json.WriteString("Size");
5147  json.WriteNumber(suballocItem->size);
5148 
5149  json.WriteString("Offset");
5150  json.WriteNumber(suballocItem->offset);
5151 
5152  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5153  {
5154  const void* pUserData = suballocItem->hAllocation->GetUserData();
5155  if(pUserData != VMA_NULL)
5156  {
5157  json.WriteString("UserData");
5158  if(suballocItem->hAllocation->IsUserDataString())
5159  {
5160  json.WriteString((const char*)pUserData);
5161  }
5162  else
5163  {
5164  json.BeginString();
5165  json.ContinueString_Pointer(pUserData);
5166  json.EndString();
5167  }
5168  }
5169  }
5170 
5171  json.EndObject();
5172  }
5173  json.EndArray();
5174 
5175  json.EndObject();
5176 }
5177 
5178 #endif // #if VMA_STATS_STRING_ENABLED
5179 
5180 /*
5181 How many suitable free suballocations to analyze before choosing the best one.
5182 - Set to 1 to use the First-Fit algorithm - the first suitable free suballocation
5183  will be chosen.
5184 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable free
5185  suballocations will be analyzed and the best one will be chosen.
5186 - Any other value is also acceptable. (A toy comparison of the two search orders follows below.)
5187 */
5188 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
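// A toy comparison of the two search orders described above, on a size-sorted free
// list {128, 512, 1024} and a 200-byte request; illustrative only.
#if 0 // Not compiled as part of this header.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

inline void ExampleFitStrategies()
{
    std::vector<uint64_t> freeSizes = { 128, 512, 1024 }; // sorted ascending
    const uint64_t request = 200;

    // Best-fit (VMA_BEST_FIT): binary-search for the smallest sufficient range.
    std::vector<uint64_t>::iterator best =
        std::lower_bound(freeSizes.begin(), freeSizes.end(), request);
    assert(*best == 512);

    // Worst-fit: scan from the biggest range downward; the first hit wins.
    uint64_t worst = 0;
    for(size_t i = freeSizes.size(); i--; )
    {
        if(freeSizes[i] >= request) { worst = freeSizes[i]; break; }
    }
    assert(worst == 1024);
}
#endif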
5189 
5190 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5191 {
5192  VMA_ASSERT(IsEmpty());
5193  pAllocationRequest->offset = 0;
5194  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5195  pAllocationRequest->sumItemSize = 0;
5196  pAllocationRequest->item = m_Suballocations.begin();
5197  pAllocationRequest->itemsToMakeLostCount = 0;
5198 }
5199 
5200 bool VmaBlockMetadata::CreateAllocationRequest(
5201  uint32_t currentFrameIndex,
5202  uint32_t frameInUseCount,
5203  VkDeviceSize bufferImageGranularity,
5204  VkDeviceSize allocSize,
5205  VkDeviceSize allocAlignment,
5206  VmaSuballocationType allocType,
5207  bool canMakeOtherLost,
5208  VmaAllocationRequest* pAllocationRequest)
5209 {
5210  VMA_ASSERT(allocSize > 0);
5211  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5212  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5213  VMA_HEAVY_ASSERT(Validate());
5214 
5215  // There is not enough total free space in this block to fulfill the request: early return.
5216  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5217  {
5218  return false;
5219  }
5220 
5221  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5222  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5223  if(freeSuballocCount > 0)
5224  {
5225  if(VMA_BEST_FIT)
5226  {
5227  // Find first free suballocation with size not less than allocSize.
5228  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5229  m_FreeSuballocationsBySize.data(),
5230  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5231  allocSize,
5232  VmaSuballocationItemSizeLess());
5233  size_t index = it - m_FreeSuballocationsBySize.data();
5234  for(; index < freeSuballocCount; ++index)
5235  {
5236  if(CheckAllocation(
5237  currentFrameIndex,
5238  frameInUseCount,
5239  bufferImageGranularity,
5240  allocSize,
5241  allocAlignment,
5242  allocType,
5243  m_FreeSuballocationsBySize[index],
5244  false, // canMakeOtherLost
5245  &pAllocationRequest->offset,
5246  &pAllocationRequest->itemsToMakeLostCount,
5247  &pAllocationRequest->sumFreeSize,
5248  &pAllocationRequest->sumItemSize))
5249  {
5250  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5251  return true;
5252  }
5253  }
5254  }
5255  else
5256  {
5257  // Search starting from the biggest suballocations.
5258  for(size_t index = freeSuballocCount; index--; )
5259  {
5260  if(CheckAllocation(
5261  currentFrameIndex,
5262  frameInUseCount,
5263  bufferImageGranularity,
5264  allocSize,
5265  allocAlignment,
5266  allocType,
5267  m_FreeSuballocationsBySize[index],
5268  false, // canMakeOtherLost
5269  &pAllocationRequest->offset,
5270  &pAllocationRequest->itemsToMakeLostCount,
5271  &pAllocationRequest->sumFreeSize,
5272  &pAllocationRequest->sumItemSize))
5273  {
5274  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5275  return true;
5276  }
5277  }
5278  }
5279  }
5280 
5281  if(canMakeOtherLost)
5282  {
5283  // Brute-force algorithm. TODO: Come up with something better.
5284 
5285  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5286  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5287 
5288  VmaAllocationRequest tmpAllocRequest = {};
5289  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5290  suballocIt != m_Suballocations.end();
5291  ++suballocIt)
5292  {
5293  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5294  suballocIt->hAllocation->CanBecomeLost())
5295  {
5296  if(CheckAllocation(
5297  currentFrameIndex,
5298  frameInUseCount,
5299  bufferImageGranularity,
5300  allocSize,
5301  allocAlignment,
5302  allocType,
5303  suballocIt,
5304  canMakeOtherLost,
5305  &tmpAllocRequest.offset,
5306  &tmpAllocRequest.itemsToMakeLostCount,
5307  &tmpAllocRequest.sumFreeSize,
5308  &tmpAllocRequest.sumItemSize))
5309  {
5310  tmpAllocRequest.item = suballocIt;
5311 
5312  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5313  {
5314  *pAllocationRequest = tmpAllocRequest;
5315  }
5316  }
5317  }
5318  }
5319 
5320  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5321  {
5322  return true;
5323  }
5324  }
5325 
5326  return false;
5327 }
5328 
5329 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5330  uint32_t currentFrameIndex,
5331  uint32_t frameInUseCount,
5332  VmaAllocationRequest* pAllocationRequest)
5333 {
5334  while(pAllocationRequest->itemsToMakeLostCount > 0)
5335  {
5336  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5337  {
5338  ++pAllocationRequest->item;
5339  }
5340  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5341  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5342  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5343  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5344  {
5345  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5346  --pAllocationRequest->itemsToMakeLostCount;
5347  }
5348  else
5349  {
5350  return false;
5351  }
5352  }
5353 
5354  VMA_HEAVY_ASSERT(Validate());
5355  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5356  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5357 
5358  return true;
5359 }
5360 
5361 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5362 {
5363  uint32_t lostAllocationCount = 0;
5364  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5365  it != m_Suballocations.end();
5366  ++it)
5367  {
5368  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5369  it->hAllocation->CanBecomeLost() &&
5370  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5371  {
5372  it = FreeSuballocation(it);
5373  ++lostAllocationCount;
5374  }
5375  }
5376  return lostAllocationCount;
5377 }
5378 
5379 void VmaBlockMetadata::Alloc(
5380  const VmaAllocationRequest& request,
5381  VmaSuballocationType type,
5382  VkDeviceSize allocSize,
5383  VmaAllocation hAllocation)
5384 {
5385  VMA_ASSERT(request.item != m_Suballocations.end());
5386  VmaSuballocation& suballoc = *request.item;
5387  // Given suballocation is a free block.
5388  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5389  // Given offset is inside this suballocation.
5390  VMA_ASSERT(request.offset >= suballoc.offset);
5391  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5392  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5393  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5394 
5395  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5396  // it to become used.
5397  UnregisterFreeSuballocation(request.item);
5398 
5399  suballoc.offset = request.offset;
5400  suballoc.size = allocSize;
5401  suballoc.type = type;
5402  suballoc.hAllocation = hAllocation;
5403 
5404  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5405  if(paddingEnd)
5406  {
5407  VmaSuballocation paddingSuballoc = {};
5408  paddingSuballoc.offset = request.offset + allocSize;
5409  paddingSuballoc.size = paddingEnd;
5410  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5411  VmaSuballocationList::iterator next = request.item;
5412  ++next;
5413  const VmaSuballocationList::iterator paddingEndItem =
5414  m_Suballocations.insert(next, paddingSuballoc);
5415  RegisterFreeSuballocation(paddingEndItem);
5416  }
5417 
5418  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5419  if(paddingBegin)
5420  {
5421  VmaSuballocation paddingSuballoc = {};
5422  paddingSuballoc.offset = request.offset - paddingBegin;
5423  paddingSuballoc.size = paddingBegin;
5424  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5425  const VmaSuballocationList::iterator paddingBeginItem =
5426  m_Suballocations.insert(request.item, paddingSuballoc);
5427  RegisterFreeSuballocation(paddingBeginItem);
5428  }
5429 
5430  // Update totals.
5431  m_FreeCount = m_FreeCount - 1;
5432  if(paddingBegin > 0)
5433  {
5434  ++m_FreeCount;
5435  }
5436  if(paddingEnd > 0)
5437  {
5438  ++m_FreeCount;
5439  }
5440  m_SumFreeSize -= allocSize;
5441 }
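
// Worked example (illustration only): if request.item is a free suballocation
// [offset=100, size=400] and the request was computed as offset=128 with
// allocSize=256, Alloc() rewrites the node to the used range [128, 384) and
// inserts two free nodes for the padding: [100, 128) and [384, 500).
// Net effect on the totals: m_FreeCount -1 +2 = +1, m_SumFreeSize -= 256
// (the padding bytes remain free).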
5442 
5443 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5444 {
5445  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5446  suballocItem != m_Suballocations.end();
5447  ++suballocItem)
5448  {
5449  VmaSuballocation& suballoc = *suballocItem;
5450  if(suballoc.hAllocation == allocation)
5451  {
5452  FreeSuballocation(suballocItem);
5453  VMA_HEAVY_ASSERT(Validate());
5454  return;
5455  }
5456  }
5457  VMA_ASSERT(0 && "Not found!");
5458 }
5459 
5460 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5461 {
5462  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5463  suballocItem != m_Suballocations.end();
5464  ++suballocItem)
5465  {
5466  VmaSuballocation& suballoc = *suballocItem;
5467  if(suballoc.offset == offset)
5468  {
5469  FreeSuballocation(suballocItem);
5470  return;
5471  }
5472  }
5473  VMA_ASSERT(0 && "Not found!");
5474 }
5475 
5476 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5477 {
5478  VkDeviceSize lastSize = 0;
5479  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5480  {
5481  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5482 
5483  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5484  {
5485  VMA_ASSERT(0);
5486  return false;
5487  }
5488  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5489  {
5490  VMA_ASSERT(0);
5491  return false;
5492  }
5493  if(it->size < lastSize)
5494  {
5495  VMA_ASSERT(0);
5496  return false;
5497  }
5498 
5499  lastSize = it->size;
5500  }
5501  return true;
5502 }
5503 
5504 bool VmaBlockMetadata::CheckAllocation(
5505  uint32_t currentFrameIndex,
5506  uint32_t frameInUseCount,
5507  VkDeviceSize bufferImageGranularity,
5508  VkDeviceSize allocSize,
5509  VkDeviceSize allocAlignment,
5510  VmaSuballocationType allocType,
5511  VmaSuballocationList::const_iterator suballocItem,
5512  bool canMakeOtherLost,
5513  VkDeviceSize* pOffset,
5514  size_t* itemsToMakeLostCount,
5515  VkDeviceSize* pSumFreeSize,
5516  VkDeviceSize* pSumItemSize) const
5517 {
5518  VMA_ASSERT(allocSize > 0);
5519  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5520  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5521  VMA_ASSERT(pOffset != VMA_NULL);
5522 
5523  *itemsToMakeLostCount = 0;
5524  *pSumFreeSize = 0;
5525  *pSumItemSize = 0;
5526 
5527  if(canMakeOtherLost)
5528  {
5529  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5530  {
5531  *pSumFreeSize = suballocItem->size;
5532  }
5533  else
5534  {
5535  if(suballocItem->hAllocation->CanBecomeLost() &&
5536  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5537  {
5538  ++*itemsToMakeLostCount;
5539  *pSumItemSize = suballocItem->size;
5540  }
5541  else
5542  {
5543  return false;
5544  }
5545  }
5546 
5547  // Remaining size is too small for this request: Early return.
5548  if(m_Size - suballocItem->offset < allocSize)
5549  {
5550  return false;
5551  }
5552 
5553  // Start from offset equal to beginning of this suballocation.
5554  *pOffset = suballocItem->offset;
5555 
5556  // Apply VMA_DEBUG_MARGIN at the beginning.
5557  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5558  {
5559  *pOffset += VMA_DEBUG_MARGIN;
5560  }
5561 
5562  // Apply alignment.
5563  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5564  *pOffset = VmaAlignUp(*pOffset, alignment);
5565 
5566  // Check previous suballocations for BufferImageGranularity conflicts.
5567  // Make bigger alignment if necessary.
5568  if(bufferImageGranularity > 1)
5569  {
5570  bool bufferImageGranularityConflict = false;
5571  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5572  while(prevSuballocItem != m_Suballocations.cbegin())
5573  {
5574  --prevSuballocItem;
5575  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5576  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5577  {
5578  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5579  {
5580  bufferImageGranularityConflict = true;
5581  break;
5582  }
5583  }
5584  else
5585  // Already on previous page.
5586  break;
5587  }
5588  if(bufferImageGranularityConflict)
5589  {
5590  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5591  }
5592  }
5593 
5594  // Now that we have final *pOffset, check if we are past suballocItem.
5595  // If yes, return false - this function should be called for another suballocItem as starting point.
5596  if(*pOffset >= suballocItem->offset + suballocItem->size)
5597  {
5598  return false;
5599  }
5600 
5601  // Calculate padding at the beginning based on current offset.
5602  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5603 
5604  // Calculate required margin at the end if this is not last suballocation.
5605  VmaSuballocationList::const_iterator next = suballocItem;
5606  ++next;
5607  const VkDeviceSize requiredEndMargin =
5608  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5609 
5610  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5611  // Another early return check.
5612  if(suballocItem->offset + totalSize > m_Size)
5613  {
5614  return false;
5615  }
5616 
5617  // Advance lastSuballocItem until desired size is reached.
5618  // Update itemsToMakeLostCount.
5619  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5620  if(totalSize > suballocItem->size)
5621  {
5622  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5623  while(remainingSize > 0)
5624  {
5625  ++lastSuballocItem;
5626  if(lastSuballocItem == m_Suballocations.cend())
5627  {
5628  return false;
5629  }
5630  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5631  {
5632  *pSumFreeSize += lastSuballocItem->size;
5633  }
5634  else
5635  {
5636  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5637  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5638  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5639  {
5640  ++*itemsToMakeLostCount;
5641  *pSumItemSize += lastSuballocItem->size;
5642  }
5643  else
5644  {
5645  return false;
5646  }
5647  }
5648  remainingSize = (lastSuballocItem->size < remainingSize) ?
5649  remainingSize - lastSuballocItem->size : 0;
5650  }
5651  }
5652 
5653  // Check next suballocations for BufferImageGranularity conflicts.
5654  // If conflict exists, we must mark more allocations lost or fail.
5655  if(bufferImageGranularity > 1)
5656  {
5657  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5658  ++nextSuballocItem;
5659  while(nextSuballocItem != m_Suballocations.cend())
5660  {
5661  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5662  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5663  {
5664  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5665  {
5666  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5667  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5668  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5669  {
5670  ++*itemsToMakeLostCount;
5671  }
5672  else
5673  {
5674  return false;
5675  }
5676  }
5677  }
5678  else
5679  {
5680  // Already on next page.
5681  break;
5682  }
5683  ++nextSuballocItem;
5684  }
5685  }
5686  }
5687  else
5688  {
5689  const VmaSuballocation& suballoc = *suballocItem;
5690  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5691 
5692  *pSumFreeSize = suballoc.size;
5693 
5694  // Size of this suballocation is too small for this request: Early return.
5695  if(suballoc.size < allocSize)
5696  {
5697  return false;
5698  }
5699 
5700  // Start from offset equal to beginning of this suballocation.
5701  *pOffset = suballoc.offset;
5702 
5703  // Apply VMA_DEBUG_MARGIN at the beginning.
5704  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5705  {
5706  *pOffset += VMA_DEBUG_MARGIN;
5707  }
5708 
5709  // Apply alignment.
5710  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5711  *pOffset = VmaAlignUp(*pOffset, alignment);
5712 
5713  // Check previous suballocations for BufferImageGranularity conflicts.
5714  // Make bigger alignment if necessary.
5715  if(bufferImageGranularity > 1)
5716  {
5717  bool bufferImageGranularityConflict = false;
5718  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5719  while(prevSuballocItem != m_Suballocations.cbegin())
5720  {
5721  --prevSuballocItem;
5722  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5723  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5724  {
5725  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5726  {
5727  bufferImageGranularityConflict = true;
5728  break;
5729  }
5730  }
5731  else
5732  // Already on previous page.
5733  break;
5734  }
5735  if(bufferImageGranularityConflict)
5736  {
5737  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5738  }
5739  }
5740 
5741  // Calculate padding at the beginning based on current offset.
5742  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5743 
5744  // Calculate required margin at the end if this is not last suballocation.
5745  VmaSuballocationList::const_iterator next = suballocItem;
5746  ++next;
5747  const VkDeviceSize requiredEndMargin =
5748  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5749 
5750  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5751  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5752  {
5753  return false;
5754  }
5755 
5756  // Check next suballocations for BufferImageGranularity conflicts.
5757  // If conflict exists, allocation cannot be made here.
5758  if(bufferImageGranularity > 1)
5759  {
5760  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5761  ++nextSuballocItem;
5762  while(nextSuballocItem != m_Suballocations.cend())
5763  {
5764  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5765  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5766  {
5767  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5768  {
5769  return false;
5770  }
5771  }
5772  else
5773  {
5774  // Already on next page.
5775  break;
5776  }
5777  ++nextSuballocItem;
5778  }
5779  }
5780  }
5781 
5782  // All tests passed: Success. pOffset is already filled.
5783  return true;
5784 }
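// Illustrative sketch (assumed numbers): how bufferImageGranularity widens the
// alignment above. With granularity = 4096, a preceding OPTIMAL-tiling image
// occupying bytes [4096, 5000) and a linear buffer tentatively placed at
// offset 5120 share the 4096-byte "page" [4096, 8192), so the offset is bumped:
#if 0
VkDeviceSize offset = 5120;            // after normal alignment
const VkDeviceSize granularity = 4096; // VkPhysicalDeviceLimits::bufferImageGranularity
const bool samePage = ((5000 - 1) / granularity) == (offset / granularity); // true
if(samePage) // and the two resources' tiling types conflict
{
    offset = VmaAlignUp(offset, granularity); // -> 8192
}
#endif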
5785 
5786 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5787 {
5788  VMA_ASSERT(item != m_Suballocations.end());
5789  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5790 
5791  VmaSuballocationList::iterator nextItem = item;
5792  ++nextItem;
5793  VMA_ASSERT(nextItem != m_Suballocations.end());
5794  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5795 
5796  item->size += nextItem->size;
5797  --m_FreeCount;
5798  m_Suballocations.erase(nextItem);
5799 }
5800 
5801 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5802 {
5803  // Change this suballocation to be marked as free.
5804  VmaSuballocation& suballoc = *suballocItem;
5805  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5806  suballoc.hAllocation = VK_NULL_HANDLE;
5807 
5808  // Update totals.
5809  ++m_FreeCount;
5810  m_SumFreeSize += suballoc.size;
5811 
5812  // Merge with previous and/or next suballocation if it's also free.
5813  bool mergeWithNext = false;
5814  bool mergeWithPrev = false;
5815 
5816  VmaSuballocationList::iterator nextItem = suballocItem;
5817  ++nextItem;
5818  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5819  {
5820  mergeWithNext = true;
5821  }
5822 
5823  VmaSuballocationList::iterator prevItem = suballocItem;
5824  if(suballocItem != m_Suballocations.begin())
5825  {
5826  --prevItem;
5827  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5828  {
5829  mergeWithPrev = true;
5830  }
5831  }
5832 
5833  if(mergeWithNext)
5834  {
5835  UnregisterFreeSuballocation(nextItem);
5836  MergeFreeWithNext(suballocItem);
5837  }
5838 
5839  if(mergeWithPrev)
5840  {
5841  UnregisterFreeSuballocation(prevItem);
5842  MergeFreeWithNext(prevItem);
5843  RegisterFreeSuballocation(prevItem);
5844  return prevItem;
5845  }
5846  else
5847  {
5848  RegisterFreeSuballocation(suballocItem);
5849  return suballocItem;
5850  }
5851 }
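
// Worked example (illustration only): freeing the middle node of
// [free 0..64) [used 64..192) [free 192..256) first marks it free, then merges
// the right neighbour into it and the result into the left neighbour, leaving
// a single free node [0..256). Note the re-registration order: neighbours are
// unregistered from m_FreeSuballocationsBySize before their sizes change, and
// the surviving node is registered once, at its final size.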
5852 
5853 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5854 {
5855  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5856  VMA_ASSERT(item->size > 0);
5857 
5858  // You may want to enable this validation at the beginning or at the end of
5859  // this function, depending on what you want to check.
5860  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5861 
5862  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5863  {
5864  if(m_FreeSuballocationsBySize.empty())
5865  {
5866  m_FreeSuballocationsBySize.push_back(item);
5867  }
5868  else
5869  {
5870  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5871  }
5872  }
5873 
5874  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5875 }
5876 
5877 
5878 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5879 {
5880  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5881  VMA_ASSERT(item->size > 0);
5882 
5883  // You may want to enable this validation at the beginning or at the end of
5884  // this function, depending on what you want to check.
5885  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5886 
5887  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5888  {
5889  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5890  m_FreeSuballocationsBySize.data(),
5891  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5892  item,
5893  VmaSuballocationItemSizeLess());
5894  for(size_t index = it - m_FreeSuballocationsBySize.data();
5895  index < m_FreeSuballocationsBySize.size();
5896  ++index)
5897  {
5898  if(m_FreeSuballocationsBySize[index] == item)
5899  {
5900  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5901  return;
5902  }
5903  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5904  }
5905  VMA_ASSERT(0 && "Not found.");
5906  }
5907 
5908  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5909 }
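
// Illustrative sketch: m_FreeSuballocationsBySize is sorted by size only, so
// several entries may share one size. The binary search above lands on the
// first entry of that equal-size run and the loop then walks the run until it
// hits the exact iterator. Paraphrased (LowerBoundBySize is an assumed helper):
#if 0
size_t index = LowerBoundBySize(vec, item->size); // first entry with size >= item->size
while(vec[index] != item)
{
    VMA_ASSERT(vec[index]->size == item->size); // must stay inside the run
    ++index;
}
vec.erase(vec.begin() + index); // VmaVectorRemove in the real code
#endif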
5910 
5911 ////////////////////////////////////////////////////////////////////////////////
5912 // class VmaDeviceMemoryMapping
5913 
5914 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5915  m_MapCount(0),
5916  m_pMappedData(VMA_NULL)
5917 {
5918 }
5919 
5920 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5921 {
5922  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5923 }
5924 
5925 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5926 {
5927  if(count == 0)
5928  {
5929  return VK_SUCCESS;
5930  }
5931 
5932  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5933  if(m_MapCount != 0)
5934  {
5935  m_MapCount += count;
5936  VMA_ASSERT(m_pMappedData != VMA_NULL);
5937  if(ppData != VMA_NULL)
5938  {
5939  *ppData = m_pMappedData;
5940  }
5941  return VK_SUCCESS;
5942  }
5943  else
5944  {
5945  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5946  hAllocator->m_hDevice,
5947  hMemory,
5948  0, // offset
5949  VK_WHOLE_SIZE,
5950  0, // flags
5951  &m_pMappedData);
5952  if(result == VK_SUCCESS)
5953  {
5954  if(ppData != VMA_NULL)
5955  {
5956  *ppData = m_pMappedData;
5957  }
5958  m_MapCount = count;
5959  }
5960  return result;
5961  }
5962 }
5963 
5964 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
5965 {
5966  if(count == 0)
5967  {
5968  return;
5969  }
5970 
5971  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5972  if(m_MapCount >= count)
5973  {
5974  m_MapCount -= count;
5975  if(m_MapCount == 0)
5976  {
5977  m_pMappedData = VMA_NULL;
5978  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5979  }
5980  }
5981  else
5982  {
5983  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5984  }
5985 }
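
// Usage sketch (illustration only): Map/Unmap are reference-counted per
// VkDeviceMemory block, so nested mappings are cheap - only the first Map()
// calls vkMapMemory and only the matching last Unmap() calls vkUnmapMemory.
#if 0
void* p1 = VMA_NULL;
void* p2 = VMA_NULL;
mapping.Map(hAllocator, hMemory, 1, &p1); // vkMapMemory happens here
mapping.Map(hAllocator, hMemory, 1, &p2); // counter only; p2 == p1
mapping.Unmap(hAllocator, hMemory, 1);    // counter only
mapping.Unmap(hAllocator, hMemory, 1);    // vkUnmapMemory happens here
#endif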
5986 
5987 ////////////////////////////////////////////////////////////////////////////////
5988 // class VmaDeviceMemoryBlock
5989 
5990 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5991  m_MemoryTypeIndex(UINT32_MAX),
5992  m_hMemory(VK_NULL_HANDLE),
5993  m_Metadata(hAllocator)
5994 {
5995 }
5996 
5997 void VmaDeviceMemoryBlock::Init(
5998  uint32_t newMemoryTypeIndex,
5999  VkDeviceMemory newMemory,
6000  VkDeviceSize newSize)
6001 {
6002  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6003 
6004  m_MemoryTypeIndex = newMemoryTypeIndex;
6005  m_hMemory = newMemory;
6006 
6007  m_Metadata.Init(newSize);
6008 }
6009 
6010 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6011 {
6012  // This is the most important assert in the entire library.
6013  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6014  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6015 
6016  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6017  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6018  m_hMemory = VK_NULL_HANDLE;
6019 }
6020 
6021 bool VmaDeviceMemoryBlock::Validate() const
6022 {
6023  if((m_hMemory == VK_NULL_HANDLE) ||
6024  (m_Metadata.GetSize() == 0))
6025  {
6026  return false;
6027  }
6028 
6029  return m_Metadata.Validate();
6030 }
6031 
6032 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6033 {
6034  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6035 }
6036 
6037 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6038 {
6039  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6040 }
6041 
6042 static void InitStatInfo(VmaStatInfo& outInfo)
6043 {
6044  memset(&outInfo, 0, sizeof(outInfo));
6045  outInfo.allocationSizeMin = UINT64_MAX;
6046  outInfo.unusedRangeSizeMin = UINT64_MAX;
6047 }
6048 
6049 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6050 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6051 {
6052  inoutInfo.blockCount += srcInfo.blockCount;
6053  inoutInfo.allocationCount += srcInfo.allocationCount;
6054  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6055  inoutInfo.usedBytes += srcInfo.usedBytes;
6056  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6057  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6058  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6059  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6060  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6061 }
6062 
6063 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6064 {
6065  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6066  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6067  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6068  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6069 }
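
// Illustration: VmaAddStatInfo only accumulates counters and min/max, so
// merging partial VmaStatInfo values stays associative; averages are computed
// once at the end. E.g. merging {usedBytes=100, allocationCount=2} with
// {usedBytes=50, allocationCount=1} and then postprocessing yields
// allocationSizeAvg = 150 / 3 = 50.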
6070 
6071 VmaPool_T::VmaPool_T(
6072  VmaAllocator hAllocator,
6073  const VmaPoolCreateInfo& createInfo) :
6074  m_BlockVector(
6075  hAllocator,
6076  createInfo.memoryTypeIndex,
6077  createInfo.blockSize,
6078  createInfo.minBlockCount,
6079  createInfo.maxBlockCount,
6080  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6081  createInfo.frameInUseCount,
6082  true) // isCustomPool
6083 {
6084 }
6085 
6086 VmaPool_T::~VmaPool_T()
6087 {
6088 }
6089 
6090 #if VMA_STATS_STRING_ENABLED
6091 
6092 #endif // #if VMA_STATS_STRING_ENABLED
6093 
6094 VmaBlockVector::VmaBlockVector(
6095  VmaAllocator hAllocator,
6096  uint32_t memoryTypeIndex,
6097  VkDeviceSize preferredBlockSize,
6098  size_t minBlockCount,
6099  size_t maxBlockCount,
6100  VkDeviceSize bufferImageGranularity,
6101  uint32_t frameInUseCount,
6102  bool isCustomPool) :
6103  m_hAllocator(hAllocator),
6104  m_MemoryTypeIndex(memoryTypeIndex),
6105  m_PreferredBlockSize(preferredBlockSize),
6106  m_MinBlockCount(minBlockCount),
6107  m_MaxBlockCount(maxBlockCount),
6108  m_BufferImageGranularity(bufferImageGranularity),
6109  m_FrameInUseCount(frameInUseCount),
6110  m_IsCustomPool(isCustomPool),
6111  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6112  m_HasEmptyBlock(false),
6113  m_pDefragmentator(VMA_NULL)
6114 {
6115 }
6116 
6117 VmaBlockVector::~VmaBlockVector()
6118 {
6119  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6120 
6121  for(size_t i = m_Blocks.size(); i--; )
6122  {
6123  m_Blocks[i]->Destroy(m_hAllocator);
6124  vma_delete(m_hAllocator, m_Blocks[i]);
6125  }
6126 }
6127 
6128 VkResult VmaBlockVector::CreateMinBlocks()
6129 {
6130  for(size_t i = 0; i < m_MinBlockCount; ++i)
6131  {
6132  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6133  if(res != VK_SUCCESS)
6134  {
6135  return res;
6136  }
6137  }
6138  return VK_SUCCESS;
6139 }
6140 
6141 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6142 {
6143  pStats->size = 0;
6144  pStats->unusedSize = 0;
6145  pStats->allocationCount = 0;
6146  pStats->unusedRangeCount = 0;
6147  pStats->unusedRangeSizeMax = 0;
6148 
6149  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6150 
6151  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6152  {
6153  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6154  VMA_ASSERT(pBlock);
6155  VMA_HEAVY_ASSERT(pBlock->Validate());
6156  pBlock->m_Metadata.AddPoolStats(*pStats);
6157  }
6158 }
6159 
6160 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6161 
6162 VkResult VmaBlockVector::Allocate(
6163  VmaPool hCurrentPool,
6164  uint32_t currentFrameIndex,
6165  const VkMemoryRequirements& vkMemReq,
6166  const VmaAllocationCreateInfo& createInfo,
6167  VmaSuballocationType suballocType,
6168  VmaAllocation* pAllocation)
6169 {
6170  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6171  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6172 
6173  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6174 
6175  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6176  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6177  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6178  {
6179  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6180  VMA_ASSERT(pCurrBlock);
6181  VmaAllocationRequest currRequest = {};
6182  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6183  currentFrameIndex,
6184  m_FrameInUseCount,
6185  m_BufferImageGranularity,
6186  vkMemReq.size,
6187  vkMemReq.alignment,
6188  suballocType,
6189  false, // canMakeOtherLost
6190  &currRequest))
6191  {
6192  // Allocate from pCurrBlock.
6193  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6194 
6195  if(mapped)
6196  {
6197  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6198  if(res != VK_SUCCESS)
6199  {
6200  return res;
6201  }
6202  }
6203 
6204  // We no longer have an empty block.
6205  if(pCurrBlock->m_Metadata.IsEmpty())
6206  {
6207  m_HasEmptyBlock = false;
6208  }
6209 
6210  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6211  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6212  (*pAllocation)->InitBlockAllocation(
6213  hCurrentPool,
6214  pCurrBlock,
6215  currRequest.offset,
6216  vkMemReq.alignment,
6217  vkMemReq.size,
6218  suballocType,
6219  mapped,
6220  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6221  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6222  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6223  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6224  return VK_SUCCESS;
6225  }
6226  }
6227 
6228  const bool canCreateNewBlock =
6229  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6230  (m_Blocks.size() < m_MaxBlockCount);
6231 
6232  // 2. Try to create new block.
6233  if(canCreateNewBlock)
6234  {
6235  // Calculate optimal size for new block.
6236  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6237  uint32_t newBlockSizeShift = 0;
6238  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6239 
6240  // Allocating blocks of other sizes is allowed only in default pools.
6241  // In custom pools block size is fixed.
6242  if(m_IsCustomPool == false)
6243  {
6244  // Allocate 1/8, 1/4, 1/2 as first blocks.
6245  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6246  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6247  {
6248  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6249  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6250  {
6251  newBlockSize = smallerNewBlockSize;
6252  ++newBlockSizeShift;
6253  }
6254  else
6255  {
6256  break;
6257  }
6258  }
6259  }
6260 
6261  size_t newBlockIndex = 0;
6262  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6263  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6264  if(m_IsCustomPool == false)
6265  {
6266  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6267  {
6268  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6269  if(smallerNewBlockSize >= vkMemReq.size)
6270  {
6271  newBlockSize = smallerNewBlockSize;
6272  ++newBlockSizeShift;
6273  res = CreateBlock(newBlockSize, &newBlockIndex);
6274  }
6275  else
6276  {
6277  break;
6278  }
6279  }
6280  }
6281 
6282  if(res == VK_SUCCESS)
6283  {
6284  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6285  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6286 
6287  if(mapped)
6288  {
6289  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6290  if(res != VK_SUCCESS)
6291  {
6292  return res;
6293  }
6294  }
6295 
6296  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6297  VmaAllocationRequest allocRequest;
6298  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6299  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6300  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6301  (*pAllocation)->InitBlockAllocation(
6302  hCurrentPool,
6303  pBlock,
6304  allocRequest.offset,
6305  vkMemReq.alignment,
6306  vkMemReq.size,
6307  suballocType,
6308  mapped,
6309  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6310  VMA_HEAVY_ASSERT(pBlock->Validate());
6311  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6312  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6313  return VK_SUCCESS;
6314  }
6315  }
6316 
6317  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6318 
6319  // 3. Try to allocate from existing blocks with making other allocations lost.
6320  if(canMakeOtherLost)
6321  {
6322  uint32_t tryIndex = 0;
6323  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6324  {
6325  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6326  VmaAllocationRequest bestRequest = {};
6327  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6328 
6329  // 1. Search existing allocations.
6330  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6331  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6332  {
6333  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6334  VMA_ASSERT(pCurrBlock);
6335  VmaAllocationRequest currRequest = {};
6336  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6337  currentFrameIndex,
6338  m_FrameInUseCount,
6339  m_BufferImageGranularity,
6340  vkMemReq.size,
6341  vkMemReq.alignment,
6342  suballocType,
6343  canMakeOtherLost,
6344  &currRequest))
6345  {
6346  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6347  if(pBestRequestBlock == VMA_NULL ||
6348  currRequestCost < bestRequestCost)
6349  {
6350  pBestRequestBlock = pCurrBlock;
6351  bestRequest = currRequest;
6352  bestRequestCost = currRequestCost;
6353 
6354  if(bestRequestCost == 0)
6355  {
6356  break;
6357  }
6358  }
6359  }
6360  }
6361 
6362  if(pBestRequestBlock != VMA_NULL)
6363  {
6364  if(mapped)
6365  {
6366  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6367  if(res != VK_SUCCESS)
6368  {
6369  return res;
6370  }
6371  }
6372 
6373  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6374  currentFrameIndex,
6375  m_FrameInUseCount,
6376  &bestRequest))
6377  {
6378  // We no longer have an empty block.
6379  if(pBestRequestBlock->m_Metadata.IsEmpty())
6380  {
6381  m_HasEmptyBlock = false;
6382  }
6383  // Allocate from pBestRequestBlock.
6384  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6385  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6386  (*pAllocation)->InitBlockAllocation(
6387  hCurrentPool,
6388  pBestRequestBlock,
6389  bestRequest.offset,
6390  vkMemReq.alignment,
6391  vkMemReq.size,
6392  suballocType,
6393  mapped,
6394  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6395  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6396  VMA_DEBUG_LOG(" Returned from existing block");
6397  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6398  return VK_SUCCESS;
6399  }
6400  // else: Some allocations must have been touched while we are here. Next try.
6401  }
6402  else
6403  {
6404  // Could not find place in any of the blocks - break outer loop.
6405  break;
6406  }
6407  }
6408  /* Maximum number of tries exceeded - a very unlikely event when many other
6409  threads are simultaneously touching allocations, making it impossible to mark
6410  them as lost at the same time as we try to allocate. */
6411  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6412  {
6413  return VK_ERROR_TOO_MANY_OBJECTS;
6414  }
6415  }
6416 
6417  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6418 }
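
// Worked example of the sizing heuristic in step 2 above (assumed numbers):
// with m_PreferredBlockSize = 256 MiB, no existing blocks and a 5 MiB request,
// the first block is created at 32 MiB (the size is halved up to
// NEW_BLOCK_SIZE_SHIFT_MAX = 3 times while each half stays above the largest
// existing block and >= 2 * the request size), so a default pool grows
// 32 -> 64 -> 128 -> 256 MiB as it fills up. Conversely, if vkAllocateMemory
// fails for a full-size block, the second loop retries at 1/2, 1/4 and 1/8 of
// the preferred size before giving up.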
6419 
6420 void VmaBlockVector::Free(
6421  VmaAllocation hAllocation)
6422 {
6423  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6424 
6425  // Scope for lock.
6426  {
6427  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6428 
6429  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6430 
6431  if(hAllocation->IsPersistentMap())
6432  {
6433  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6434  }
6435 
6436  pBlock->m_Metadata.Free(hAllocation);
6437  VMA_HEAVY_ASSERT(pBlock->Validate());
6438 
6439  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6440 
6441  // pBlock became empty after this deallocation.
6442  if(pBlock->m_Metadata.IsEmpty())
6443  {
6444  // We already have an empty block - we don't want two, so delete this one.
6445  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6446  {
6447  pBlockToDelete = pBlock;
6448  Remove(pBlock);
6449  }
6450  // We now have our first empty block.
6451  else
6452  {
6453  m_HasEmptyBlock = true;
6454  }
6455  }
6456  // pBlock didn't become empty, but we have another empty block - find and free that one.
6457  // (This is optional, heuristics.)
6458  else if(m_HasEmptyBlock)
6459  {
6460  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6461  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6462  {
6463  pBlockToDelete = pLastBlock;
6464  m_Blocks.pop_back();
6465  m_HasEmptyBlock = false;
6466  }
6467  }
6468 
6469  IncrementallySortBlocks();
6470  }
6471 
6472  // Destruction of a free block. Deferred until this point, outside of the mutex
6473  // lock, for performance reasons.
6474  if(pBlockToDelete != VMA_NULL)
6475  {
6476  VMA_DEBUG_LOG(" Deleted empty block");
6477  pBlockToDelete->Destroy(m_hAllocator);
6478  vma_delete(m_hAllocator, pBlockToDelete);
6479  }
6480 }
6481 
6482 size_t VmaBlockVector::CalcMaxBlockSize() const
6483 {
6484  size_t result = 0;
6485  for(size_t i = m_Blocks.size(); i--; )
6486  {
6487  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6488  if(result >= m_PreferredBlockSize)
6489  {
6490  break;
6491  }
6492  }
6493  return result;
6494 }
6495 
6496 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6497 {
6498  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6499  {
6500  if(m_Blocks[blockIndex] == pBlock)
6501  {
6502  VmaVectorRemove(m_Blocks, blockIndex);
6503  return;
6504  }
6505  }
6506  VMA_ASSERT(0);
6507 }
6508 
6509 void VmaBlockVector::IncrementallySortBlocks()
6510 {
6511  // Bubble sort only until first swap.
6512  for(size_t i = 1; i < m_Blocks.size(); ++i)
6513  {
6514  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6515  {
6516  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6517  return;
6518  }
6519  }
6520 }
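
// Illustration: m_Blocks is kept only approximately sorted by ascending
// GetSumFreeSize(); each call fixes at most one inversion, e.g.
// [10, 40, 30, 90] -> [10, 30, 40, 90]. Over repeated Free() calls the order
// converges while each call stays O(n), and Allocate() walking m_Blocks
// forward therefore prefers the fullest blocks first.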
6521 
6522 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6523 {
6524  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6525  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6526  allocInfo.allocationSize = blockSize;
6527  VkDeviceMemory mem = VK_NULL_HANDLE;
6528  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6529  if(res < 0)
6530  {
6531  return res;
6532  }
6533 
6534  // New VkDeviceMemory successfully created.
6535 
6536  // Create a new block object for it.
6537  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6538  pBlock->Init(
6539  m_MemoryTypeIndex,
6540  mem,
6541  allocInfo.allocationSize);
6542 
6543  m_Blocks.push_back(pBlock);
6544  if(pNewBlockIndex != VMA_NULL)
6545  {
6546  *pNewBlockIndex = m_Blocks.size() - 1;
6547  }
6548 
6549  return VK_SUCCESS;
6550 }
6551 
6552 #if VMA_STATS_STRING_ENABLED
6553 
6554 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6555 {
6556  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6557 
6558  json.BeginObject();
6559 
6560  if(m_IsCustomPool)
6561  {
6562  json.WriteString("MemoryTypeIndex");
6563  json.WriteNumber(m_MemoryTypeIndex);
6564 
6565  json.WriteString("BlockSize");
6566  json.WriteNumber(m_PreferredBlockSize);
6567 
6568  json.WriteString("BlockCount");
6569  json.BeginObject(true);
6570  if(m_MinBlockCount > 0)
6571  {
6572  json.WriteString("Min");
6573  json.WriteNumber((uint64_t)m_MinBlockCount);
6574  }
6575  if(m_MaxBlockCount < SIZE_MAX)
6576  {
6577  json.WriteString("Max");
6578  json.WriteNumber((uint64_t)m_MaxBlockCount);
6579  }
6580  json.WriteString("Cur");
6581  json.WriteNumber((uint64_t)m_Blocks.size());
6582  json.EndObject();
6583 
6584  if(m_FrameInUseCount > 0)
6585  {
6586  json.WriteString("FrameInUseCount");
6587  json.WriteNumber(m_FrameInUseCount);
6588  }
6589  }
6590  else
6591  {
6592  json.WriteString("PreferredBlockSize");
6593  json.WriteNumber(m_PreferredBlockSize);
6594  }
6595 
6596  json.WriteString("Blocks");
6597  json.BeginArray();
6598  for(size_t i = 0; i < m_Blocks.size(); ++i)
6599  {
6600  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6601  }
6602  json.EndArray();
6603 
6604  json.EndObject();
6605 }
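
// Shape of the JSON emitted above for a custom pool (illustration, with
// assumed numbers):
//   {
//     "MemoryTypeIndex": 2,
//     "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Max": 8, "Cur": 3 },
//     "FrameInUseCount": 1,
//     "Blocks": [ /* one entry per block, from VmaBlockMetadata::PrintDetailedMap */ ]
//   }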
6606 
6607 #endif // #if VMA_STATS_STRING_ENABLED
6608 
6609 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6610  VmaAllocator hAllocator,
6611  uint32_t currentFrameIndex)
6612 {
6613  if(m_pDefragmentator == VMA_NULL)
6614  {
6615  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6616  hAllocator,
6617  this,
6618  currentFrameIndex);
6619  }
6620 
6621  return m_pDefragmentator;
6622 }
6623 
6624 VkResult VmaBlockVector::Defragment(
6625  VmaDefragmentationStats* pDefragmentationStats,
6626  VkDeviceSize& maxBytesToMove,
6627  uint32_t& maxAllocationsToMove)
6628 {
6629  if(m_pDefragmentator == VMA_NULL)
6630  {
6631  return VK_SUCCESS;
6632  }
6633 
6634  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6635 
6636  // Defragment.
6637  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6638 
6639  // Accumulate statistics.
6640  if(pDefragmentationStats != VMA_NULL)
6641  {
6642  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6643  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6644  pDefragmentationStats->bytesMoved += bytesMoved;
6645  pDefragmentationStats->allocationsMoved += allocationsMoved;
6646  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6647  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6648  maxBytesToMove -= bytesMoved;
6649  maxAllocationsToMove -= allocationsMoved;
6650  }
6651 
6652  // Free empty blocks.
6653  m_HasEmptyBlock = false;
6654  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6655  {
6656  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6657  if(pBlock->m_Metadata.IsEmpty())
6658  {
6659  if(m_Blocks.size() > m_MinBlockCount)
6660  {
6661  if(pDefragmentationStats != VMA_NULL)
6662  {
6663  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6664  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6665  }
6666 
6667  VmaVectorRemove(m_Blocks, blockIndex);
6668  pBlock->Destroy(m_hAllocator);
6669  vma_delete(m_hAllocator, pBlock);
6670  }
6671  else
6672  {
6673  m_HasEmptyBlock = true;
6674  }
6675  }
6676  }
6677 
6678  return result;
6679 }
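
// Usage sketch of this path from the public API (illustration only - see the
// documentation of vmaDefragment() for the full contract):
#if 0
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit
VmaDefragmentationStats stats = {};
// allocations[] and allocationsChanged[] are prepared by the caller.
vmaDefragment(allocator, allocations, allocationCount, allocationsChanged,
    &defragInfo, &stats);
// Buffers/images bound to moved allocations must afterwards be destroyed,
// recreated and bound again by the caller.
#endif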
6680 
6681 void VmaBlockVector::DestroyDefragmentator()
6682 {
6683  if(m_pDefragmentator != VMA_NULL)
6684  {
6685  vma_delete(m_hAllocator, m_pDefragmentator);
6686  m_pDefragmentator = VMA_NULL;
6687  }
6688 }
6689 
6690 void VmaBlockVector::MakePoolAllocationsLost(
6691  uint32_t currentFrameIndex,
6692  size_t* pLostAllocationCount)
6693 {
6694  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6695  size_t lostAllocationCount = 0;
6696  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6697  {
6698  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6699  VMA_ASSERT(pBlock);
6700  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6701  }
6702  if(pLostAllocationCount != VMA_NULL)
6703  {
6704  *pLostAllocationCount = lostAllocationCount;
6705  }
6706 }
6707 
6708 void VmaBlockVector::AddStats(VmaStats* pStats)
6709 {
6710  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6711  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6712 
6713  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6714 
6715  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6716  {
6717  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6718  VMA_ASSERT(pBlock);
6719  VMA_HEAVY_ASSERT(pBlock->Validate());
6720  VmaStatInfo allocationStatInfo;
6721  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6722  VmaAddStatInfo(pStats->total, allocationStatInfo);
6723  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6724  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6725  }
6726 }
6727 
6728 ////////////////////////////////////////////////////////////////////////////////
6729 // VmaDefragmentator members definition
6730 
6731 VmaDefragmentator::VmaDefragmentator(
6732  VmaAllocator hAllocator,
6733  VmaBlockVector* pBlockVector,
6734  uint32_t currentFrameIndex) :
6735  m_hAllocator(hAllocator),
6736  m_pBlockVector(pBlockVector),
6737  m_CurrentFrameIndex(currentFrameIndex),
6738  m_BytesMoved(0),
6739  m_AllocationsMoved(0),
6740  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6741  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6742 {
6743 }
6744 
6745 VmaDefragmentator::~VmaDefragmentator()
6746 {
6747  for(size_t i = m_Blocks.size(); i--; )
6748  {
6749  vma_delete(m_hAllocator, m_Blocks[i]);
6750  }
6751 }
6752 
6753 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6754 {
6755  AllocationInfo allocInfo;
6756  allocInfo.m_hAllocation = hAlloc;
6757  allocInfo.m_pChanged = pChanged;
6758  m_Allocations.push_back(allocInfo);
6759 }
6760 
6761 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6762 {
6763  // It has already been mapped for defragmentation.
6764  if(m_pMappedDataForDefragmentation)
6765  {
6766  *ppMappedData = m_pMappedDataForDefragmentation;
6767  return VK_SUCCESS;
6768  }
6769 
6770  // It is originally mapped.
6771  if(m_pBlock->m_Mapping.GetMappedData())
6772  {
6773  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6774  return VK_SUCCESS;
6775  }
6776 
6777  // Map on first usage.
6778  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6779  *ppMappedData = m_pMappedDataForDefragmentation;
6780  return res;
6781 }
6782 
6783 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6784 {
6785  if(m_pMappedDataForDefragmentation != VMA_NULL)
6786  {
6787  m_pBlock->Unmap(hAllocator, 1);
6788  }
6789 }
6790 
6791 VkResult VmaDefragmentator::DefragmentRound(
6792  VkDeviceSize maxBytesToMove,
6793  uint32_t maxAllocationsToMove)
6794 {
6795  if(m_Blocks.empty())
6796  {
6797  return VK_SUCCESS;
6798  }
6799 
6800  size_t srcBlockIndex = m_Blocks.size() - 1;
6801  size_t srcAllocIndex = SIZE_MAX;
6802  for(;;)
6803  {
6804  // 1. Find next allocation to move.
6805  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6806  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6807  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6808  {
6809  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6810  {
6811  // Finished: no more allocations to process.
6812  if(srcBlockIndex == 0)
6813  {
6814  return VK_SUCCESS;
6815  }
6816  else
6817  {
6818  --srcBlockIndex;
6819  srcAllocIndex = SIZE_MAX;
6820  }
6821  }
6822  else
6823  {
6824  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6825  }
6826  }
6827 
6828  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6829  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6830 
6831  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6832  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6833  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6834  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6835 
6836  // 2. Try to find new place for this allocation in preceding or current block.
6837  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6838  {
6839  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6840  VmaAllocationRequest dstAllocRequest;
6841  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6842  m_CurrentFrameIndex,
6843  m_pBlockVector->GetFrameInUseCount(),
6844  m_pBlockVector->GetBufferImageGranularity(),
6845  size,
6846  alignment,
6847  suballocType,
6848  false, // canMakeOtherLost
6849  &dstAllocRequest) &&
6850  MoveMakesSense(
6851  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6852  {
6853  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6854 
6855  // Reached limit on number of allocations or bytes to move.
6856  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6857  (m_BytesMoved + size > maxBytesToMove))
6858  {
6859  return VK_INCOMPLETE;
6860  }
6861 
6862  void* pDstMappedData = VMA_NULL;
6863  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6864  if(res != VK_SUCCESS)
6865  {
6866  return res;
6867  }
6868 
6869  void* pSrcMappedData = VMA_NULL;
6870  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6871  if(res != VK_SUCCESS)
6872  {
6873  return res;
6874  }
6875 
6876  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6877  memcpy(
6878  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6879  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6880  static_cast<size_t>(size));
6881 
6882  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6883  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
6884 
6885  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6886 
6887  if(allocInfo.m_pChanged != VMA_NULL)
6888  {
6889  *allocInfo.m_pChanged = VK_TRUE;
6890  }
6891 
6892  ++m_AllocationsMoved;
6893  m_BytesMoved += size;
6894 
6895  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6896 
6897  break;
6898  }
6899  }
6900 
6901  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6902 
6903  if(srcAllocIndex > 0)
6904  {
6905  --srcAllocIndex;
6906  }
6907  else
6908  {
6909  if(srcBlockIndex > 0)
6910  {
6911  --srcBlockIndex;
6912  srcAllocIndex = SIZE_MAX;
6913  }
6914  else
6915  {
6916  return VK_SUCCESS;
6917  }
6918  }
6919  }
6920 }
6921 
6922 VkResult VmaDefragmentator::Defragment(
6923  VkDeviceSize maxBytesToMove,
6924  uint32_t maxAllocationsToMove)
6925 {
6926  if(m_Allocations.empty())
6927  {
6928  return VK_SUCCESS;
6929  }
6930 
6931  // Create block info for each block.
6932  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6933  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6934  {
6935  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6936  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6937  m_Blocks.push_back(pBlockInfo);
6938  }
6939 
6940  // Sort them by m_pBlock pointer value.
6941  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6942 
6943  // Move each allocation info from m_Allocations into the m_Allocations list of its owning entry in m_Blocks.
6944  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6945  {
6946  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6947  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
6948  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6949  {
6950  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6951  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6952  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6953  {
6954  (*it)->m_Allocations.push_back(allocInfo);
6955  }
6956  else
6957  {
6958  VMA_ASSERT(0);
6959  }
6960  }
6961  }
6962  m_Allocations.clear();
6963 
6964  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6965  {
6966  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6967  pBlockInfo->CalcHasNonMovableAllocations();
6968  pBlockInfo->SortAllocationsBySizeDescecnding();
6969  }
6970 
6971  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6972  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6973 
6974  // Execute defragmentation rounds (the main part).
6975  VkResult result = VK_SUCCESS;
6976  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6977  {
6978  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6979  }
6980 
6981  // Unmap blocks that were mapped for defragmentation.
6982  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6983  {
6984  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6985  }
6986 
6987  return result;
6988 }
6989 
6990 bool VmaDefragmentator::MoveMakesSense(
6991  size_t dstBlockIndex, VkDeviceSize dstOffset,
6992  size_t srcBlockIndex, VkDeviceSize srcOffset)
6993 {
6994  if(dstBlockIndex < srcBlockIndex)
6995  {
6996  return true;
6997  }
6998  if(dstBlockIndex > srcBlockIndex)
6999  {
7000  return false;
7001  }
7002  if(dstOffset < srcOffset)
7003  {
7004  return true;
7005  }
7006  return false;
7007 }
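
// Illustration: a move is accepted only when it strictly lowers the
// (block index, offset) pair: moving an allocation from (block 2, offset 0)
// to (block 1, offset 4096) makes sense, while moving within block 1 from
// offset 0 up to offset 4096 does not.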
7008 
7009 ////////////////////////////////////////////////////////////////////////////////
7010 // VmaAllocator_T
7011 
7012 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7013  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7014  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7015  m_hDevice(pCreateInfo->device),
7016  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7017  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7018  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7019  m_PreferredLargeHeapBlockSize(0),
7020  m_PhysicalDevice(pCreateInfo->physicalDevice),
7021  m_CurrentFrameIndex(0),
7022  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7023 {
7024  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7025 
7026  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7027  memset(&m_MemProps, 0, sizeof(m_MemProps));
7028  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7029 
7030  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7031  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7032 
7033  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7034  {
7035  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7036  }
7037 
7038  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7039  {
7040  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7041  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7042  }
7043 
7044  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7045 
7046  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7047  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7048 
7049  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7050  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7051 
7052  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7053  {
7054  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7055  {
7056  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7057  if(limit != VK_WHOLE_SIZE)
7058  {
7059  m_HeapSizeLimit[heapIndex] = limit;
7060  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7061  {
7062  m_MemProps.memoryHeaps[heapIndex].size = limit;
7063  }
7064  }
7065  }
7066  }
7067 
7068  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7069  {
7070  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7071 
7072  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7073  this,
7074  memTypeIndex,
7075  preferredBlockSize,
7076  0,
7077  SIZE_MAX,
7078  GetBufferImageGranularity(),
7079  pCreateInfo->frameInUseCount,
7080  false); // isCustomPool
7081  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7082  // because minBlockCount is 0.
7083  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7084  }
7085 }
7086 
7087 VmaAllocator_T::~VmaAllocator_T()
7088 {
7089  VMA_ASSERT(m_Pools.empty());
7090 
7091  for(size_t i = GetMemoryTypeCount(); i--; )
7092  {
7093  vma_delete(this, m_pDedicatedAllocations[i]);
7094  vma_delete(this, m_pBlockVectors[i]);
7095  }
7096 }
7097 
7098 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7099 {
7100 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7101  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7102  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7103  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7104  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7105  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7106  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7107  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7108  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7109  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7110  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7111  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7112  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7113  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7114  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7115  if(m_UseKhrDedicatedAllocation)
7116  {
7117  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7118  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7119  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7120  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7121  }
7122 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7123 
7124 #define VMA_COPY_IF_NOT_NULL(funcName) \
7125  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7126 
7127  if(pVulkanFunctions != VMA_NULL)
7128  {
7129  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7130  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7131  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7132  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7133  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7134  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7135  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7136  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7137  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7138  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7139  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7140  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7141  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7142  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7143  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7144  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7145  }
7146 
7147 #undef VMA_COPY_IF_NOT_NULL
7148 
7149  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7150  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7151  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7152  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7153  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7154  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7155  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7156  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7157  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7158  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7159  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7160  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7161  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7162  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7163  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7164  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7165  if(m_UseKhrDedicatedAllocation)
7166  {
7167  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7168  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7169  }
7170 }
7171 
7172 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7173 {
7174  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7175  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7176  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7177  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7178 }
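
/*
A worked example of the heuristic above (a sketch, assuming the library
defaults of 512 MiB for VMA_SMALL_HEAP_MAX_SIZE and 256 MiB for
m_PreferredLargeHeapBlockSize):

    256 MiB heap -> "small" -> preferred block size = 256 MiB / 8 = 32 MiB
    8 GiB heap   -> "large" -> preferred block size = 256 MiB
*/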
7179 
7180 VkResult VmaAllocator_T::AllocateMemoryOfType(
7181  const VkMemoryRequirements& vkMemReq,
7182  bool dedicatedAllocation,
7183  VkBuffer dedicatedBuffer,
7184  VkImage dedicatedImage,
7185  const VmaAllocationCreateInfo& createInfo,
7186  uint32_t memTypeIndex,
7187  VmaSuballocationType suballocType,
7188  VmaAllocation* pAllocation)
7189 {
7190  VMA_ASSERT(pAllocation != VMA_NULL);
7191  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7192 
7193  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7194 
7195  // If memory type is not HOST_VISIBLE, disable MAPPED.
7196  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7197  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7198  {
7199  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7200  }
7201 
7202  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7203  VMA_ASSERT(blockVector);
7204 
7205  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7206  bool preferDedicatedMemory =
7207  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7208  dedicatedAllocation ||
7209  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7210  vkMemReq.size > preferredBlockSize / 2;
7211 
7212  if(preferDedicatedMemory &&
7213  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7214  finalCreateInfo.pool == VK_NULL_HANDLE)
7215  {
7216  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7217  }
7218 
7219  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7220  {
7221  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7222  {
7223  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7224  }
7225  else
7226  {
7227  return AllocateDedicatedMemory(
7228  vkMemReq.size,
7229  suballocType,
7230  memTypeIndex,
7231  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7232  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7233  finalCreateInfo.pUserData,
7234  dedicatedBuffer,
7235  dedicatedImage,
7236  pAllocation);
7237  }
7238  }
7239  else
7240  {
7241  VkResult res = blockVector->Allocate(
7242  VK_NULL_HANDLE, // hCurrentPool
7243  m_CurrentFrameIndex.load(),
7244  vkMemReq,
7245  finalCreateInfo,
7246  suballocType,
7247  pAllocation);
7248  if(res == VK_SUCCESS)
7249  {
7250  return res;
7251  }
7252 
7253  // Block vector allocation failed. Try dedicated memory as a fallback.
7254  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7255  {
7256  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7257  }
7258  else
7259  {
7260  res = AllocateDedicatedMemory(
7261  vkMemReq.size,
7262  suballocType,
7263  memTypeIndex,
7264  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7265  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7266  finalCreateInfo.pUserData,
7267  dedicatedBuffer,
7268  dedicatedImage,
7269  pAllocation);
7270  if(res == VK_SUCCESS)
7271  {
7272  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
7273  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7274  return VK_SUCCESS;
7275  }
7276  else
7277  {
7278  // Everything failed: Return error code.
7279  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7280  return res;
7281  }
7282  }
7283  }
7284 }
7285 
7286 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7287  VkDeviceSize size,
7288  VmaSuballocationType suballocType,
7289  uint32_t memTypeIndex,
7290  bool map,
7291  bool isUserDataString,
7292  void* pUserData,
7293  VkBuffer dedicatedBuffer,
7294  VkImage dedicatedImage,
7295  VmaAllocation* pAllocation)
7296 {
7297  VMA_ASSERT(pAllocation);
7298 
7299  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7300  allocInfo.memoryTypeIndex = memTypeIndex;
7301  allocInfo.allocationSize = size;
7302 
7303  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7304  if(m_UseKhrDedicatedAllocation)
7305  {
7306  if(dedicatedBuffer != VK_NULL_HANDLE)
7307  {
7308  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7309  dedicatedAllocInfo.buffer = dedicatedBuffer;
7310  allocInfo.pNext = &dedicatedAllocInfo;
7311  }
7312  else if(dedicatedImage != VK_NULL_HANDLE)
7313  {
7314  dedicatedAllocInfo.image = dedicatedImage;
7315  allocInfo.pNext = &dedicatedAllocInfo;
7316  }
7317  }
7318 
7319  // Allocate VkDeviceMemory.
7320  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7321  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7322  if(res < 0)
7323  {
7324  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7325  return res;
7326  }
7327 
7328  void* pMappedData = VMA_NULL;
7329  if(map)
7330  {
7331  res = (*m_VulkanFunctions.vkMapMemory)(
7332  m_hDevice,
7333  hMemory,
7334  0,
7335  VK_WHOLE_SIZE,
7336  0,
7337  &pMappedData);
7338  if(res < 0)
7339  {
7340  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7341  FreeVulkanMemory(memTypeIndex, size, hMemory);
7342  return res;
7343  }
7344  }
7345 
7346  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7347  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7348  (*pAllocation)->SetUserData(this, pUserData);
7349 
7350  // Register it in m_pDedicatedAllocations.
7351  {
7352  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7353  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7354  VMA_ASSERT(pDedicatedAllocations);
7355  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7356  }
7357 
7358  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7359 
7360  return VK_SUCCESS;
7361 }
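
/*
How user code reaches AllocateDedicatedMemory() above - a minimal sketch,
assuming an initialized `allocator` and a filled `bufCreateInfo`:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Force a dedicated VkDeviceMemory block instead of a suballocation.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);

When VK_KHR_dedicated_allocation is in use, VkMemoryDedicatedAllocateInfoKHR is
chained into VkMemoryAllocateInfo::pNext as shown in the function body above.
*/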
7362 
7363 void VmaAllocator_T::GetBufferMemoryRequirements(
7364  VkBuffer hBuffer,
7365  VkMemoryRequirements& memReq,
7366  bool& requiresDedicatedAllocation,
7367  bool& prefersDedicatedAllocation) const
7368 {
7369  if(m_UseKhrDedicatedAllocation)
7370  {
7371  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7372  memReqInfo.buffer = hBuffer;
7373 
7374  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7375 
7376  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7377  memReq2.pNext = &memDedicatedReq;
7378 
7379  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7380 
7381  memReq = memReq2.memoryRequirements;
7382  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7383  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7384  }
7385  else
7386  {
7387  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7388  requiresDedicatedAllocation = false;
7389  prefersDedicatedAllocation = false;
7390  }
7391 }
7392 
7393 void VmaAllocator_T::GetImageMemoryRequirements(
7394  VkImage hImage,
7395  VkMemoryRequirements& memReq,
7396  bool& requiresDedicatedAllocation,
7397  bool& prefersDedicatedAllocation) const
7398 {
7399  if(m_UseKhrDedicatedAllocation)
7400  {
7401  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7402  memReqInfo.image = hImage;
7403 
7404  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7405 
7406  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7407  memReq2.pNext = &memDedicatedReq;
7408 
7409  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7410 
7411  memReq = memReq2.memoryRequirements;
7412  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7413  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7414  }
7415  else
7416  {
7417  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7418  requiresDedicatedAllocation = false;
7419  prefersDedicatedAllocation = false;
7420  }
7421 }
7422 
7423 VkResult VmaAllocator_T::AllocateMemory(
7424  const VkMemoryRequirements& vkMemReq,
7425  bool requiresDedicatedAllocation,
7426  bool prefersDedicatedAllocation,
7427  VkBuffer dedicatedBuffer,
7428  VkImage dedicatedImage,
7429  const VmaAllocationCreateInfo& createInfo,
7430  VmaSuballocationType suballocType,
7431  VmaAllocation* pAllocation)
7432 {
7433  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7434  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7435  {
7436  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7437  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7438  }
7439  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7440  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7441  {
7442  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7443  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7444  }
7445  if(requiresDedicatedAllocation)
7446  {
7447  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7448  {
7449  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7450  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7451  }
7452  if(createInfo.pool != VK_NULL_HANDLE)
7453  {
7454  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7455  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7456  }
7457  }
7458  if((createInfo.pool != VK_NULL_HANDLE) &&
7459  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7460  {
7461  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7462  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7463  }
7464 
7465  if(createInfo.pool != VK_NULL_HANDLE)
7466  {
7467  return createInfo.pool->m_BlockVector.Allocate(
7468  createInfo.pool,
7469  m_CurrentFrameIndex.load(),
7470  vkMemReq,
7471  createInfo,
7472  suballocType,
7473  pAllocation);
7474  }
7475  else
7476  {
7477  // Bit mask of Vulkan memory types acceptable for this allocation.
7478  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7479  uint32_t memTypeIndex = UINT32_MAX;
7480  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7481  if(res == VK_SUCCESS)
7482  {
7483  res = AllocateMemoryOfType(
7484  vkMemReq,
7485  requiresDedicatedAllocation || prefersDedicatedAllocation,
7486  dedicatedBuffer,
7487  dedicatedImage,
7488  createInfo,
7489  memTypeIndex,
7490  suballocType,
7491  pAllocation);
7492  // Succeeded on first try.
7493  if(res == VK_SUCCESS)
7494  {
7495  return res;
7496  }
7497  // Allocation from this memory type failed. Try other compatible memory types.
7498  else
7499  {
7500  for(;;)
7501  {
7502  // Remove old memTypeIndex from list of possibilities.
7503  memoryTypeBits &= ~(1u << memTypeIndex);
7504  // Find alternative memTypeIndex.
7505  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7506  if(res == VK_SUCCESS)
7507  {
7508  res = AllocateMemoryOfType(
7509  vkMemReq,
7510  requiresDedicatedAllocation || prefersDedicatedAllocation,
7511  dedicatedBuffer,
7512  dedicatedImage,
7513  createInfo,
7514  memTypeIndex,
7515  suballocType,
7516  pAllocation);
7517  // Allocation from this alternative memory type succeeded.
7518  if(res == VK_SUCCESS)
7519  {
7520  return res;
7521  }
7522  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7523  }
7524  // No other matching memory type index could be found.
7525  else
7526  {
7527  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7529  }
7530  }
7531  }
7532  }
7533  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7534  else
7535  return res;
7536  }
7537 }
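
/*
The fallback loop above narrows the candidate set by clearing the failed
type's bit before retrying. For example (values illustrative):

    memoryTypeBits = 0xB (types 0, 1, 3 allowed), failed memTypeIndex = 1:
    memoryTypeBits &= ~(1u << 1);  // -> 0x9, next search considers types 0 and 3.
*/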
7538 
7539 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7540 {
7541  VMA_ASSERT(allocation);
7542 
7543  if(allocation->CanBecomeLost() == false ||
7544  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7545  {
7546  switch(allocation->GetType())
7547  {
7548  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7549  {
7550  VmaBlockVector* pBlockVector = VMA_NULL;
7551  VmaPool hPool = allocation->GetPool();
7552  if(hPool != VK_NULL_HANDLE)
7553  {
7554  pBlockVector = &hPool->m_BlockVector;
7555  }
7556  else
7557  {
7558  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7559  pBlockVector = m_pBlockVectors[memTypeIndex];
7560  }
7561  pBlockVector->Free(allocation);
7562  }
7563  break;
7564  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7565  FreeDedicatedMemory(allocation);
7566  break;
7567  default:
7568  VMA_ASSERT(0);
7569  }
7570  }
7571 
7572  allocation->SetUserData(this, VMA_NULL);
7573  vma_delete(this, allocation);
7574 }
7575 
7576 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7577 {
7578  // Initialize.
7579  InitStatInfo(pStats->total);
7580  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7581  InitStatInfo(pStats->memoryType[i]);
7582  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7583  InitStatInfo(pStats->memoryHeap[i]);
7584 
7585  // Process default pools.
7586  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7587  {
7588  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7589  VMA_ASSERT(pBlockVector);
7590  pBlockVector->AddStats(pStats);
7591  }
7592 
7593  // Process custom pools.
7594  {
7595  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7596  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7597  {
7598  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7599  }
7600  }
7601 
7602  // Process dedicated allocations.
7603  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7604  {
7605  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7606  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7607  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7608  VMA_ASSERT(pDedicatedAllocVector);
7609  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7610  {
7611  VmaStatInfo allocationStatInfo;
7612  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7613  VmaAddStatInfo(pStats->total, allocationStatInfo);
7614  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7615  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7616  }
7617  }
7618 
7619  // Postprocess.
7620  VmaPostprocessCalcStatInfo(pStats->total);
7621  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7622  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7623  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7624  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7625 }
7626 
7627 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7628 
7629 VkResult VmaAllocator_T::Defragment(
7630  VmaAllocation* pAllocations,
7631  size_t allocationCount,
7632  VkBool32* pAllocationsChanged,
7633  const VmaDefragmentationInfo* pDefragmentationInfo,
7634  VmaDefragmentationStats* pDefragmentationStats)
7635 {
7636  if(pAllocationsChanged != VMA_NULL)
7637  {
7638  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7639  }
7640  if(pDefragmentationStats != VMA_NULL)
7641  {
7642  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7643  }
7644 
7645  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7646 
7647  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7648 
7649  const size_t poolCount = m_Pools.size();
7650 
7651  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7652  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7653  {
7654  VmaAllocation hAlloc = pAllocations[allocIndex];
7655  VMA_ASSERT(hAlloc);
7656  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7657  // DedicatedAlloc cannot be defragmented.
7658  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7659  // Only HOST_VISIBLE memory types can be defragmented.
7660  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7661  // Lost allocation cannot be defragmented.
7662  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7663  {
7664  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7665 
7666  const VmaPool hAllocPool = hAlloc->GetPool();
7667  // This allocation belongs to a custom pool.
7668  if(hAllocPool != VK_NULL_HANDLE)
7669  {
7670  pAllocBlockVector = &hAllocPool->GetBlockVector();
7671  }
7672  // This allocation belongs to the general pool.
7673  else
7674  {
7675  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7676  }
7677 
7678  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7679 
7680  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7681  &pAllocationsChanged[allocIndex] : VMA_NULL;
7682  pDefragmentator->AddAllocation(hAlloc, pChanged);
7683  }
7684  }
7685 
7686  VkResult result = VK_SUCCESS;
7687 
7688  // ======== Main processing.
7689 
7690  VkDeviceSize maxBytesToMove = SIZE_MAX;
7691  uint32_t maxAllocationsToMove = UINT32_MAX;
7692  if(pDefragmentationInfo != VMA_NULL)
7693  {
7694  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7695  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7696  }
7697 
7698  // Process standard memory.
7699  for(uint32_t memTypeIndex = 0;
7700  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7701  ++memTypeIndex)
7702  {
7703  // Only HOST_VISIBLE memory types can be defragmented.
7704  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7705  {
7706  result = m_pBlockVectors[memTypeIndex]->Defragment(
7707  pDefragmentationStats,
7708  maxBytesToMove,
7709  maxAllocationsToMove);
7710  }
7711  }
7712 
7713  // Process custom pools.
7714  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7715  {
7716  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7717  pDefragmentationStats,
7718  maxBytesToMove,
7719  maxAllocationsToMove);
7720  }
7721 
7722  // ======== Destroy defragmentators.
7723 
7724  // Process custom pools.
7725  for(size_t poolIndex = poolCount; poolIndex--; )
7726  {
7727  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7728  }
7729 
7730  // Process standard memory.
7731  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7732  {
7733  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7734  {
7735  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7736  }
7737  }
7738 
7739  return result;
7740 }
7741 
7742 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7743 {
7744  if(hAllocation->CanBecomeLost())
7745  {
7746  /*
7747  Warning: This is a carefully designed algorithm.
7748  Do not modify unless you really know what you're doing :)
7749  */
7750  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7751  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7752  for(;;)
7753  {
7754  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7755  {
7756  pAllocationInfo->memoryType = UINT32_MAX;
7757  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7758  pAllocationInfo->offset = 0;
7759  pAllocationInfo->size = hAllocation->GetSize();
7760  pAllocationInfo->pMappedData = VMA_NULL;
7761  pAllocationInfo->pUserData = hAllocation->GetUserData();
7762  return;
7763  }
7764  else if(localLastUseFrameIndex == localCurrFrameIndex)
7765  {
7766  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7767  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7768  pAllocationInfo->offset = hAllocation->GetOffset();
7769  pAllocationInfo->size = hAllocation->GetSize();
7770  pAllocationInfo->pMappedData = VMA_NULL;
7771  pAllocationInfo->pUserData = hAllocation->GetUserData();
7772  return;
7773  }
7774  else // Last use time earlier than current time.
7775  {
7776  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7777  {
7778  localLastUseFrameIndex = localCurrFrameIndex;
7779  }
7780  }
7781  }
7782  }
7783  else
7784  {
7785  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7786  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7787  pAllocationInfo->offset = hAllocation->GetOffset();
7788  pAllocationInfo->size = hAllocation->GetSize();
7789  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7790  pAllocationInfo->pUserData = hAllocation->GetUserData();
7791  }
7792 }
7793 
7794 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7795 {
7796  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7797  if(hAllocation->CanBecomeLost())
7798  {
7799  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7800  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7801  for(;;)
7802  {
7803  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7804  {
7805  return false;
7806  }
7807  else if(localLastUseFrameIndex == localCurrFrameIndex)
7808  {
7809  return true;
7810  }
7811  else // Last use time earlier than current time.
7812  {
7813  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7814  {
7815  localLastUseFrameIndex = localCurrFrameIndex;
7816  }
7817  }
7818  }
7819  }
7820  else
7821  {
7822  return true;
7823  }
7824 }
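
/*
A typical per-frame pattern for allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (a sketch; RecreateBuffer is a
hypothetical application function):

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation was lost and its memory may already be reused.
        RecreateBuffer(&buf, &alloc);
    }
*/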
7825 
7826 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7827 {
7828  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7829 
7830  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7831 
7832  if(newCreateInfo.maxBlockCount == 0)
7833  {
7834  newCreateInfo.maxBlockCount = SIZE_MAX;
7835  }
7836  if(newCreateInfo.blockSize == 0)
7837  {
7838  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7839  }
7840 
7841  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7842 
7843  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7844  if(res != VK_SUCCESS)
7845  {
7846  vma_delete(this, *pPool);
7847  *pPool = VMA_NULL;
7848  return res;
7849  }
7850 
7851  // Add to m_Pools.
7852  {
7853  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7854  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7855  }
7856 
7857  return VK_SUCCESS;
7858 }
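
/*
Creating a custom pool through the public API - a sketch. Passing zero for
blockSize and maxBlockCount picks the defaults handled above
(CalcPreferredBlockSize() and SIZE_MAX, respectively); memTypeIndex would
normally come from vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 0;
    poolCreateInfo.maxBlockCount = 0;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/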
7859 
7860 void VmaAllocator_T::DestroyPool(VmaPool pool)
7861 {
7862  // Remove from m_Pools.
7863  {
7864  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7865  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7866  VMA_ASSERT(success && "Pool not found in Allocator.");
7867  }
7868 
7869  vma_delete(this, pool);
7870 }
7871 
7872 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7873 {
7874  pool->m_BlockVector.GetPoolStats(pPoolStats);
7875 }
7876 
7877 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7878 {
7879  m_CurrentFrameIndex.store(frameIndex);
7880 }
7881 
7882 void VmaAllocator_T::MakePoolAllocationsLost(
7883  VmaPool hPool,
7884  size_t* pLostAllocationCount)
7885 {
7886  hPool->m_BlockVector.MakePoolAllocationsLost(
7887  m_CurrentFrameIndex.load(),
7888  pLostAllocationCount);
7889 }
7890 
7891 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7892 {
7893  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7894  (*pAllocation)->InitLost();
7895 }
7896 
7897 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7898 {
7899  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7900 
7901  VkResult res;
7902  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7903  {
7904  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7905  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7906  {
7907  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7908  if(res == VK_SUCCESS)
7909  {
7910  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7911  }
7912  }
7913  else
7914  {
7915  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7916  }
7917  }
7918  else
7919  {
7920  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7921  }
7922 
7923  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7924  {
7925  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7926  }
7927 
7928  return res;
7929 }
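
/*
The per-heap budget enforced above comes from
VmaAllocatorCreateInfo::pHeapSizeLimit. A sketch of capping heap 0 to 1 GiB
while leaving the other heaps unlimited (VK_WHOLE_SIZE means no limit):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;
    heapLimits[0] = 1024ull * 1024 * 1024;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/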
7930 
7931 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7932 {
7933  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7934  {
7935  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7936  }
7937 
7938  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7939 
7940  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7941  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7942  {
7943  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7944  m_HeapSizeLimit[heapIndex] += size;
7945  }
7946 }
7947 
7948 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7949 {
7950  if(hAllocation->CanBecomeLost())
7951  {
7952  return VK_ERROR_MEMORY_MAP_FAILED;
7953  }
7954 
7955  switch(hAllocation->GetType())
7956  {
7957  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7958  {
7959  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7960  char *pBytes = VMA_NULL;
7961  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
7962  if(res == VK_SUCCESS)
7963  {
7964  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7965  hAllocation->BlockAllocMap();
7966  }
7967  return res;
7968  }
7969  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7970  return hAllocation->DedicatedAllocMap(this, ppData);
7971  default:
7972  VMA_ASSERT(0);
7973  return VK_ERROR_MEMORY_MAP_FAILED;
7974  }
7975 }
7976 
7977 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7978 {
7979  switch(hAllocation->GetType())
7980  {
7981  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7982  {
7983  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7984  hAllocation->BlockAllocUnmap();
7985  pBlock->Unmap(this, 1);
7986  }
7987  break;
7988  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7989  hAllocation->DedicatedAllocUnmap(this);
7990  break;
7991  default:
7992  VMA_ASSERT(0);
7993  }
7994 }
7995 
7996 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7997 {
7998  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7999 
8000  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8001  {
8002  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8003  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8004  VMA_ASSERT(pDedicatedAllocations);
8005  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8006  VMA_ASSERT(success);
8007  }
8008 
8009  VkDeviceMemory hMemory = allocation->GetMemory();
8010 
8011  if(allocation->GetMappedData() != VMA_NULL)
8012  {
8013  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8014  }
8015 
8016  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8017 
8018  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8019 }
8020 
8021 #if VMA_STATS_STRING_ENABLED
8022 
8023 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8024 {
8025  bool dedicatedAllocationsStarted = false;
8026  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8027  {
8028  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8029  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8030  VMA_ASSERT(pDedicatedAllocVector);
8031  if(pDedicatedAllocVector->empty() == false)
8032  {
8033  if(dedicatedAllocationsStarted == false)
8034  {
8035  dedicatedAllocationsStarted = true;
8036  json.WriteString("DedicatedAllocations");
8037  json.BeginObject();
8038  }
8039 
8040  json.BeginString("Type ");
8041  json.ContinueString(memTypeIndex);
8042  json.EndString();
8043 
8044  json.BeginArray();
8045 
8046  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8047  {
8048  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8049  json.BeginObject(true);
8050 
8051  json.WriteString("Type");
8052  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8053 
8054  json.WriteString("Size");
8055  json.WriteNumber(hAlloc->GetSize());
8056 
8057  const void* pUserData = hAlloc->GetUserData();
8058  if(pUserData != VMA_NULL)
8059  {
8060  json.WriteString("UserData");
8061  if(hAlloc->IsUserDataString())
8062  {
8063  json.WriteString((const char*)pUserData);
8064  }
8065  else
8066  {
8067  json.BeginString();
8068  json.ContinueString_Pointer(pUserData);
8069  json.EndString();
8070  }
8071  }
8072 
8073  json.EndObject();
8074  }
8075 
8076  json.EndArray();
8077  }
8078  }
8079  if(dedicatedAllocationsStarted)
8080  {
8081  json.EndObject();
8082  }
8083 
8084  {
8085  bool allocationsStarted = false;
8086  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8087  {
8088  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8089  {
8090  if(allocationsStarted == false)
8091  {
8092  allocationsStarted = true;
8093  json.WriteString("DefaultPools");
8094  json.BeginObject();
8095  }
8096 
8097  json.BeginString("Type ");
8098  json.ContinueString(memTypeIndex);
8099  json.EndString();
8100 
8101  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8102  }
8103  }
8104  if(allocationsStarted)
8105  {
8106  json.EndObject();
8107  }
8108  }
8109 
8110  {
8111  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8112  const size_t poolCount = m_Pools.size();
8113  if(poolCount > 0)
8114  {
8115  json.WriteString("Pools");
8116  json.BeginArray();
8117  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8118  {
8119  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8120  }
8121  json.EndArray();
8122  }
8123  }
8124 }
8125 
8126 #endif // #if VMA_STATS_STRING_ENABLED
8127 
8128 static VkResult AllocateMemoryForImage(
8129  VmaAllocator allocator,
8130  VkImage image,
8131  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8132  VmaSuballocationType suballocType,
8133  VmaAllocation* pAllocation)
8134 {
8135  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8136 
8137  VkMemoryRequirements vkMemReq = {};
8138  bool requiresDedicatedAllocation = false;
8139  bool prefersDedicatedAllocation = false;
8140  allocator->GetImageMemoryRequirements(image, vkMemReq,
8141  requiresDedicatedAllocation, prefersDedicatedAllocation);
8142 
8143  return allocator->AllocateMemory(
8144  vkMemReq,
8145  requiresDedicatedAllocation,
8146  prefersDedicatedAllocation,
8147  VK_NULL_HANDLE, // dedicatedBuffer
8148  image, // dedicatedImage
8149  *pAllocationCreateInfo,
8150  suballocType,
8151  pAllocation);
8152 }
8153 
8154 ////////////////////////////////////////////////////////////////////////////////
8155 // Public interface
8156 
8157 VkResult vmaCreateAllocator(
8158  const VmaAllocatorCreateInfo* pCreateInfo,
8159  VmaAllocator* pAllocator)
8160 {
8161  VMA_ASSERT(pCreateInfo && pAllocator);
8162  VMA_DEBUG_LOG("vmaCreateAllocator");
8163  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8164  return VK_SUCCESS;
8165 }
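
/*
Minimal allocator creation - a sketch, assuming `physicalDevice` and `device`
were obtained during normal Vulkan initialization:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/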
8166 
8167 void vmaDestroyAllocator(
8168  VmaAllocator allocator)
8169 {
8170  if(allocator != VK_NULL_HANDLE)
8171  {
8172  VMA_DEBUG_LOG("vmaDestroyAllocator");
8173  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8174  vma_delete(&allocationCallbacks, allocator);
8175  }
8176 }
8177 
8178 void vmaGetPhysicalDeviceProperties(
8179  VmaAllocator allocator,
8180  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8181 {
8182  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8183  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8184 }
8185 
8186 void vmaGetMemoryProperties(
8187  VmaAllocator allocator,
8188  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8189 {
8190  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8191  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8192 }
8193 
8194 void vmaGetMemoryTypeProperties(
8195  VmaAllocator allocator,
8196  uint32_t memoryTypeIndex,
8197  VkMemoryPropertyFlags* pFlags)
8198 {
8199  VMA_ASSERT(allocator && pFlags);
8200  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8201  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8202 }
8203 
8204 void vmaSetCurrentFrameIndex(
8205  VmaAllocator allocator,
8206  uint32_t frameIndex)
8207 {
8208  VMA_ASSERT(allocator);
8209  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8210 
8211  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8212 
8213  allocator->SetCurrentFrameIndex(frameIndex);
8214 }
8215 
8216 void vmaCalculateStats(
8217  VmaAllocator allocator,
8218  VmaStats* pStats)
8219 {
8220  VMA_ASSERT(allocator && pStats);
8221  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8222  allocator->CalculateStats(pStats);
8223 }
8224 
8225 #if VMA_STATS_STRING_ENABLED
8226 
8227 void vmaBuildStatsString(
8228  VmaAllocator allocator,
8229  char** ppStatsString,
8230  VkBool32 detailedMap)
8231 {
8232  VMA_ASSERT(allocator && ppStatsString);
8233  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8234 
8235  VmaStringBuilder sb(allocator);
8236  {
8237  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8238  json.BeginObject();
8239 
8240  VmaStats stats;
8241  allocator->CalculateStats(&stats);
8242 
8243  json.WriteString("Total");
8244  VmaPrintStatInfo(json, stats.total);
8245 
8246  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8247  {
8248  json.BeginString("Heap ");
8249  json.ContinueString(heapIndex);
8250  json.EndString();
8251  json.BeginObject();
8252 
8253  json.WriteString("Size");
8254  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8255 
8256  json.WriteString("Flags");
8257  json.BeginArray(true);
8258  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8259  {
8260  json.WriteString("DEVICE_LOCAL");
8261  }
8262  json.EndArray();
8263 
8264  if(stats.memoryHeap[heapIndex].blockCount > 0)
8265  {
8266  json.WriteString("Stats");
8267  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8268  }
8269 
8270  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8271  {
8272  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8273  {
8274  json.BeginString("Type ");
8275  json.ContinueString(typeIndex);
8276  json.EndString();
8277 
8278  json.BeginObject();
8279 
8280  json.WriteString("Flags");
8281  json.BeginArray(true);
8282  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8283  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8284  {
8285  json.WriteString("DEVICE_LOCAL");
8286  }
8287  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8288  {
8289  json.WriteString("HOST_VISIBLE");
8290  }
8291  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8292  {
8293  json.WriteString("HOST_COHERENT");
8294  }
8295  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8296  {
8297  json.WriteString("HOST_CACHED");
8298  }
8299  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8300  {
8301  json.WriteString("LAZILY_ALLOCATED");
8302  }
8303  json.EndArray();
8304 
8305  if(stats.memoryType[typeIndex].blockCount > 0)
8306  {
8307  json.WriteString("Stats");
8308  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8309  }
8310 
8311  json.EndObject();
8312  }
8313  }
8314 
8315  json.EndObject();
8316  }
8317  if(detailedMap == VK_TRUE)
8318  {
8319  allocator->PrintDetailedMap(json);
8320  }
8321 
8322  json.EndObject();
8323  }
8324 
8325  const size_t len = sb.GetLength();
8326  char* const pChars = vma_new_array(allocator, char, len + 1);
8327  if(len > 0)
8328  {
8329  memcpy(pChars, sb.GetData(), len);
8330  }
8331  pChars[len] = '\0';
8332  *ppStatsString = pChars;
8333 }
8334 
8335 void vmaFreeStatsString(
8336  VmaAllocator allocator,
8337  char* pStatsString)
8338 {
8339  if(pStatsString != VMA_NULL)
8340  {
8341  VMA_ASSERT(allocator);
8342  size_t len = strlen(pStatsString);
8343  vma_delete_array(allocator, pStatsString, len + 1);
8344  }
8345 }
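
/*
Typical usage of the statistics string - a sketch. The returned buffer is
allocated by the library and must be released with vmaFreeStatsString():

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // statsString now holds a JSON document - log it or save it to a file.
    vmaFreeStatsString(allocator, statsString);
*/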
8346 
8347 #endif // #if VMA_STATS_STRING_ENABLED
8348 
8349 /*
8350 This function is not protected by any mutex because it just reads immutable data.
8351 */
8352 VkResult vmaFindMemoryTypeIndex(
8353  VmaAllocator allocator,
8354  uint32_t memoryTypeBits,
8355  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8356  uint32_t* pMemoryTypeIndex)
8357 {
8358  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8359  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8360  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8361 
8362  if(pAllocationCreateInfo->memoryTypeBits != 0)
8363  {
8364  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8365  }
8366 
8367  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8368  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8369 
8370  // Convert usage to requiredFlags and preferredFlags.
8371  switch(pAllocationCreateInfo->usage)
8372  {
8373  case VMA_MEMORY_USAGE_UNKNOWN:
8374  break;
8375  case VMA_MEMORY_USAGE_GPU_ONLY:
8376  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8377  break;
8378  case VMA_MEMORY_USAGE_CPU_ONLY:
8379  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8380  break;
8381  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8382  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8383  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8384  break;
8385  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8386  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8387  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8388  break;
8389  default:
8390  break;
8391  }
8392 
8393  *pMemoryTypeIndex = UINT32_MAX;
8394  uint32_t minCost = UINT32_MAX;
8395  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8396  memTypeIndex < allocator->GetMemoryTypeCount();
8397  ++memTypeIndex, memTypeBit <<= 1)
8398  {
8399  // This memory type is acceptable according to memoryTypeBits bitmask.
8400  if((memTypeBit & memoryTypeBits) != 0)
8401  {
8402  const VkMemoryPropertyFlags currFlags =
8403  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8404  // This memory type contains requiredFlags.
8405  if((requiredFlags & ~currFlags) == 0)
8406  {
8407  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8408  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8409  // Remember memory type with lowest cost.
8410  if(currCost < minCost)
8411  {
8412  *pMemoryTypeIndex = memTypeIndex;
8413  if(currCost == 0)
8414  {
8415  return VK_SUCCESS;
8416  }
8417  minCost = currCost;
8418  }
8419  }
8420  }
8421  }
8422  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8423 }
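
/*
Using vmaFindMemoryTypeIndex() directly - a sketch, e.g. to choose a memory
type for a custom pool. Passing UINT32_MAX as memoryTypeBits allows every
type; normally the mask comes from vkGetBufferMemoryRequirements():

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX,
        &allocCreateInfo, &memTypeIndex);
*/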
8424 
8425 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8426  VmaAllocator allocator,
8427  const VkBufferCreateInfo* pBufferCreateInfo,
8428  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8429  uint32_t* pMemoryTypeIndex)
8430 {
8431  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8432  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8433  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8434  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8435 
8436  const VkDevice hDev = allocator->m_hDevice;
8437  VkBuffer hBuffer = VK_NULL_HANDLE;
8438  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8439  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8440  if(res == VK_SUCCESS)
8441  {
8442  VkMemoryRequirements memReq = {};
8443  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8444  hDev, hBuffer, &memReq);
8445 
8446  res = vmaFindMemoryTypeIndex(
8447  allocator,
8448  memReq.memoryTypeBits,
8449  pAllocationCreateInfo,
8450  pMemoryTypeIndex);
8451 
8452  allocator->GetVulkanFunctions().vkDestroyBuffer(
8453  hDev, hBuffer, allocator->GetAllocationCallbacks());
8454  }
8455  return res;
8456 }
8457 
8458 VkResult vmaFindMemoryTypeIndexForImageInfo(
8459  VmaAllocator allocator,
8460  const VkImageCreateInfo* pImageCreateInfo,
8461  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8462  uint32_t* pMemoryTypeIndex)
8463 {
8464  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8465  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8466  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8467  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8468 
8469  const VkDevice hDev = allocator->m_hDevice;
8470  VkImage hImage = VK_NULL_HANDLE;
8471  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8472  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8473  if(res == VK_SUCCESS)
8474  {
8475  VkMemoryRequirements memReq = {};
8476  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8477  hDev, hImage, &memReq);
8478 
8479  res = vmaFindMemoryTypeIndex(
8480  allocator,
8481  memReq.memoryTypeBits,
8482  pAllocationCreateInfo,
8483  pMemoryTypeIndex);
8484 
8485  allocator->GetVulkanFunctions().vkDestroyImage(
8486  hDev, hImage, allocator->GetAllocationCallbacks());
8487  }
8488  return res;
8489 }
8490 
8491 VkResult vmaCreatePool(
8492  VmaAllocator allocator,
8493  const VmaPoolCreateInfo* pCreateInfo,
8494  VmaPool* pPool)
8495 {
8496  VMA_ASSERT(allocator && pCreateInfo && pPool);
8497 
8498  VMA_DEBUG_LOG("vmaCreatePool");
8499 
8500  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8501 
8502  return allocator->CreatePool(pCreateInfo, pPool);
8503 }
8504 
8505 void vmaDestroyPool(
8506  VmaAllocator allocator,
8507  VmaPool pool)
8508 {
8509  VMA_ASSERT(allocator);
8510 
8511  if(pool == VK_NULL_HANDLE)
8512  {
8513  return;
8514  }
8515 
8516  VMA_DEBUG_LOG("vmaDestroyPool");
8517 
8518  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8519 
8520  allocator->DestroyPool(pool);
8521 }
8522 
8523 void vmaGetPoolStats(
8524  VmaAllocator allocator,
8525  VmaPool pool,
8526  VmaPoolStats* pPoolStats)
8527 {
8528  VMA_ASSERT(allocator && pool && pPoolStats);
8529 
8530  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8531 
8532  allocator->GetPoolStats(pool, pPoolStats);
8533 }
8534 
8535 void vmaMakePoolAllocationsLost(
8536  VmaAllocator allocator,
8537  VmaPool pool,
8538  size_t* pLostAllocationCount)
8539 {
8540  VMA_ASSERT(allocator && pool);
8541 
8542  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8543 
8544  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8545 }
8546 
8547 VkResult vmaAllocateMemory(
8548  VmaAllocator allocator,
8549  const VkMemoryRequirements* pVkMemoryRequirements,
8550  const VmaAllocationCreateInfo* pCreateInfo,
8551  VmaAllocation* pAllocation,
8552  VmaAllocationInfo* pAllocationInfo)
8553 {
8554  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8555 
8556  VMA_DEBUG_LOG("vmaAllocateMemory");
8557 
8558  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8559 
8560  VkResult result = allocator->AllocateMemory(
8561  *pVkMemoryRequirements,
8562  false, // requiresDedicatedAllocation
8563  false, // prefersDedicatedAllocation
8564  VK_NULL_HANDLE, // dedicatedBuffer
8565  VK_NULL_HANDLE, // dedicatedImage
8566  *pCreateInfo,
8567  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8568  pAllocation);
8569 
8570  if(pAllocationInfo && result == VK_SUCCESS)
8571  {
8572  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8573  }
8574 
8575  return result;
8576 }
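
/*
The low-level allocation path - a sketch, assuming `memReq` was filled by
vkGetBufferMemoryRequirements(). Binding is then the caller's responsibility:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
        &alloc, &allocInfo);
    if(res == VK_SUCCESS)
        vkBindBufferMemory(device, buf, allocInfo.deviceMemory, allocInfo.offset);
*/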
8577 
8578 VkResult vmaAllocateMemoryForBuffer(
8579  VmaAllocator allocator,
8580  VkBuffer buffer,
8581  const VmaAllocationCreateInfo* pCreateInfo,
8582  VmaAllocation* pAllocation,
8583  VmaAllocationInfo* pAllocationInfo)
8584 {
8585  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8586 
8587  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8588 
8589  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8590 
8591  VkMemoryRequirements vkMemReq = {};
8592  bool requiresDedicatedAllocation = false;
8593  bool prefersDedicatedAllocation = false;
8594  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8595  requiresDedicatedAllocation,
8596  prefersDedicatedAllocation);
8597 
8598  VkResult result = allocator->AllocateMemory(
8599  vkMemReq,
8600  requiresDedicatedAllocation,
8601  prefersDedicatedAllocation,
8602  buffer, // dedicatedBuffer
8603  VK_NULL_HANDLE, // dedicatedImage
8604  *pCreateInfo,
8605  VMA_SUBALLOCATION_TYPE_BUFFER,
8606  pAllocation);
8607 
8608  if(pAllocationInfo && result == VK_SUCCESS)
8609  {
8610  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8611  }
8612 
8613  return result;
8614 }
8615 
8616 VkResult vmaAllocateMemoryForImage(
8617  VmaAllocator allocator,
8618  VkImage image,
8619  const VmaAllocationCreateInfo* pCreateInfo,
8620  VmaAllocation* pAllocation,
8621  VmaAllocationInfo* pAllocationInfo)
8622 {
8623  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8624 
8625  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8626 
8627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8628 
8629  VkResult result = AllocateMemoryForImage(
8630  allocator,
8631  image,
8632  pCreateInfo,
8633  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8634  pAllocation);
8635 
8636  if(pAllocationInfo && result == VK_SUCCESS)
8637  {
8638  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8639  }
8640 
8641  return result;
8642 }
8643 
8644 void vmaFreeMemory(
8645  VmaAllocator allocator,
8646  VmaAllocation allocation)
8647 {
8648  VMA_ASSERT(allocator && allocation);
8649 
8650  VMA_DEBUG_LOG("vmaFreeMemory");
8651 
8652  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8653 
8654  allocator->FreeMemory(allocation);
8655 }
8656 
8657 void vmaGetAllocationInfo(
8658  VmaAllocator allocator,
8659  VmaAllocation allocation,
8660  VmaAllocationInfo* pAllocationInfo)
8661 {
8662  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8663 
8664  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8665 
8666  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8667 }
8668 
8669 VkBool32 vmaTouchAllocation(
8670  VmaAllocator allocator,
8671  VmaAllocation allocation)
8672 {
8673  VMA_ASSERT(allocator && allocation);
8674 
8675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8676 
8677  return allocator->TouchAllocation(allocation);
8678 }
8679 
8680 void vmaSetAllocationUserData(
8681  VmaAllocator allocator,
8682  VmaAllocation allocation,
8683  void* pUserData)
8684 {
8685  VMA_ASSERT(allocator && allocation);
8686 
8687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8688 
8689  allocation->SetUserData(allocator, pUserData);
8690 }
8691 
8692 void vmaCreateLostAllocation(
8693  VmaAllocator allocator,
8694  VmaAllocation* pAllocation)
8695 {
8696  VMA_ASSERT(allocator && pAllocation);
8697 
8698  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8699 
8700  allocator->CreateLostAllocation(pAllocation);
8701 }
8702 
8703 VkResult vmaMapMemory(
8704  VmaAllocator allocator,
8705  VmaAllocation allocation,
8706  void** ppData)
8707 {
8708  VMA_ASSERT(allocator && allocation && ppData);
8709 
8710  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8711 
8712  return allocator->Map(allocation, ppData);
8713 }
8714 
8715 void vmaUnmapMemory(
8716  VmaAllocator allocator,
8717  VmaAllocation allocation)
8718 {
8719  VMA_ASSERT(allocator && allocation);
8720 
8721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8722 
8723  allocator->Unmap(allocation);
8724 }
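
/*
Map/unmap sketch for a HOST_VISIBLE allocation (`srcData` and `srcSize` are
illustrative). For a block allocation this maps the whole VkDeviceMemory and
offsets the returned pointer, as implemented in VmaAllocator_T::Map() above:

    void* pData;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/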
8725 
8726 VkResult vmaDefragment(
8727  VmaAllocator allocator,
8728  VmaAllocation* pAllocations,
8729  size_t allocationCount,
8730  VkBool32* pAllocationsChanged,
8731  const VmaDefragmentationInfo *pDefragmentationInfo,
8732  VmaDefragmentationStats* pDefragmentationStats)
8733 {
8734  VMA_ASSERT(allocator && pAllocations);
8735 
8736  VMA_DEBUG_LOG("vmaDefragment");
8737 
8738  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8739 
8740  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8741 }
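
/*
A defragmentation pass over previously collected allocations - a sketch
(`allocations` and ALLOC_COUNT are illustrative). Resources bound to moved
allocations must be re-created and re-bound by the caller afterwards:

    VkBool32 allocationsChanged[ALLOC_COUNT];
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator, allocations, ALLOC_COUNT,
        allocationsChanged, nullptr, &defragStats);
*/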
8742 
8743 VkResult vmaCreateBuffer(
8744  VmaAllocator allocator,
8745  const VkBufferCreateInfo* pBufferCreateInfo,
8746  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8747  VkBuffer* pBuffer,
8748  VmaAllocation* pAllocation,
8749  VmaAllocationInfo* pAllocationInfo)
8750 {
8751  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8752 
8753  VMA_DEBUG_LOG("vmaCreateBuffer");
8754 
8755  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8756 
8757  *pBuffer = VK_NULL_HANDLE;
8758  *pAllocation = VK_NULL_HANDLE;
8759 
8760  // 1. Create VkBuffer.
8761  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8762  allocator->m_hDevice,
8763  pBufferCreateInfo,
8764  allocator->GetAllocationCallbacks(),
8765  pBuffer);
8766  if(res >= 0)
8767  {
8768  // 2. vkGetBufferMemoryRequirements.
8769  VkMemoryRequirements vkMemReq = {};
8770  bool requiresDedicatedAllocation = false;
8771  bool prefersDedicatedAllocation = false;
8772  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8773  requiresDedicatedAllocation, prefersDedicatedAllocation);
8774 
8775  // Make sure alignment requirements for specific buffer usages reported
8776  // in Physical Device Properties are included in alignment reported by memory requirements.
8777  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8778  {
8779  VMA_ASSERT(vkMemReq.alignment %
8780  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8781  }
8782  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8783  {
8784  VMA_ASSERT(vkMemReq.alignment %
8785  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8786  }
8787  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8788  {
8789  VMA_ASSERT(vkMemReq.alignment %
8790  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8791  }
8792 
8793  // 3. Allocate memory using allocator.
8794  res = allocator->AllocateMemory(
8795  vkMemReq,
8796  requiresDedicatedAllocation,
8797  prefersDedicatedAllocation,
8798  *pBuffer, // dedicatedBuffer
8799  VK_NULL_HANDLE, // dedicatedImage
8800  *pAllocationCreateInfo,
8801  VMA_SUBALLOCATION_TYPE_BUFFER,
8802  pAllocation);
8803  if(res >= 0)
8804  {
8805  // 4. Bind buffer with memory.
8806  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8807  allocator->m_hDevice,
8808  *pBuffer,
8809  (*pAllocation)->GetMemory(),
8810  (*pAllocation)->GetOffset());
8811  if(res >= 0)
8812  {
8813  // All steps succeeded.
8814  if(pAllocationInfo != VMA_NULL)
8815  {
8816  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8817  }
8818  return VK_SUCCESS;
8819  }
8820  allocator->FreeMemory(*pAllocation);
8821  *pAllocation = VK_NULL_HANDLE;
8822  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8823  *pBuffer = VK_NULL_HANDLE;
8824  return res;
8825  }
8826  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8827  *pBuffer = VK_NULL_HANDLE;
8828  return res;
8829  }
8830  return res;
8831 }
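
/*
The canonical buffer-creation call - a sketch: one call creates the VkBuffer,
allocates memory of a suitable type, and binds them, following steps 1-4 above:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
    // ... later:
    vmaDestroyBuffer(allocator, buf, alloc);
*/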
8832 
8833 void vmaDestroyBuffer(
8834  VmaAllocator allocator,
8835  VkBuffer buffer,
8836  VmaAllocation allocation)
8837 {
8838  if(buffer != VK_NULL_HANDLE)
8839  {
8840  VMA_ASSERT(allocator);
8841 
8842  VMA_DEBUG_LOG("vmaDestroyBuffer");
8843 
8844  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8845 
8846  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8847 
8848  allocator->FreeMemory(allocation);
8849  }
8850 }
8851 
8852 VkResult vmaCreateImage(
8853  VmaAllocator allocator,
8854  const VkImageCreateInfo* pImageCreateInfo,
8855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8856  VkImage* pImage,
8857  VmaAllocation* pAllocation,
8858  VmaAllocationInfo* pAllocationInfo)
8859 {
8860  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8861 
8862  VMA_DEBUG_LOG("vmaCreateImage");
8863 
8864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8865 
8866  *pImage = VK_NULL_HANDLE;
8867  *pAllocation = VK_NULL_HANDLE;
8868 
8869  // 1. Create VkImage.
8870  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8871  allocator->m_hDevice,
8872  pImageCreateInfo,
8873  allocator->GetAllocationCallbacks(),
8874  pImage);
8875  if(res >= 0)
8876  {
8877  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8878  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8879  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8880 
8881  // 2. Allocate memory using allocator.
8882  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8883  if(res >= 0)
8884  {
8885  // 3. Bind image with memory.
8886  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8887  allocator->m_hDevice,
8888  *pImage,
8889  (*pAllocation)->GetMemory(),
8890  (*pAllocation)->GetOffset());
8891  if(res >= 0)
8892  {
8893  // All steps succeeded.
8894  if(pAllocationInfo != VMA_NULL)
8895  {
8896  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8897  }
8898  return VK_SUCCESS;
8899  }
8900  allocator->FreeMemory(*pAllocation);
8901  *pAllocation = VK_NULL_HANDLE;
8902  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8903  *pImage = VK_NULL_HANDLE;
8904  return res;
8905  }
8906  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8907  *pImage = VK_NULL_HANDLE;
8908  return res;
8909  }
8910  return res;
8911 }
8912 
8913 void vmaDestroyImage(
8914  VmaAllocator allocator,
8915  VkImage image,
8916  VmaAllocation allocation)
8917 {
8918  if(image != VK_NULL_HANDLE)
8919  {
8920  VMA_ASSERT(allocator);
8921 
8922  VMA_DEBUG_LOG("vmaDestroyImage");
8923 
8924  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8925 
8926  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8927 
8928  allocator->FreeMemory(allocation);
8929  }
8930 }
8931 
8932 #endif // #ifdef VMA_IMPLEMENTATION
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1387
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:888
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1338
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1086
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1622
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1640
Definition: vk_mem_alloc.h:1125
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1234
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:903
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1054
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:838
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:859
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:864
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1642
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1221
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1397
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:898
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1037
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1346
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:851
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1195
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1050
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:855
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1341
Definition: vk_mem_alloc.h:1134
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1216
Definition: vk_mem_alloc.h:1207
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1040
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:900
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1359
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:934
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1390
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1205
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1240
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:972
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1056
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1175
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1049
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:909
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:853
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:908
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1373
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1481
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:928
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1049
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1046
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1378
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1462
Definition: vk_mem_alloc.h:1203
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1638
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:896
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:911
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1044
Definition: vk_mem_alloc.h:1091
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1331
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1042
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:906
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:910
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1162
Definition: vk_mem_alloc.h:1118
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1476
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:886
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:899
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1443
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1309
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1050
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
TODO finish documentation...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1057
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1384
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1050
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1448